mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 08:36:55 +03:00
Compare commits
74 Commits
query-debu
...
v1.79.5
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3282829e30 | ||
|
|
6b54ad8cea | ||
|
|
7ac3f19ee1 | ||
|
|
ac03ad8b6c | ||
|
|
66f0015cd5 | ||
|
|
b931b9151a | ||
|
|
2ddea5be36 | ||
|
|
1fbf94a1c4 | ||
|
|
3d7e03cffa | ||
|
|
3195aa44fd | ||
|
|
ae64be4f2c | ||
|
|
c32d3695e7 | ||
|
|
b5ec4379d0 | ||
|
|
3ddab2d789 | ||
|
|
8b08761c56 | ||
|
|
82d6610426 | ||
|
|
b4f4eaf710 | ||
|
|
7d4101931c | ||
|
|
70a579b725 | ||
|
|
f6211309c5 | ||
|
|
f088162c8a | ||
|
|
7115b8610a | ||
|
|
4fc3495bed | ||
|
|
00e55947e4 | ||
|
|
9b4ebf95b8 | ||
|
|
f68333a8ce | ||
|
|
7543bdfd54 | ||
|
|
2db5ec5509 | ||
|
|
5839112cda | ||
|
|
dca89c7d2f | ||
|
|
64c0133b88 | ||
|
|
dbf0ef5b38 | ||
|
|
4ef2d46b8b | ||
|
|
dd46521676 | ||
|
|
a037180167 | ||
|
|
32cbc0f497 | ||
|
|
e7119de7f7 | ||
|
|
d54cf15478 | ||
|
|
56d7f3e37b | ||
|
|
cd422a5435 | ||
|
|
c35b63cd0c | ||
|
|
3e2b434bad | ||
|
|
240acdf3b7 | ||
|
|
a9c5766ebc | ||
|
|
0aa41430dd | ||
|
|
992f36702f | ||
|
|
86d85591a4 | ||
|
|
e8b1131f97 | ||
|
|
2838ee93c6 | ||
|
|
72342939d6 | ||
|
|
e3fd90e35e | ||
|
|
e690bdda09 | ||
|
|
0a8fdc5b6a | ||
|
|
fc0edfab11 | ||
|
|
d3b38ddb2e | ||
|
|
056960102a | ||
|
|
aef7b33867 | ||
|
|
095933eef8 | ||
|
|
d335436b9a | ||
|
|
d77455a485 | ||
|
|
e3f8796e90 | ||
|
|
33268b261e | ||
|
|
2426695571 | ||
|
|
7e9794cf9f | ||
|
|
05356d2a49 | ||
|
|
b161fc46dc | ||
|
|
8f74f1bc91 | ||
|
|
0bbe842c31 | ||
|
|
0d5a025934 | ||
|
|
5b0b4e1078 | ||
|
|
c28aba604d | ||
|
|
8afcc01582 | ||
|
|
208a63e045 | ||
|
|
5df6790daf |
1
Makefile
1
Makefile
@@ -16,6 +16,7 @@ GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TA
|
||||
include app/*/Makefile
|
||||
include deployment/*/Makefile
|
||||
include snap/local/Makefile
|
||||
include package/release/Makefile
|
||||
|
||||
all: \
|
||||
victoria-metrics-prod \
|
||||
|
||||
@@ -56,10 +56,12 @@ var (
|
||||
|
||||
awsUseSigv4 = flagutil.NewArrayBool("remoteWrite.aws.useSigv4", "Enables SigV4 request signing for the corresponding -remoteWrite.url. "+
|
||||
"It is expected that other -remoteWrite.aws.* command-line flags are set if sigv4 request signing is enabled")
|
||||
awsRegion = flagutil.NewArray("remoteWrite.aws.region", "Optional AWS region to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsRoleARN = flagutil.NewArray("remoteWrite.aws.roleARN", "Optional AWS roleARN to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsAccessKey = flagutil.NewArray("remoteWrite.aws.accessKey", "Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsService = flagutil.NewArray("remoteWrite.aws.service", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
|
||||
awsEC2Endpoint = flagutil.NewArray("remoteWrite.aws.ec2Endpoint", "Optional AWS EC2 API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsSTSEndpoint = flagutil.NewArray("remoteWrite.aws.stsEndpoint", "Optional AWS STS API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsRegion = flagutil.NewArray("remoteWrite.aws.region", "Optional AWS region to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsRoleARN = flagutil.NewArray("remoteWrite.aws.roleARN", "Optional AWS roleARN to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsAccessKey = flagutil.NewArray("remoteWrite.aws.accessKey", "Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
awsService = flagutil.NewArray("remoteWrite.aws.service", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
|
||||
"Defaults to \"aps\"")
|
||||
awsSecretKey = flagutil.NewArray("remoteWrite.aws.secretKey", "Optional AWS SecretKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
|
||||
)
|
||||
@@ -154,6 +156,9 @@ func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
|
||||
c.packetsDropped = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_packets_dropped_total{url=%q}`, c.sanitizedURL))
|
||||
c.retriesCount = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_retries_count_total{url=%q}`, c.sanitizedURL))
|
||||
c.sendDuration = metrics.GetOrCreateFloatCounter(fmt.Sprintf(`vmagent_remotewrite_send_duration_seconds_total{url=%q}`, c.sanitizedURL))
|
||||
metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_queues{url=%q}`, c.sanitizedURL), func() float64 {
|
||||
return float64(*queues)
|
||||
})
|
||||
for i := 0; i < concurrency; i++ {
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
@@ -231,12 +236,14 @@ func getAWSAPIConfig(argIdx int) (*awsapi.Config, error) {
|
||||
if !awsUseSigv4.GetOptionalArg(argIdx) {
|
||||
return nil, nil
|
||||
}
|
||||
ec2Endpoint := awsEC2Endpoint.GetOptionalArg(argIdx)
|
||||
stsEndpoint := awsSTSEndpoint.GetOptionalArg(argIdx)
|
||||
region := awsRegion.GetOptionalArg(argIdx)
|
||||
roleARN := awsRoleARN.GetOptionalArg(argIdx)
|
||||
accessKey := awsAccessKey.GetOptionalArg(argIdx)
|
||||
secretKey := awsSecretKey.GetOptionalArg(argIdx)
|
||||
service := awsService.GetOptionalArg(argIdx)
|
||||
cfg, err := awsapi.NewConfig(region, roleARN, accessKey, secretKey, service)
|
||||
cfg, err := awsapi.NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey, service)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -637,11 +637,13 @@ The shortlist of configuration flags is the following:
|
||||
-datasource.oauth2.tokenUrl string
|
||||
Optional OAuth2 tokenURL to use for -datasource.url.
|
||||
-datasource.queryStep duration
|
||||
queryStep defines how far a value can fallback to when evaluating queries. For example, if datasource.queryStep=15s then param "step" with value "15s" will be added to every query.If queryStep isn't specified, rule's evaluationInterval will be used instead.
|
||||
How far a value can fallback to when evaluating queries. For example, if -datasource.queryStep=15s then param "step" with value "15s" will be added to every query. If set to 0, rule's evaluation interval will be used instead. (default 5m0s)
|
||||
-datasource.queryTimeAlignment
|
||||
Whether to align "time" parameter with evaluation interval.Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
|
||||
-datasource.roundDigits int
|
||||
Adds "round_digits" GET param to datasource requests. In VM "round_digits" limits the number of digits after the decimal point in response values.
|
||||
-datasource.showURL
|
||||
Whether to show -datasource.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
|
||||
-datasource.tlsCAFile string
|
||||
Optional path to TLS CA file to use for verifying connections to -datasource.url. By default, system CA is used
|
||||
-datasource.tlsCertFile string
|
||||
@@ -653,7 +655,7 @@ The shortlist of configuration flags is the following:
|
||||
-datasource.tlsServerName string
|
||||
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
|
||||
-datasource.url string
|
||||
VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend
|
||||
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
|
||||
-defaultTenant.graphite string
|
||||
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
|
||||
-defaultTenant.prometheus string
|
||||
@@ -673,8 +675,8 @@ The shortlist of configuration flags is the following:
|
||||
-evaluationInterval duration
|
||||
How often to evaluate the rules (default 1m0s)
|
||||
-external.alert.source string
|
||||
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
|
||||
eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/vmalert/api/v1/alert?group_id=&alert_id=' is used
|
||||
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' . If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used
|
||||
If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.
|
||||
-external.label array
|
||||
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
@@ -813,6 +815,8 @@ The shortlist of configuration flags is the following:
|
||||
Optional OAuth2 scopes to use for -remoteRead.url. Scopes must be delimited by ';'.
|
||||
-remoteRead.oauth2.tokenUrl string
|
||||
Optional OAuth2 tokenURL to use for -remoteRead.url.
|
||||
-remoteRead.showURL
|
||||
Whether to show -remoteRead.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
|
||||
-remoteRead.tlsCAFile string
|
||||
Optional path to TLS CA file to use for verifying connections to -remoteRead.url. By default system CA is used
|
||||
-remoteRead.tlsCertFile string
|
||||
@@ -824,7 +828,7 @@ The shortlist of configuration flags is the following:
|
||||
-remoteRead.tlsServerName string
|
||||
Optional TLS server name to use for connections to -remoteRead.url. By default the server name from -remoteRead.url is used
|
||||
-remoteRead.url vmalert
|
||||
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend
|
||||
Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect.Remote read is used to restore alerts state.This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.
|
||||
-remoteWrite.basicAuth.password string
|
||||
Optional basic auth password for -remoteWrite.url
|
||||
-remoteWrite.basicAuth.passwordFile string
|
||||
@@ -855,6 +859,8 @@ The shortlist of configuration flags is the following:
|
||||
Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'.
|
||||
-remoteWrite.oauth2.tokenUrl string
|
||||
Optional OAuth2 tokenURL to use for -notifier.url.
|
||||
-remoteWrite.showURL
|
||||
Whether to show -remoteWrite.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
|
||||
-remoteWrite.tlsCAFile string
|
||||
Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. By default system CA is used
|
||||
-remoteWrite.tlsCertFile string
|
||||
@@ -866,7 +872,7 @@ The shortlist of configuration flags is the following:
|
||||
-remoteWrite.tlsServerName string
|
||||
Optional TLS server name to use for connections to -remoteWrite.url. By default the server name from -remoteWrite.url is used
|
||||
-remoteWrite.url string
|
||||
Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend
|
||||
Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.
|
||||
-replay.disableProgressBar
|
||||
Whether to disable rendering progress bars during the replay. Progress bar rendering might be verbose or break the logs parsing, so it is recommended to be disabled when not used in interactive mode.
|
||||
-replay.maxDatapointsPerQuery int
|
||||
|
||||
@@ -285,15 +285,11 @@ func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
a.State = notifier.StatePending
|
||||
a.ActiveAt = ts
|
||||
}
|
||||
if a.Value != m.Values[0] {
|
||||
// update Value field with latest value
|
||||
a.Value = m.Values[0]
|
||||
// and re-exec template since Value can be used
|
||||
// in annotations
|
||||
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.Value = m.Values[0]
|
||||
// re-exec template since Value or query can be used in annotations
|
||||
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -6,14 +6,18 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("datasource.url", "", "VictoriaMetrics or vmselect url. Required parameter. "+
|
||||
"E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend")
|
||||
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
|
||||
addr = flag.String("datasource.url", "", "Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. "+
|
||||
"E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.")
|
||||
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
|
||||
showDatasourceURL = flag.Bool("datasource.showURL", false, "Whether to show -datasource.url in the exported metrics. "+
|
||||
"It is hidden by default, since it can contain sensitive info such as auth key")
|
||||
|
||||
basicAuthUsername = flag.String("datasource.basicAuth.username", "", "Optional basic auth username for -datasource.url")
|
||||
basicAuthPassword = flag.String("datasource.basicAuth.password", "", "Optional basic auth password for -datasource.url")
|
||||
@@ -35,9 +39,9 @@ var (
|
||||
oauth2Scopes = flag.String("datasource.oauth2.scopes", "", "Optional OAuth2 scopes to use for -datasource.url. Scopes must be delimited by ';'")
|
||||
|
||||
lookBack = flag.Duration("datasource.lookback", 0, `Lookback defines how far into the past to look when evaluating queries. For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.`)
|
||||
queryStep = flag.Duration("datasource.queryStep", 0, "queryStep defines how far a value can fallback to when evaluating queries. "+
|
||||
"For example, if datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query."+
|
||||
"If queryStep isn't specified, rule's evaluationInterval will be used instead.")
|
||||
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fallback to when evaluating queries. "+
|
||||
"For example, if -datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query. "+
|
||||
"If set to 0, rule's evaluation interval will be used instead.")
|
||||
queryTimeAlignment = flag.Bool("datasource.queryTimeAlignment", true, `Whether to align "time" parameter with evaluation interval.`+
|
||||
"Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
|
||||
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
|
||||
@@ -47,6 +51,13 @@ var (
|
||||
`In VM "round_digits" limits the number of digits after the decimal point in response values.`)
|
||||
)
|
||||
|
||||
// InitSecretFlags must be called after flag.Parse and before any logging
|
||||
func InitSecretFlags() {
|
||||
if !*showDatasourceURL {
|
||||
flagutil.RegisterSecretFlag("datasource.url")
|
||||
}
|
||||
}
|
||||
|
||||
// Param represents an HTTP GET param
|
||||
type Param struct {
|
||||
Key, Value string
|
||||
|
||||
@@ -58,8 +58,11 @@ absolute path to all .tpl files in root.`)
|
||||
resendDelay = flag.Duration("rule.resendDelay", 0, "Minimum amount of time to wait before resending an alert to notifier")
|
||||
|
||||
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
|
||||
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
|
||||
eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/vmalert/api/v1/alert?group_id=&alert_id=' is used`)
|
||||
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
|
||||
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
|
||||
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
|
||||
`For example, link to Grafana: -external.alert.source='explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' . `+
|
||||
`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
|
||||
externalLabels = flagutil.NewArray("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
|
||||
"Pass multiple -label flags in order to add multiple label sets.")
|
||||
|
||||
@@ -79,6 +82,9 @@ func main() {
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
flag.Usage = usage
|
||||
envflag.Parse()
|
||||
remoteread.InitSecretFlags()
|
||||
remotewrite.InitSecretFlags()
|
||||
datasource.InitSecretFlags()
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
err := templates.Load(*ruleTemplatesPath, true)
|
||||
|
||||
@@ -66,15 +66,21 @@ func TestAlert_ExecTemplate(t *testing.T) {
|
||||
{
|
||||
name: "expression-template",
|
||||
alert: &Alert{
|
||||
Expr: `vm_rows{"label"="bar"}>0`,
|
||||
Expr: `vm_rows{"label"="bar"}<0`,
|
||||
},
|
||||
annotations: map[string]string{
|
||||
"exprEscapedQuery": "{{ $expr|quotesEscape|queryEscape }}",
|
||||
"exprEscapedPath": "{{ $expr|quotesEscape|pathEscape }}",
|
||||
"exprEscapedQuery": "{{ $expr|queryEscape }}",
|
||||
"exprEscapedPath": "{{ $expr|pathEscape }}",
|
||||
"exprEscapedJSON": "{{ $expr|jsonEscape }}",
|
||||
"exprEscapedQuotes": "{{ $expr|quotesEscape }}",
|
||||
"exprEscapedHTML": "{{ $expr|htmlEscape }}",
|
||||
},
|
||||
expTpl: map[string]string{
|
||||
"exprEscapedQuery": "vm_rows%7B%5C%22label%5C%22%3D%5C%22bar%5C%22%7D%3E0",
|
||||
"exprEscapedPath": "vm_rows%7B%5C%22label%5C%22=%5C%22bar%5C%22%7D%3E0",
|
||||
"exprEscapedQuery": "vm_rows%7B%22label%22%3D%22bar%22%7D%3C0",
|
||||
"exprEscapedPath": "vm_rows%7B%22label%22=%22bar%22%7D%3C0",
|
||||
"exprEscapedJSON": `"vm_rows{\"label\"=\"bar\"}\u003c0"`,
|
||||
"exprEscapedQuotes": `vm_rows{\"label\"=\"bar\"}\u003c0`,
|
||||
"exprEscapedHTML": "vm_rows{"label"="bar"}<0",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -7,12 +7,17 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("remoteRead.url", "", "Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts "+
|
||||
"state. This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
|
||||
"E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend")
|
||||
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect."+
|
||||
"Remote read is used to restore alerts state."+
|
||||
"This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
|
||||
"E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.")
|
||||
|
||||
showRemoteReadURL = flag.Bool("remoteRead.showURL", false, "Whether to show -remoteRead.url in the exported metrics. "+
|
||||
"It is hidden by default, since it can contain sensitive info such as auth key")
|
||||
|
||||
basicAuthUsername = flag.String("remoteRead.basicAuth.username", "", "Optional basic auth username for -remoteRead.url")
|
||||
basicAuthPassword = flag.String("remoteRead.basicAuth.password", "", "Optional basic auth password for -remoteRead.url")
|
||||
@@ -36,6 +41,13 @@ var (
|
||||
oauth2Scopes = flag.String("remoteRead.oauth2.scopes", "", "Optional OAuth2 scopes to use for -remoteRead.url. Scopes must be delimited by ';'.")
|
||||
)
|
||||
|
||||
// InitSecretFlags must be called after flag.Parse and before any logging
|
||||
func InitSecretFlags() {
|
||||
if !*showRemoteReadURL {
|
||||
flagutil.RegisterSecretFlag("remoteRead.url")
|
||||
}
|
||||
}
|
||||
|
||||
// Init creates a Querier from provided flag values.
|
||||
// Returns nil if addr flag wasn't set.
|
||||
func Init() (datasource.QuerierBuilder, error) {
|
||||
|
||||
@@ -7,12 +7,15 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("remoteWrite.url", "", "Optional URL to VictoriaMetrics or vminsert where to persist alerts state "+
|
||||
"and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, "+
|
||||
"then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend")
|
||||
"then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.")
|
||||
showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
|
||||
"It is hidden by default, since it can contain sensitive info such as auth key")
|
||||
|
||||
basicAuthUsername = flag.String("remoteWrite.basicAuth.username", "", "Optional basic auth username for -remoteWrite.url")
|
||||
basicAuthPassword = flag.String("remoteWrite.basicAuth.password", "", "Optional basic auth password for -remoteWrite.url")
|
||||
@@ -41,6 +44,13 @@ var (
|
||||
oauth2Scopes = flag.String("remoteWrite.oauth2.scopes", "", "Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'.")
|
||||
)
|
||||
|
||||
// InitSecretFlags must be called after flag.Parse and before any logging
|
||||
func InitSecretFlags() {
|
||||
if !*showRemoteWriteURL {
|
||||
flagutil.RegisterSecretFlag("remoteWrite.url")
|
||||
}
|
||||
}
|
||||
|
||||
// Init creates Client object from given flags.
|
||||
// Returns nil if addr flag wasn't set.
|
||||
func Init(ctx context.Context) (*Client, error) {
|
||||
|
||||
@@ -218,7 +218,7 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
|
||||
return
|
||||
}
|
||||
|
||||
logger.Errorf("attempt %d to send request failed: %s", i+1, err)
|
||||
logger.Warnf("attempt %d to send request failed: %s", i+1, err)
|
||||
// sleeping to avoid remote db hammering
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
|
||||
15
app/vmalert/templates/funcs.qtpl
Normal file
15
app/vmalert/templates/funcs.qtpl
Normal file
@@ -0,0 +1,15 @@
|
||||
{% stripspace %}
|
||||
|
||||
{% func quotesEscape(s string) %}
|
||||
{%j= s %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func jsonEscape(s string) %}
|
||||
{%q= s %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func htmlEscape(s string) %}
|
||||
{%s s %}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
117
app/vmalert/templates/funcs.qtpl.go
Normal file
117
app/vmalert/templates/funcs.qtpl.go
Normal file
@@ -0,0 +1,117 @@
|
||||
// Code generated by qtc from "funcs.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:3
|
||||
package templates
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:3
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:3
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:3
|
||||
func streamquotesEscape(qw422016 *qt422016.Writer, s string) {
|
||||
//line app/vmalert/templates/funcs.qtpl:4
|
||||
qw422016.N().J(s)
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
func writequotesEscape(qq422016 qtio422016.Writer, s string) {
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
streamquotesEscape(qw422016, s)
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
func quotesEscape(s string) string {
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
writequotesEscape(qb422016, s)
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
return qs422016
|
||||
//line app/vmalert/templates/funcs.qtpl:5
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:7
|
||||
func streamjsonEscape(qw422016 *qt422016.Writer, s string) {
|
||||
//line app/vmalert/templates/funcs.qtpl:8
|
||||
qw422016.N().Q(s)
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
func writejsonEscape(qq422016 qtio422016.Writer, s string) {
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
streamjsonEscape(qw422016, s)
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
func jsonEscape(s string) string {
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
writejsonEscape(qb422016, s)
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
return qs422016
|
||||
//line app/vmalert/templates/funcs.qtpl:9
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:11
|
||||
func streamhtmlEscape(qw422016 *qt422016.Writer, s string) {
|
||||
//line app/vmalert/templates/funcs.qtpl:12
|
||||
qw422016.E().S(s)
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
func writehtmlEscape(qq422016 qtio422016.Writer, s string) {
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
streamhtmlEscape(qw422016, s)
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
}
|
||||
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
func htmlEscape(s string) string {
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
writehtmlEscape(qb422016, s)
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
return qs422016
|
||||
//line app/vmalert/templates/funcs.qtpl:13
|
||||
}
|
||||
@@ -207,23 +207,10 @@ func FuncsWithExternalURL(externalURL *url.URL) textTpl.FuncMap {
|
||||
// templateFuncs initiates template helper functions
|
||||
func templateFuncs() textTpl.FuncMap {
|
||||
// See https://prometheus.io/docs/prometheus/latest/configuration/template_reference/
|
||||
// and https://github.com/prometheus/prometheus/blob/fa6e05903fd3ce52e374a6e1bf4eb98c9f1f45a7/template/template.go#L150
|
||||
return textTpl.FuncMap{
|
||||
/* Strings */
|
||||
|
||||
// reReplaceAll ReplaceAllString returns a copy of src, replacing matches of the Regexp with
|
||||
// the replacement string repl. Inside repl, $ signs are interpreted as in Expand,
|
||||
// so for instance $1 represents the text of the first submatch.
|
||||
// alias for https://golang.org/pkg/regexp/#Regexp.ReplaceAllString
|
||||
"reReplaceAll": func(pattern, repl, text string) string {
|
||||
re := regexp.MustCompile(pattern)
|
||||
return re.ReplaceAllString(text, repl)
|
||||
},
|
||||
|
||||
// match reports whether the string s
|
||||
// contains any match of the regular expression pattern.
|
||||
// alias for https://golang.org/pkg/regexp/#MatchString
|
||||
"match": regexp.MatchString,
|
||||
|
||||
// title returns a copy of the string s with all Unicode letters
|
||||
// that begin words mapped to their Unicode title case.
|
||||
// alias for https://golang.org/pkg/strings/#Title
|
||||
@@ -237,6 +224,31 @@ func templateFuncs() textTpl.FuncMap {
|
||||
// alias for https://golang.org/pkg/strings/#ToLower
|
||||
"toLower": strings.ToLower,
|
||||
|
||||
// crlfEscape replaces '\n' and '\r' chars with `\\n` and `\\r`.
|
||||
// This funcion is deprectated.
|
||||
//
|
||||
// It is better to use quotesEscape, jsonEscape, queryEscape or pathEscape instead -
|
||||
// these functions properly escape `\n` and `\r` chars according to their purpose.
|
||||
"crlfEscape": func(q string) string {
|
||||
q = strings.Replace(q, "\n", `\n`, -1)
|
||||
return strings.Replace(q, "\r", `\r`, -1)
|
||||
},
|
||||
|
||||
// quotesEscape escapes the string, so it can be safely put inside JSON string.
|
||||
//
|
||||
// See also jsonEscape.
|
||||
"quotesEscape": quotesEscape,
|
||||
|
||||
// jsonEscape converts the string to properly encoded JSON string.
|
||||
//
|
||||
// See also quotesEscape.
|
||||
"jsonEscape": jsonEscape,
|
||||
|
||||
// htmlEscape applies html-escaping to q, so it can be safely embedded as plaintext into html.
|
||||
//
|
||||
// See also safeHtml.
|
||||
"htmlEscape": htmlEscape,
|
||||
|
||||
// stripPort splits string into host and port, then returns only host.
|
||||
"stripPort": func(hostPort string) string {
|
||||
host, _, err := net.SplitHostPort(hostPort)
|
||||
@@ -246,6 +258,37 @@ func templateFuncs() textTpl.FuncMap {
|
||||
return host
|
||||
},
|
||||
|
||||
// stripDomain removes the domain part of a FQDN. Leaves port untouched.
|
||||
"stripDomain": func(hostPort string) string {
|
||||
host, port, err := net.SplitHostPort(hostPort)
|
||||
if err != nil {
|
||||
host = hostPort
|
||||
}
|
||||
ip := net.ParseIP(host)
|
||||
if ip != nil {
|
||||
return hostPort
|
||||
}
|
||||
host = strings.Split(host, ".")[0]
|
||||
if port != "" {
|
||||
return net.JoinHostPort(host, port)
|
||||
}
|
||||
return host
|
||||
},
|
||||
|
||||
// match reports whether the string s
|
||||
// contains any match of the regular expression pattern.
|
||||
// alias for https://golang.org/pkg/regexp/#MatchString
|
||||
"match": regexp.MatchString,
|
||||
|
||||
// reReplaceAll ReplaceAllString returns a copy of src, replacing matches of the Regexp with
|
||||
// the replacement string repl. Inside repl, $ signs are interpreted as in Expand,
|
||||
// so for instance $1 represents the text of the first submatch.
|
||||
// alias for https://golang.org/pkg/regexp/#Regexp.ReplaceAllString
|
||||
"reReplaceAll": func(pattern, repl, text string) string {
|
||||
re := regexp.MustCompile(pattern)
|
||||
return re.ReplaceAllString(text, repl)
|
||||
},
|
||||
|
||||
// parseDuration parses a duration string such as "1h" into the number of seconds it represents
|
||||
"parseDuration": func(s string) (float64, error) {
|
||||
d, err := promutils.ParseDuration(s)
|
||||
@@ -399,31 +442,15 @@ func templateFuncs() textTpl.FuncMap {
|
||||
return ""
|
||||
},
|
||||
|
||||
// pathEscape escapes the string so it can be safely placed inside a URL path segment,
|
||||
// replacing special characters (including /) with %XX sequences as needed.
|
||||
// alias for https://golang.org/pkg/net/url/#PathEscape
|
||||
"pathEscape": func(u string) string {
|
||||
return url.PathEscape(u)
|
||||
},
|
||||
// pathEscape escapes the string so it can be safely placed inside a URL path segment.
|
||||
//
|
||||
// See also queryEscape.
|
||||
"pathEscape": url.PathEscape,
|
||||
|
||||
// queryEscape escapes the string so it can be safely placed
|
||||
// inside a URL query.
|
||||
// alias for https://golang.org/pkg/net/url/#QueryEscape
|
||||
"queryEscape": func(q string) string {
|
||||
return url.QueryEscape(q)
|
||||
},
|
||||
|
||||
// crlfEscape replaces new line chars to skip URL encoding.
|
||||
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/890
|
||||
"crlfEscape": func(q string) string {
|
||||
q = strings.Replace(q, "\n", `\n`, -1)
|
||||
return strings.Replace(q, "\r", `\r`, -1)
|
||||
},
|
||||
|
||||
// quotesEscape escapes quote char
|
||||
"quotesEscape": func(q string) string {
|
||||
return strings.Replace(q, `"`, `\"`, -1)
|
||||
},
|
||||
// queryEscape escapes the string so it can be safely placed inside a query arg in URL.
|
||||
//
|
||||
// See also queryEscape.
|
||||
"queryEscape": url.QueryEscape,
|
||||
|
||||
// query executes the MetricsQL/PromQL query against
|
||||
// configured `datasource.url` address.
|
||||
@@ -455,6 +482,17 @@ func templateFuncs() textTpl.FuncMap {
|
||||
return m.Labels[label]
|
||||
},
|
||||
|
||||
// value returns the value of the given metric.
|
||||
// usually used alongside with `query` template function.
|
||||
"value": func(m metric) float64 {
|
||||
return m.Value
|
||||
},
|
||||
|
||||
// strvalue returns metric name.
|
||||
"strvalue": func(m metric) string {
|
||||
return m.Labels["__name__"]
|
||||
},
|
||||
|
||||
// sortByLabel sorts the given metrics by provided label key
|
||||
"sortByLabel": func(label string, metrics []metric) []metric {
|
||||
sort.SliceStable(metrics, func(i, j int) bool {
|
||||
@@ -463,12 +501,6 @@ func templateFuncs() textTpl.FuncMap {
|
||||
return metrics
|
||||
},
|
||||
|
||||
// value returns the value of the given metric.
|
||||
// usually used alongside with `query` template function.
|
||||
"value": func(m metric) float64 {
|
||||
return m.Value
|
||||
},
|
||||
|
||||
/* Helpers */
|
||||
|
||||
// Converts a list of objects to a map with keys arg0, arg1 etc.
|
||||
@@ -482,6 +514,8 @@ func templateFuncs() textTpl.FuncMap {
|
||||
},
|
||||
|
||||
// safeHtml marks string as HTML not requiring auto-escaping.
|
||||
//
|
||||
// See also htmlEscape.
|
||||
"safeHtml": func(text string) htmlTpl.HTML {
|
||||
return htmlTpl.HTML(text)
|
||||
},
|
||||
|
||||
@@ -6,6 +6,52 @@ import (
|
||||
textTpl "text/template"
|
||||
)
|
||||
|
||||
func TestTemplateFuncs(t *testing.T) {
|
||||
funcs := templateFuncs()
|
||||
f := func(funcName, s, resultExpected string) {
|
||||
t.Helper()
|
||||
v := funcs[funcName]
|
||||
fLocal := v.(func(s string) string)
|
||||
result := fLocal(s)
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result for %s(%q); got\n%s\nwant\n%s", funcName, s, result, resultExpected)
|
||||
}
|
||||
}
|
||||
f("title", "foo bar", "Foo Bar")
|
||||
f("toUpper", "foo", "FOO")
|
||||
f("toLower", "FOO", "foo")
|
||||
f("pathEscape", "foo/bar\n+baz", "foo%2Fbar%0A+baz")
|
||||
f("queryEscape", "foo+bar\n+baz", "foo%2Bbar%0A%2Bbaz")
|
||||
f("jsonEscape", `foo{bar="baz"}`+"\n + 1", `"foo{bar=\"baz\"}\n + 1"`)
|
||||
f("quotesEscape", `foo{bar="baz"}`+"\n + 1", `foo{bar=\"baz\"}\n + 1`)
|
||||
f("htmlEscape", "foo < 10\nabc", "foo < 10\nabc")
|
||||
f("crlfEscape", "foo\nbar\rx", `foo\nbar\rx`)
|
||||
f("stripPort", "foo", "foo")
|
||||
f("stripPort", "foo:1234", "foo")
|
||||
f("stripDomain", "foo.bar.baz", "foo")
|
||||
f("stripDomain", "foo.bar:123", "foo:123")
|
||||
|
||||
// check "match" func
|
||||
matchFunc := funcs["match"].(func(pattern, s string) (bool, error))
|
||||
if _, err := matchFunc("invalid[regexp", "abc"); err == nil {
|
||||
t.Fatalf("expecting non-nil error on invalid regexp")
|
||||
}
|
||||
ok, err := matchFunc("abc", "def")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error")
|
||||
}
|
||||
if ok {
|
||||
t.Fatalf("unexpected match")
|
||||
}
|
||||
ok, err = matchFunc("a.+b", "acsdb")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error")
|
||||
}
|
||||
if !ok {
|
||||
t.Fatalf("unexpected mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
func mkTemplate(current, replacement interface{}) textTemplate {
|
||||
tmpl := textTemplate{}
|
||||
if current != nil {
|
||||
|
||||
@@ -160,7 +160,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
||||
if strings.HasPrefix(r.URL.Path, "/api/v1/") {
|
||||
redirectURL = alert.APILink()
|
||||
}
|
||||
http.Redirect(w, r, "/"+redirectURL, http.StatusPermanentRedirect)
|
||||
httpserver.RedirectPermanent(w, "/"+redirectURL)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,10 +39,19 @@ func createTargetURL(ui *UserInfo, uOrig *url.URL) (*url.URL, []Header, error) {
|
||||
u := *uOrig
|
||||
// Prevent from attacks with using `..` in r.URL.Path
|
||||
u.Path = path.Clean(u.Path)
|
||||
if !strings.HasSuffix(u.Path, "/") && strings.HasSuffix(uOrig.Path, "/") {
|
||||
// The path.Clean() removes traling slash.
|
||||
// Return it back if needed.
|
||||
// This should fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752
|
||||
u.Path += "/"
|
||||
}
|
||||
if !strings.HasPrefix(u.Path, "/") {
|
||||
u.Path = "/" + u.Path
|
||||
}
|
||||
u.Path = strings.TrimSuffix(u.Path, "/")
|
||||
if u.Path == "/" {
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1554
|
||||
u.Path = ""
|
||||
}
|
||||
for _, e := range ui.URLMap {
|
||||
for _, sp := range e.SrcPaths {
|
||||
if sp.match(u.Path) {
|
||||
|
||||
@@ -54,6 +54,9 @@ func (bw *Writer) reset() {
|
||||
|
||||
// Write writes p to bw.
|
||||
func (bw *Writer) Write(p []byte) (int, error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
bw.lock.Lock()
|
||||
defer bw.lock.Unlock()
|
||||
if bw.err != nil {
|
||||
|
||||
@@ -168,7 +168,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
_ = r.ParseForm()
|
||||
path = strings.TrimPrefix(path, "/")
|
||||
newURL := path + "/?" + r.Form.Encode()
|
||||
http.Redirect(w, r, newURL, http.StatusMovedPermanently)
|
||||
httpserver.RedirectPermanent(w, newURL)
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(path, "/vmui/") {
|
||||
@@ -217,7 +217,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
// vmalert access via incomplete url without `/` in the end. Redirecto to complete url.
|
||||
// Use relative redirect, since, since the hostname and path prefix may be incorrect if VictoriaMetrics
|
||||
// is hidden behind vmauth or similar proxy.
|
||||
http.Redirect(w, r, "vmalert/", http.StatusMovedPermanently)
|
||||
httpserver.RedirectPermanent(w, "vmalert/")
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(path, "/vmalert/") {
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
{% import (
|
||||
"math"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
) %}
|
||||
|
||||
@@ -7,10 +9,25 @@
|
||||
// Federate writes rs in /federate format.
|
||||
// See https://prometheus.io/docs/prometheus/latest/federation/
|
||||
{% func Federate(rs *netstorage.Result) %}
|
||||
{% if len(rs.Timestamps) == 0 || len(rs.Values) == 0 %}{% return %}{% endif %}
|
||||
{% code
|
||||
values := rs.Values
|
||||
timestamps := rs.Timestamps
|
||||
%}
|
||||
{% if len(timestamps) == 0 || len(values) == 0 %}{% return %}{% endif %}
|
||||
{% code
|
||||
lastValue := values[len(values)-1]
|
||||
%}
|
||||
{% if math.IsNaN(lastValue) %}
|
||||
{% comment %}
|
||||
This is most likely a staleness marker.
|
||||
Return nothing after the staleness marker.
|
||||
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3185
|
||||
{% endcomment %}
|
||||
{% return %}
|
||||
{% endif %}
|
||||
{%= prometheusMetricName(&rs.MetricName) %}{% space %}
|
||||
{%f= rs.Values[len(rs.Values)-1] %}{% space %}
|
||||
{%dl= rs.Timestamps[len(rs.Timestamps)-1] %}{% newline %}
|
||||
{%f= lastValue %}{% space %}
|
||||
{%dl= timestamps[len(timestamps)-1] %}{% newline %}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
|
||||
@@ -6,70 +6,85 @@ package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:1
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
// Federate writes rs in /federate format.// See https://prometheus.io/docs/prometheus/latest/federation/
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
func StreamFederate(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
if len(rs.Timestamps) == 0 || len(rs.Values) == 0 {
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
values := rs.Values
|
||||
timestamps := rs.Timestamps
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:16
|
||||
if len(timestamps) == 0 || len(values) == 0 {
|
||||
//line app/vmselect/prometheus/federate.qtpl:16
|
||||
return
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
//line app/vmselect/prometheus/federate.qtpl:16
|
||||
}
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
//line app/vmselect/prometheus/federate.qtpl:18
|
||||
lastValue := values[len(values)-1]
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:20
|
||||
if math.IsNaN(lastValue) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:26
|
||||
return
|
||||
//line app/vmselect/prometheus/federate.qtpl:27
|
||||
}
|
||||
//line app/vmselect/prometheus/federate.qtpl:28
|
||||
streamprometheusMetricName(qw422016, &rs.MetricName)
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
//line app/vmselect/prometheus/federate.qtpl:28
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/federate.qtpl:12
|
||||
qw422016.N().F(rs.Values[len(rs.Values)-1])
|
||||
//line app/vmselect/prometheus/federate.qtpl:12
|
||||
//line app/vmselect/prometheus/federate.qtpl:29
|
||||
qw422016.N().F(lastValue)
|
||||
//line app/vmselect/prometheus/federate.qtpl:29
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
qw422016.N().DL(rs.Timestamps[len(rs.Timestamps)-1])
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
//line app/vmselect/prometheus/federate.qtpl:30
|
||||
qw422016.N().DL(timestamps[len(timestamps)-1])
|
||||
//line app/vmselect/prometheus/federate.qtpl:30
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
func WriteFederate(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
StreamFederate(qw422016, rs)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
func Federate(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
WriteFederate(qb422016, rs)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
//line app/vmselect/prometheus/federate.qtpl:31
|
||||
}
|
||||
|
||||
@@ -104,6 +104,9 @@ func removeGroupTags(metricName *storage.MetricName, modifier *metricsql.Modifie
|
||||
|
||||
func aggrFuncExt(afe func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries, argOrig []*timeseries,
|
||||
modifier *metricsql.ModifierExpr, maxSeries int, keepOriginal bool) ([]*timeseries, error) {
|
||||
// Remove empty time series, e.g. series with all NaN samples,
|
||||
// since such series are ignored by aggregate functions.
|
||||
argOrig = removeEmptySeries(argOrig)
|
||||
arg := copyTimeseriesMetricNames(argOrig, keepOriginal)
|
||||
|
||||
// Perform grouping.
|
||||
|
||||
@@ -36,9 +36,9 @@ var binaryOpFuncs = map[string]binaryOpFunc{
|
||||
"unless": binaryOpUnless,
|
||||
|
||||
// New ops
|
||||
"if": newBinaryOpArithFunc(binaryop.If),
|
||||
"ifnot": newBinaryOpArithFunc(binaryop.Ifnot),
|
||||
"default": newBinaryOpArithFunc(binaryop.Default),
|
||||
"if": binaryOpIf,
|
||||
"ifnot": binaryOpIfnot,
|
||||
"default": binaryOpDefault,
|
||||
}
|
||||
|
||||
func getBinaryOpFunc(op string) binaryOpFunc {
|
||||
@@ -86,17 +86,6 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp
|
||||
right := bfa.right
|
||||
op := bfa.be.Op
|
||||
switch true {
|
||||
case op == "ifnot":
|
||||
left = removeEmptySeries(left)
|
||||
// Do not remove empty series on the right side,
|
||||
// so the left-side series could be matched against them.
|
||||
case op == "default":
|
||||
// Do not remove empty series on the left and the right side,
|
||||
// since this may lead to missing result:
|
||||
// - if empty time series are removed on the left side,
|
||||
// then they won't be substituted by time series from the right side.
|
||||
// - if empty time series are removed on the right side,
|
||||
// then this may result in missing time series from the left side.
|
||||
case metricsql.IsBinaryOpCmp(op):
|
||||
// Do not remove empty series for comparison operations,
|
||||
// since this may lead to missing result.
|
||||
@@ -136,7 +125,7 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp
|
||||
|
||||
func adjustBinaryOpTags(be *metricsql.BinaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) {
|
||||
if len(be.GroupModifier.Op) == 0 && len(be.JoinModifier.Op) == 0 {
|
||||
if isScalar(left) && be.Op != "default" && be.Op != "if" && be.Op != "ifnot" {
|
||||
if isScalar(left) {
|
||||
// Fast path: `scalar op vector`
|
||||
rvsLeft := make([]*timeseries, len(right))
|
||||
tsLeft := left[0]
|
||||
@@ -324,14 +313,23 @@ func resetMetricGroupIfRequired(be *metricsql.BinaryOpExpr, ts *timeseries) {
|
||||
// Do not reset MetricGroup for non-boolean `compare` binary ops like Prometheus does.
|
||||
return
|
||||
}
|
||||
switch be.Op {
|
||||
case "default", "if", "ifnot":
|
||||
// Do not reset MetricGroup for these ops.
|
||||
return
|
||||
}
|
||||
ts.MetricName.ResetMetricGroup()
|
||||
}
|
||||
|
||||
func binaryOpIf(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
|
||||
var rvs []*timeseries
|
||||
for k, tssLeft := range mLeft {
|
||||
tssRight := seriesByKey(mRight, k)
|
||||
if tssRight == nil {
|
||||
continue
|
||||
}
|
||||
tssLeft = addRightNaNsToLeft(tssLeft, tssRight)
|
||||
rvs = append(rvs, tssLeft...)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
|
||||
var rvs []*timeseries
|
||||
@@ -340,24 +338,47 @@ func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
if tssLeft == nil {
|
||||
continue
|
||||
}
|
||||
// Add gaps to tssLeft if there are gaps at tssRight.
|
||||
for _, tsLeft := range tssLeft {
|
||||
valuesLeft := tsLeft.Values
|
||||
for i := range valuesLeft {
|
||||
hasValue := false
|
||||
for _, tsRight := range tssRight {
|
||||
if !math.IsNaN(tsRight.Values[i]) {
|
||||
hasValue = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasValue {
|
||||
valuesLeft[i] = nan
|
||||
tssLeft = addRightNaNsToLeft(tssLeft, tssRight)
|
||||
rvs = append(rvs, tssLeft...)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func addRightNaNsToLeft(tssLeft, tssRight []*timeseries) []*timeseries {
|
||||
for _, tsLeft := range tssLeft {
|
||||
valuesLeft := tsLeft.Values
|
||||
for i := range valuesLeft {
|
||||
hasValue := false
|
||||
for _, tsRight := range tssRight {
|
||||
if !math.IsNaN(tsRight.Values[i]) {
|
||||
hasValue = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasValue {
|
||||
valuesLeft[i] = nan
|
||||
}
|
||||
}
|
||||
tssLeft = removeEmptySeries(tssLeft)
|
||||
}
|
||||
return removeEmptySeries(tssLeft)
|
||||
}
|
||||
|
||||
func binaryOpDefault(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
|
||||
var rvs []*timeseries
|
||||
if len(mLeft) == 0 {
|
||||
for _, tss := range mRight {
|
||||
rvs = append(rvs, tss...)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
for k, tssLeft := range mLeft {
|
||||
rvs = append(rvs, tssLeft...)
|
||||
tssRight := seriesByKey(mRight, k)
|
||||
if tssRight == nil {
|
||||
continue
|
||||
}
|
||||
fillLeftNaNsWithRightValues(tssLeft, tssRight)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
@@ -374,24 +395,43 @@ func binaryOpOr(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
rvs = append(rvs, tssRight...)
|
||||
continue
|
||||
}
|
||||
// Fill gaps in tssLeft with values from tssRight as Prometheus does.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/552
|
||||
for _, tsLeft := range tssLeft {
|
||||
valuesLeft := tsLeft.Values
|
||||
for i, v := range valuesLeft {
|
||||
if !math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
for _, tsRight := range tssRight {
|
||||
vRight := tsRight.Values[i]
|
||||
if !math.IsNaN(vRight) {
|
||||
valuesLeft[i] = vRight
|
||||
break
|
||||
}
|
||||
fillLeftNaNsWithRightValues(tssLeft, tssRight)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func fillLeftNaNsWithRightValues(tssLeft, tssRight []*timeseries) {
|
||||
// Fill gaps in tssLeft with values from tssRight as Prometheus does.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/552
|
||||
for _, tsLeft := range tssLeft {
|
||||
valuesLeft := tsLeft.Values
|
||||
for i, v := range valuesLeft {
|
||||
if !math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
for _, tsRight := range tssRight {
|
||||
vRight := tsRight.Values[i]
|
||||
if !math.IsNaN(vRight) {
|
||||
valuesLeft[i] = vRight
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func binaryOpIfnot(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
|
||||
var rvs []*timeseries
|
||||
for k, tssLeft := range mLeft {
|
||||
tssRight := seriesByKey(mRight, k)
|
||||
if tssRight == nil {
|
||||
rvs = append(rvs, tssLeft...)
|
||||
continue
|
||||
}
|
||||
tssLeft = addLeftNaNsIfNoRightNaNs(tssLeft, tssRight)
|
||||
rvs = append(rvs, tssLeft...)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
@@ -404,24 +444,44 @@ func binaryOpUnless(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
rvs = append(rvs, tssLeft...)
|
||||
continue
|
||||
}
|
||||
// Add gaps to tssLeft if the are no gaps at tssRight.
|
||||
for _, tsLeft := range tssLeft {
|
||||
valuesLeft := tsLeft.Values
|
||||
for i := range valuesLeft {
|
||||
for _, tsRight := range tssRight {
|
||||
if !math.IsNaN(tsRight.Values[i]) {
|
||||
valuesLeft[i] = nan
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
tssLeft = removeEmptySeries(tssLeft)
|
||||
tssLeft = addLeftNaNsIfNoRightNaNs(tssLeft, tssRight)
|
||||
rvs = append(rvs, tssLeft...)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func addLeftNaNsIfNoRightNaNs(tssLeft, tssRight []*timeseries) []*timeseries {
|
||||
for _, tsLeft := range tssLeft {
|
||||
valuesLeft := tsLeft.Values
|
||||
for i := range valuesLeft {
|
||||
for _, tsRight := range tssRight {
|
||||
if !math.IsNaN(tsRight.Values[i]) {
|
||||
valuesLeft[i] = nan
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return removeEmptySeries(tssLeft)
|
||||
}
|
||||
|
||||
func seriesByKey(m map[string][]*timeseries, key string) []*timeseries {
|
||||
tss := m[key]
|
||||
if tss != nil {
|
||||
return tss
|
||||
}
|
||||
if len(m) != 1 {
|
||||
return nil
|
||||
}
|
||||
for _, tss := range m {
|
||||
if isScalar(tss) {
|
||||
return tss
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createTimeseriesMapByTagSet(be *metricsql.BinaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) {
|
||||
groupTags := be.GroupModifier.Args
|
||||
groupOp := strings.ToLower(be.GroupModifier.Op)
|
||||
|
||||
@@ -2280,6 +2280,27 @@ func TestExecSuccess(t *testing.T) {
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`limit_offset NaN`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// q returns 3 time series, where foo=3 contains only NaN values
|
||||
// limit_offset suppose to apply offset for non-NaN series only
|
||||
q := `limit_offset(1, 1, sort_by_label_desc((
|
||||
label_set(time()*1, "foo", "1"),
|
||||
label_set(time()*2, "foo", "2"),
|
||||
label_set(time()*3, "foo", "3"),
|
||||
) < 3000, "foo"))`
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r.MetricName.Tags = []storage.Tag{{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("1"),
|
||||
}}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`sum(label_graphite_group)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort(sum by (__name__) (
|
||||
@@ -2803,7 +2824,12 @@ func TestExecSuccess(t *testing.T) {
|
||||
t.Run(`scalar default vector1`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `time() > 1400 default label_set(123, "foo", "bar")`
|
||||
resultExpected := []netstorage.Result{}
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{nan, nan, nan, 1600, 1800, 2000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`scalar default vector2`, func(t *testing.T) {
|
||||
@@ -3762,6 +3788,27 @@ func TestExecSuccess(t *testing.T) {
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`histogram_quantile(duplicate-le)`, func(t *testing.T) {
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225
|
||||
t.Parallel()
|
||||
q := `round(sort(histogram_quantile(0.6,
|
||||
label_set(90, "foo", "bar", "le", "5")
|
||||
or label_set(100, "foo", "bar", "le", "5.0")
|
||||
or label_set(200, "foo", "bar", "le", "6.0")
|
||||
or label_set(300, "foo", "bar", "le", "+Inf")
|
||||
)), 0.1)`
|
||||
r1 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{4.7, 4.7, 4.7, 4.7, 4.7, 4.7},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r1.MetricName.Tags = []storage.Tag{{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
}}
|
||||
resultExpected := []netstorage.Result{r1}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`histogram_quantile(valid)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort(histogram_quantile(0.6,
|
||||
@@ -5084,7 +5131,40 @@ func TestExecSuccess(t *testing.T) {
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`quantiles_over_time`, func(t *testing.T) {
|
||||
t.Run(`quantiles_over_time(single_sample)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort_by_label(
|
||||
quantiles_over_time("phi", 0.5, 0.9,
|
||||
time()[100s:100s]
|
||||
),
|
||||
"phi",
|
||||
)`
|
||||
r1 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r1.MetricName.Tags = []storage.Tag{
|
||||
{
|
||||
Key: []byte("phi"),
|
||||
Value: []byte("0.5"),
|
||||
},
|
||||
}
|
||||
r2 := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r2.MetricName.Tags = []storage.Tag{
|
||||
{
|
||||
Key: []byte("phi"),
|
||||
Value: []byte("0.9"),
|
||||
},
|
||||
}
|
||||
resultExpected := []netstorage.Result{r1, r2}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`quantiles_over_time(multiple_samples)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `sort_by_label(
|
||||
quantiles_over_time("phi", 0.5, 0.9,
|
||||
@@ -5485,6 +5565,12 @@ func TestExecSuccess(t *testing.T) {
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`any(empty-series)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `any(label_set(time()<0, "foo", "bar"))`
|
||||
resultExpected := []netstorage.Result{}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`group() by (test)`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `group((
|
||||
@@ -6092,7 +6178,18 @@ func TestExecSuccess(t *testing.T) {
|
||||
t.Run(`ifnot-no-matching-timeseries`, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
q := `label_set(time(), "foo", "bar") ifnot label_set(time() > 1400, "x", "y")`
|
||||
resultExpected := []netstorage.Result{}
|
||||
r := netstorage.Result{
|
||||
MetricName: metricNameExpected,
|
||||
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Timestamps: timestampsExpected,
|
||||
}
|
||||
r.MetricName.Tags = []storage.Tag{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
},
|
||||
}
|
||||
resultExpected := []netstorage.Result{r}
|
||||
f(q, resultExpected)
|
||||
})
|
||||
t.Run(`quantile(-2)`, func(t *testing.T) {
|
||||
@@ -7925,6 +8022,7 @@ func TestExecError(t *testing.T) {
|
||||
f(`limit_offet(1, (alias(1,"foo"),alias(2,"bar")), 10)`)
|
||||
f(`round(1, 1 or label_set(2, "xx", "foo"))`)
|
||||
f(`histogram_quantile(1 or label_set(2, "xx", "foo"), 1)`)
|
||||
f(`histogram_quantiles("foo", 1 or label_set(2, "xxx", "foo"), 2)`)
|
||||
f(`label_set(1, 2, 3)`)
|
||||
f(`label_set(1, "foo", (label_set(1, "foo", bar") or label_set(2, "xxx", "yy")))`)
|
||||
f(`label_set(1, "foo", 3)`)
|
||||
|
||||
@@ -167,6 +167,8 @@ var rollupFuncsCanAdjustWindow = map[string]bool{
|
||||
"timestamp": true,
|
||||
}
|
||||
|
||||
// rollupFuncsRemoveCounterResets contains functions, which need to call removeCounterResets
|
||||
// over input samples before calling the corresponding rollup functions.
|
||||
var rollupFuncsRemoveCounterResets = map[string]bool{
|
||||
"increase": true,
|
||||
"increase_prometheus": true,
|
||||
@@ -177,6 +179,36 @@ var rollupFuncsRemoveCounterResets = map[string]bool{
|
||||
"rollup_rate": true,
|
||||
}
|
||||
|
||||
// rollupFuncsSamplesScannedPerCall contains functions, which scan lower number of samples
|
||||
// than is passed to the rollup func.
|
||||
//
|
||||
// It is expected that the remaining rollupFuncs scan all the samples passed to them.
|
||||
var rollupFuncsSamplesScannedPerCall = map[string]int{
|
||||
"absent_over_time": 1,
|
||||
"count_over_time": 1,
|
||||
"default_rollup": 1,
|
||||
"delta": 2,
|
||||
"delta_prometheus": 2,
|
||||
"deriv_fast": 2,
|
||||
"first_over_time": 1,
|
||||
"idelta": 2,
|
||||
"ideriv": 2,
|
||||
"increase": 2,
|
||||
"increase_prometheus": 2,
|
||||
"increase_pure": 2,
|
||||
"irate": 2,
|
||||
"lag": 1,
|
||||
"last_over_time": 1,
|
||||
"lifetime": 2,
|
||||
"present_over_time": 1,
|
||||
"rate": 2,
|
||||
"scrape_interval": 2,
|
||||
"tfirst_over_time": 1,
|
||||
"timestamp": 1,
|
||||
"timestamp_with_name": 1,
|
||||
"tlast_over_time": 1,
|
||||
}
|
||||
|
||||
// These functions don't change physical meaning of input time series,
|
||||
// so they don't drop metric name
|
||||
var rollupFuncsKeepMetricName = map[string]bool{
|
||||
@@ -248,26 +280,29 @@ func getRollupAggrFuncNames(expr metricsql.Expr) ([]string, error) {
|
||||
return aggrFuncNames, nil
|
||||
}
|
||||
|
||||
func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, end, step, window int64, lookbackDelta int64, sharedTimestamps []int64) (
|
||||
func getRollupConfigs(funcName string, rf rollupFunc, expr metricsql.Expr, start, end, step, window int64, lookbackDelta int64, sharedTimestamps []int64) (
|
||||
func(values []float64, timestamps []int64), []*rollupConfig, error) {
|
||||
preFunc := func(values []float64, timestamps []int64) {}
|
||||
if rollupFuncsRemoveCounterResets[name] {
|
||||
funcName = strings.ToLower(funcName)
|
||||
if rollupFuncsRemoveCounterResets[funcName] {
|
||||
preFunc = func(values []float64, timestamps []int64) {
|
||||
removeCounterResets(values)
|
||||
}
|
||||
}
|
||||
samplesScannedPerCall := rollupFuncsSamplesScannedPerCall[funcName]
|
||||
newRollupConfig := func(rf rollupFunc, tagValue string) *rollupConfig {
|
||||
return &rollupConfig{
|
||||
TagValue: tagValue,
|
||||
Func: rf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Window: window,
|
||||
MayAdjustWindow: rollupFuncsCanAdjustWindow[name],
|
||||
LookbackDelta: lookbackDelta,
|
||||
Timestamps: sharedTimestamps,
|
||||
isDefaultRollup: name == "default_rollup",
|
||||
TagValue: tagValue,
|
||||
Func: rf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Window: window,
|
||||
MayAdjustWindow: rollupFuncsCanAdjustWindow[funcName],
|
||||
LookbackDelta: lookbackDelta,
|
||||
Timestamps: sharedTimestamps,
|
||||
isDefaultRollup: funcName == "default_rollup",
|
||||
samplesScannedPerCall: samplesScannedPerCall,
|
||||
}
|
||||
}
|
||||
appendRollupConfigs := func(dst []*rollupConfig) []*rollupConfig {
|
||||
@@ -277,7 +312,7 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
|
||||
return dst
|
||||
}
|
||||
var rcs []*rollupConfig
|
||||
switch name {
|
||||
switch funcName {
|
||||
case "rollup":
|
||||
rcs = appendRollupConfigs(rcs)
|
||||
case "rollup_rate", "rollup_deriv":
|
||||
@@ -414,6 +449,11 @@ type rollupConfig struct {
|
||||
|
||||
// Whether default_rollup is used.
|
||||
isDefaultRollup bool
|
||||
|
||||
// The estimated number of samples scanned per Func call.
|
||||
//
|
||||
// If zero, then it is considered that Func scans all the samples passed to it.
|
||||
samplesScannedPerCall int
|
||||
}
|
||||
|
||||
func (rc *rollupConfig) String() string {
|
||||
@@ -552,7 +592,8 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
|
||||
ni := 0
|
||||
nj := 0
|
||||
f := rc.Func
|
||||
var samplesScanned uint64
|
||||
samplesScanned := uint64(len(values))
|
||||
samplesScannedPerCall := uint64(rc.samplesScannedPerCall)
|
||||
for _, tEnd := range rc.Timestamps {
|
||||
tStart := tEnd - window
|
||||
ni = seekFirstTimestampIdxAfter(timestamps[i:], tStart, ni)
|
||||
@@ -584,7 +625,11 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
|
||||
rfa.currTimestamp = tEnd
|
||||
value := f(rfa)
|
||||
rfa.idx++
|
||||
samplesScanned += uint64(len(rfa.values))
|
||||
if samplesScannedPerCall > 0 {
|
||||
samplesScanned += samplesScannedPerCall
|
||||
} else {
|
||||
samplesScanned += uint64(len(rfa.values))
|
||||
}
|
||||
dstValues = append(dstValues, value)
|
||||
}
|
||||
putRollupFuncArg(rfa)
|
||||
@@ -1112,11 +1157,7 @@ func newRollupQuantiles(args []interface{}) (rollupFunc, error) {
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return rfa.prevValue
|
||||
}
|
||||
if len(values) == 1 {
|
||||
// Fast path - only a single value.
|
||||
return values[0]
|
||||
return nan
|
||||
}
|
||||
qs := getFloat64s()
|
||||
qs.A = quantiles(qs.A[:0], phis, values)
|
||||
@@ -1339,15 +1380,11 @@ func rollupRateOverSum(rfa *rollupFuncArg) float64 {
|
||||
// Assume that the value didn't change since rfa.prevValue.
|
||||
return 0
|
||||
}
|
||||
dt := rfa.window
|
||||
if !math.IsNaN(rfa.prevValue) {
|
||||
dt = timestamps[len(timestamps)-1] - rfa.prevTimestamp
|
||||
}
|
||||
sum := float64(0)
|
||||
for _, v := range rfa.values {
|
||||
sum += v
|
||||
}
|
||||
return sum / (float64(dt) / 1e3)
|
||||
return sum / (float64(rfa.window) / 1e3)
|
||||
}
|
||||
|
||||
func rollupRange(rfa *rollupFuncArg) float64 {
|
||||
@@ -1489,11 +1526,8 @@ func rollupDelta(rfa *rollupFuncArg) float64 {
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/894
|
||||
return values[len(values)-1] - rfa.realPrevValue
|
||||
}
|
||||
// Assume that the previous non-existing value was 0 only in the following cases:
|
||||
//
|
||||
// - If the delta with the next value equals to 0.
|
||||
// This is the case for slow-changing counter - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962
|
||||
// - If the first value doesn't exceed too much the delta with the next value.
|
||||
// Assume that the previous non-existing value was 0
|
||||
// only if the first value doesn't exceed too much the delta with the next value.
|
||||
//
|
||||
// This should prevent from improper increase() results for os-level counters
|
||||
// such as cpu time or bytes sent over the network interface.
|
||||
@@ -1507,9 +1541,6 @@ func rollupDelta(rfa *rollupFuncArg) float64 {
|
||||
} else if !math.IsNaN(rfa.realNextValue) {
|
||||
d = rfa.realNextValue - values[0]
|
||||
}
|
||||
if d == 0 {
|
||||
d = 10
|
||||
}
|
||||
if math.Abs(values[0]) < 10*(math.Abs(d)+1) {
|
||||
prevValue = 0
|
||||
} else {
|
||||
|
||||
@@ -152,6 +152,9 @@ func InitRollupResultCache(cachePath string) {
|
||||
metrics.GetOrCreateGauge(`vm_cache_size_bytes{type="promql/rollupResult"}`, func() float64 {
|
||||
return float64(fcs().BytesSize)
|
||||
})
|
||||
metrics.GetOrCreateGauge(`vm_cache_size_max_bytes{type="promql/rollupResult"}`, func() float64 {
|
||||
return float64(fcs().MaxBytesSize)
|
||||
})
|
||||
metrics.GetOrCreateGauge(`vm_cache_requests_total{type="promql/rollupResult"}`, func() float64 {
|
||||
return float64(fcs().GetCalls)
|
||||
})
|
||||
|
||||
@@ -586,8 +586,8 @@ func TestRollupNoWindowNoPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned != 0 {
|
||||
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
if samplesScanned != 12 {
|
||||
t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, nan, nan, nan, nan}
|
||||
timestampsExpected := []int64{0, 1, 2, 3, 4}
|
||||
@@ -623,8 +623,8 @@ func TestRollupWindowNoPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned != 0 {
|
||||
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
if samplesScanned != 12 {
|
||||
t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, nan, nan, nan, nan}
|
||||
timestampsExpected := []int64{0, 1, 2, 3, 4}
|
||||
@@ -640,8 +640,8 @@ func TestRollupWindowNoPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned != 0 {
|
||||
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
if samplesScanned != 12 {
|
||||
t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, nan, nan, nan}
|
||||
timestampsExpected := []int64{161, 171, 181, 191}
|
||||
@@ -660,8 +660,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 15 {
|
||||
t.Fatalf("expecting 15 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 123, nan, 34, nan, 44}
|
||||
timestampsExpected := []int64{0, 5, 10, 15, 20, 25}
|
||||
@@ -677,8 +677,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 16 {
|
||||
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{44, 32, 34, nan}
|
||||
timestampsExpected := []int64{100, 120, 140, 160}
|
||||
@@ -694,8 +694,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, nan, 123, 34, 32}
|
||||
timestampsExpected := []int64{-50, 0, 50, 100, 150}
|
||||
@@ -714,8 +714,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 16 {
|
||||
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 123, 123, 34, 34}
|
||||
timestampsExpected := []int64{0, 5, 10, 15, 20}
|
||||
@@ -731,8 +731,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 16 {
|
||||
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{44, 34, 34, nan}
|
||||
timestampsExpected := []int64{100, 120, 140, 160}
|
||||
@@ -748,8 +748,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 15 {
|
||||
t.Fatalf("expecting 15 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 54, 44, nan}
|
||||
timestampsExpected := []int64{0, 50, 100, 150}
|
||||
@@ -768,8 +768,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 18 {
|
||||
t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
|
||||
timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
|
||||
@@ -785,8 +785,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 18 {
|
||||
t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
|
||||
timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
|
||||
@@ -802,8 +802,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 18 {
|
||||
t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
|
||||
timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
|
||||
@@ -822,8 +822,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 123, 54, 44, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -839,8 +839,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 4, 4, 3, 1}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -856,8 +856,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 21, 12, 32, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -873,8 +873,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 123, 99, 44, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -890,8 +890,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 222, 199, 110, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -907,8 +907,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 21, -9, 22, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -924,8 +924,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -102, -42, -10, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -941,8 +941,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{123, 33, -87, 0}
|
||||
timestampsExpected := []int64{10, 50, 90, 130}
|
||||
@@ -958,8 +958,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.004, 0, 0, 0.03}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -975,8 +975,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.031, 0.044, 0.04, 0.01}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -992,8 +992,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 47 {
|
||||
t.Fatalf("expecting 47 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.031, 0.075, 0.115, 0.125}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1009,8 +1009,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.010333333333333333, 0.011, 0.013333333333333334, 0.01}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1026,8 +1026,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.010333333333333333, 0.010714285714285714, 0.012, 0.0125}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1043,8 +1043,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 4, 4, 3, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1060,8 +1060,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 3, 3, 2, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1077,8 +1077,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 16 {
|
||||
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 1, 1, 1, 1, 0}
|
||||
timestampsExpected := []int64{0, 9, 18, 27, 36, 45}
|
||||
@@ -1094,8 +1094,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 2, 2, 1, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1111,8 +1111,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 55.5, 49.75, 36.666666666666664, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1128,8 +1128,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -2879.310344827588, 127.87627310448904, -496.5831435079728, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1145,8 +1145,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 14 {
|
||||
t.Fatalf("expecting 14 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, nan, nan, 0, -8900, 0}
|
||||
timestampsExpected := []int64{0, 4, 8, 12, 16, 20}
|
||||
@@ -1162,8 +1162,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -1916.6666666666665, -43500, 400, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1179,8 +1179,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 39.81519810323691, 32.080952292598795, 5.2493385826745405, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1196,8 +1196,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 2.148, 1.593, 1.156, 1.36}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1213,8 +1213,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 4, 4, 3, 1}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1230,8 +1230,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 4, 7, 6, 3}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1247,8 +1247,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 21, 34, 34, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1264,10 +1264,10 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 2775, 5262.5, 3678.5714285714284, 2880}
|
||||
valuesExpected := []float64{nan, 2775, 5262.5, 3862.5, 1800}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
@@ -1281,8 +1281,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -0.86650328627136, -1.1200838283548589, -0.40035755084856683, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1306,8 +1306,8 @@ func TestRollupBigNumberOfValues(t *testing.T) {
|
||||
srcTimestamps[i] = int64(i / 2)
|
||||
}
|
||||
values, samplesScanned := rc.Do(nil, srcValues, srcTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 22002 {
|
||||
t.Fatalf("expecting 22002 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{1, 4001, 8001, 9999, nan, nan}
|
||||
timestampsExpected := []int64{0, 2000, 4000, 6000, 8000, 10000}
|
||||
@@ -1383,19 +1383,16 @@ func TestRollupDelta(t *testing.T) {
|
||||
|
||||
// Small initial value
|
||||
f(nan, nan, nan, []float64{1}, 1)
|
||||
f(nan, nan, nan, []float64{10}, 10)
|
||||
f(nan, nan, nan, []float64{100}, 100)
|
||||
f(nan, nan, nan, []float64{10}, 0)
|
||||
f(nan, nan, nan, []float64{100}, 0)
|
||||
f(nan, nan, nan, []float64{1, 2, 3}, 3)
|
||||
f(1, nan, nan, []float64{1, 2, 3}, 2)
|
||||
f(nan, nan, nan, []float64{5, 6, 8}, 8)
|
||||
f(2, nan, nan, []float64{5, 6, 8}, 6)
|
||||
|
||||
// Moderate initial value with zero delta after that.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962
|
||||
f(nan, nan, nan, []float64{100}, 100)
|
||||
f(nan, nan, nan, []float64{100, 100}, 100)
|
||||
f(nan, nan, nan, []float64{100, 100}, 0)
|
||||
|
||||
// Big initial value with with zero delta after that.
|
||||
// Big initial value with zero delta after that.
|
||||
f(nan, nan, nan, []float64{1000}, 0)
|
||||
f(nan, nan, nan, []float64{1000, 1000}, 0)
|
||||
|
||||
|
||||
@@ -659,6 +659,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
sort.Slice(xss, func(i, j int) bool {
|
||||
return xss[i].le < xss[j].le
|
||||
})
|
||||
xss = mergeSameLE(xss)
|
||||
dst := xss[0].ts
|
||||
var tsLower, tsUpper *timeseries
|
||||
if len(boundsLabel) > 0 {
|
||||
@@ -822,8 +823,12 @@ func transformHistogramQuantiles(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
tssOrig := args[len(args)-1]
|
||||
// Calculate quantile individually per each phi.
|
||||
var rvs []*timeseries
|
||||
for _, phiArg := range phiArgs {
|
||||
phiStr := fmt.Sprintf("%g", phiArg[0].Values[0])
|
||||
for i, phiArg := range phiArgs {
|
||||
phis, err := getScalar(phiArg, i)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse phi: %w", err)
|
||||
}
|
||||
phiStr := fmt.Sprintf("%g", phis[0])
|
||||
tss := copyTimeseries(tssOrig)
|
||||
tfaTmp := &transformFuncArg{
|
||||
ec: tfa.ec,
|
||||
@@ -935,6 +940,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
sort.Slice(xss, func(i, j int) bool {
|
||||
return xss[i].le < xss[j].le
|
||||
})
|
||||
xss = mergeSameLE(xss)
|
||||
dst := xss[0].ts
|
||||
var tsLower, tsUpper *timeseries
|
||||
if len(boundsLabel) > 0 {
|
||||
@@ -1002,6 +1008,7 @@ func fixBrokenBuckets(i int, xss []leTimeseries) {
|
||||
if len(xss) < 2 {
|
||||
return
|
||||
}
|
||||
// Fill NaN in upper buckets with the first non-NaN value found in lower buckets.
|
||||
for j := len(xss) - 1; j >= 0; j-- {
|
||||
v := xss[j].ts.Values[i]
|
||||
if !math.IsNaN(v) {
|
||||
@@ -1013,6 +1020,8 @@ func fixBrokenBuckets(i int, xss []leTimeseries) {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Substitute lower bucket values with upper values if the lower values are NaN
|
||||
// or are bigger than the upper bucket values.
|
||||
vNext := xss[len(xss)-1].ts.Values[i]
|
||||
for j := len(xss) - 2; j >= 0; j-- {
|
||||
v := xss[j].ts.Values[i]
|
||||
@@ -1024,6 +1033,26 @@ func fixBrokenBuckets(i int, xss []leTimeseries) {
|
||||
}
|
||||
}
|
||||
|
||||
func mergeSameLE(xss []leTimeseries) []leTimeseries {
|
||||
// Merge buckets with identical le values.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225
|
||||
xsDst := xss[0]
|
||||
dst := xss[:1]
|
||||
for j := 1; j < len(xss); j++ {
|
||||
xs := xss[j]
|
||||
if xs.le != xsDst.le {
|
||||
dst = append(dst, xs)
|
||||
xsDst = xs
|
||||
continue
|
||||
}
|
||||
dstValues := xsDst.ts.Values
|
||||
for k, v := range xs.ts.Values {
|
||||
dstValues[k] += v
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func transformHour(t time.Time) int {
|
||||
return t.Hour()
|
||||
}
|
||||
@@ -1844,7 +1873,9 @@ func transformLimitOffset(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain offset arg: %w", err)
|
||||
}
|
||||
rvs := args[2]
|
||||
// removeEmptySeries so offset will be calculated after empty series
|
||||
// were filtered out.
|
||||
rvs := removeEmptySeries(args[2])
|
||||
if len(rvs) >= offset {
|
||||
rvs = rvs[offset:]
|
||||
}
|
||||
@@ -2183,17 +2214,13 @@ func transformTimezoneOffset(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
return nil, fmt.Errorf("cannot load timezone %q: %w", tzString, err)
|
||||
}
|
||||
|
||||
var ts timeseries
|
||||
ts.denyReuse = true
|
||||
timestamps := tfa.ec.getSharedTimestamps()
|
||||
values := make([]float64, len(timestamps))
|
||||
for i, v := range timestamps {
|
||||
_, offset := time.Unix(v/1000, 0).In(loc).Zone()
|
||||
values[i] = float64(offset)
|
||||
tss := evalNumber(tfa.ec, nan)
|
||||
ts := tss[0]
|
||||
for i, timestamp := range ts.Timestamps {
|
||||
_, offset := time.Unix(timestamp/1000, 0).In(loc).Zone()
|
||||
ts.Values[i] = float64(offset)
|
||||
}
|
||||
ts.Values = values
|
||||
ts.Timestamps = timestamps
|
||||
return []*timeseries{&ts}, nil
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
func transformTime(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.7e6d0c89.css",
|
||||
"main.js": "./static/js/main.a6398eac.js",
|
||||
"main.js": "./static/js/main.9f52c638.js",
|
||||
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
|
||||
"index.html": "./index.html"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/css/main.7e6d0c89.css",
|
||||
"static/js/main.a6398eac.js"
|
||||
"static/js/main.9f52c638.js"
|
||||
]
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.a6398eac.js"></script><link href="./static/css/main.7e6d0c89.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.9f52c638.js"></script><link href="./static/css/main.7e6d0c89.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.18.4 as build-web-stage
|
||||
FROM golang:1.19.3 as build-web-stage
|
||||
COPY build /build
|
||||
|
||||
WORKDIR /build
|
||||
@@ -6,7 +6,7 @@ COPY web/ /build/
|
||||
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
|
||||
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/
|
||||
|
||||
FROM alpine:3.16.0
|
||||
FROM alpine:3.16.2
|
||||
USER root
|
||||
|
||||
COPY --from=build-web-stage /build/web-amd64 /app/web
|
||||
|
||||
55
app/vmui/packages/vmui/package-lock.json
generated
55
app/vmui/packages/vmui/package-lock.json
generated
@@ -13301,9 +13301,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/loader-utils": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz",
|
||||
"integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==",
|
||||
"version": "2.0.3",
|
||||
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.3.tgz",
|
||||
"integrity": "sha512-THWqIsn8QRnvLl0shHYVBN9syumU8pYWEHPTmkiVGd+7K5eFNVSY6AJhRvgGF70gg1Dz+l/k8WicvFCxdEs60A==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
@@ -16366,29 +16366,16 @@
|
||||
}
|
||||
},
|
||||
"node_modules/recursive-readdir": {
|
||||
"version": "2.2.2",
|
||||
"resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz",
|
||||
"integrity": "sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==",
|
||||
"version": "2.2.3",
|
||||
"resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz",
|
||||
"integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"minimatch": "3.0.4"
|
||||
"minimatch": "^3.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/recursive-readdir/node_modules/minimatch": {
|
||||
"version": "3.0.4",
|
||||
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
|
||||
"integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"brace-expansion": "^1.1.7"
|
||||
},
|
||||
"engines": {
|
||||
"node": "*"
|
||||
"node": ">=6.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/redent": {
|
||||
@@ -29424,9 +29411,9 @@
|
||||
"peer": true
|
||||
},
|
||||
"loader-utils": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz",
|
||||
"integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==",
|
||||
"version": "2.0.3",
|
||||
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.3.tgz",
|
||||
"integrity": "sha512-THWqIsn8QRnvLl0shHYVBN9syumU8pYWEHPTmkiVGd+7K5eFNVSY6AJhRvgGF70gg1Dz+l/k8WicvFCxdEs60A==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
@@ -31614,25 +31601,13 @@
|
||||
}
|
||||
},
|
||||
"recursive-readdir": {
|
||||
"version": "2.2.2",
|
||||
"resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz",
|
||||
"integrity": "sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==",
|
||||
"version": "2.2.3",
|
||||
"resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz",
|
||||
"integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
"minimatch": "3.0.4"
|
||||
},
|
||||
"dependencies": {
|
||||
"minimatch": {
|
||||
"version": "3.0.4",
|
||||
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
|
||||
"integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
"brace-expansion": "^1.1.7"
|
||||
}
|
||||
}
|
||||
"minimatch": "^3.0.5"
|
||||
}
|
||||
},
|
||||
"redent": {
|
||||
|
||||
@@ -110,7 +110,9 @@ export const TimeSelector: FC = () => {
|
||||
open={open}
|
||||
anchorEl={anchorEl}
|
||||
placement="bottom-end"
|
||||
modifiers={[{name: "offset", options: {offset: [0, 6]}}]}>
|
||||
modifiers={[{name: "offset", options: {offset: [0, 6]}}]}
|
||||
sx={{zIndex: 3, position: "relative"}}
|
||||
>
|
||||
<ClickAwayListener onClickAway={() => setAnchorEl(null)}>
|
||||
<Paper elevation={3}>
|
||||
<Box sx={classes.container}>
|
||||
|
||||
@@ -50,9 +50,10 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||
const fetchData = async (fetchUrl: string[], fetchQueue: AbortController[], displayType: DisplayType, query: string[]) => {
|
||||
const controller = new AbortController();
|
||||
setFetchQueue([...fetchQueue, controller]);
|
||||
const isDisplayChart = displayType === "chart";
|
||||
try {
|
||||
const responses = await Promise.all(fetchUrl.map(url => fetch(url, {signal: controller.signal})));
|
||||
const tempData = [];
|
||||
const tempData: MetricBase[] = [];
|
||||
const tempTraces: Trace[] = [];
|
||||
let counter = 1;
|
||||
for await (const response of responses) {
|
||||
@@ -63,16 +64,16 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||
const trace = new Trace(resp.trace, query[counter-1]);
|
||||
tempTraces.push(trace);
|
||||
}
|
||||
tempData.push(...resp.data.result.map((d: MetricBase) => {
|
||||
resp.data.result.forEach((d: MetricBase) => {
|
||||
d.group = counter;
|
||||
return d;
|
||||
}));
|
||||
tempData.push(d);
|
||||
});
|
||||
counter++;
|
||||
} else {
|
||||
setError(`${resp.errorType}\r\n${resp?.error}`);
|
||||
}
|
||||
}
|
||||
displayType === "chart" ? setGraphData(tempData) : setLiveData(tempData);
|
||||
isDisplayChart ? setGraphData(tempData as MetricResult[]) : setLiveData(tempData as InstantMetricResult[]);
|
||||
setTraces(tempTraces);
|
||||
} catch (e) {
|
||||
if (e instanceof Error && e.name !== "AbortError") {
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "8.5.3"
|
||||
"version": "8.4.4"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
@@ -61,12 +61,12 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "Overview for VictoriaMetrics vmagent v1.73.0 or higher",
|
||||
"description": "Overview for VictoriaMetrics vmagent v1.79.0 or higher",
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"iteration": 1656943336787,
|
||||
"iteration": 1657810604530,
|
||||
"links": [
|
||||
{
|
||||
"icon": "doc",
|
||||
@@ -154,7 +154,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"up\"})",
|
||||
@@ -218,7 +218,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"down\"})",
|
||||
@@ -285,7 +285,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(increase(vm_log_messages_total{job=~\"$job\", instance=~\"$instance\", level!=\"info\"}[30m]))",
|
||||
@@ -344,7 +344,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(vm_persistentqueue_bytes_pending{job=~\"$job\", instance=~\"$instance\"})",
|
||||
@@ -490,7 +490,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -589,7 +589,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -702,7 +702,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -805,7 +805,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -946,7 +946,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1039,7 +1039,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1138,7 +1138,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1237,7 +1237,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1344,7 +1344,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2457,7 +2457,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 4
|
||||
"y": 43
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 60,
|
||||
@@ -2480,7 +2480,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2555,7 +2555,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 4
|
||||
"y": 43
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 66,
|
||||
@@ -2578,7 +2578,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2652,7 +2652,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 12
|
||||
"y": 51
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 61,
|
||||
@@ -2675,7 +2675,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2748,7 +2748,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 12
|
||||
"y": 51
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 65,
|
||||
@@ -2771,7 +2771,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2837,7 +2837,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 20
|
||||
"y": 59
|
||||
},
|
||||
"heatmap": {},
|
||||
"hideZeroBuckets": false,
|
||||
@@ -2881,9 +2881,10 @@
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"description": "Shows saturation of every connection to remote storage. If the threshold of 0.9sec is reached, then the connection is saturated by more than 90% and vmagent won't be able to keep up. This usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase the number of connections per each remote storage.\n",
|
||||
"description": "Shows saturation of every connection to remote storage. If the threshold of 90% is reached, then the connection is saturated (busy or slow) by more than 90%, so vmagent won't be able to keep up and can start buffering data. \n\nThis usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase the number of connections per each remote storage.\n",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"links": []
|
||||
@@ -2896,7 +2897,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 20
|
||||
"y": 59
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 84,
|
||||
@@ -2919,7 +2920,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2930,7 +2931,7 @@
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(vmagent_remotewrite_send_duration_seconds_total{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) by (instance, url)",
|
||||
"expr": "sum(rate(vmagent_remotewrite_send_duration_seconds_total{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) by (instance, url)\n/\nmax(vmagent_remotewrite_queues{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}) by(instance, url)",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
@@ -2943,7 +2944,7 @@
|
||||
"fill": true,
|
||||
"line": true,
|
||||
"op": "gt",
|
||||
"value": 0.9,
|
||||
"value": 90,
|
||||
"yaxis": "left"
|
||||
}
|
||||
],
|
||||
@@ -2963,7 +2964,7 @@
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:662",
|
||||
"format": "s",
|
||||
"format": "percentunit",
|
||||
"logBase": 1,
|
||||
"min": "0",
|
||||
"show": true
|
||||
@@ -2997,7 +2998,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 28
|
||||
"y": 67
|
||||
},
|
||||
"heatmap": {},
|
||||
"hideZeroBuckets": false,
|
||||
@@ -3053,7 +3054,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 28
|
||||
"y": 67
|
||||
},
|
||||
"heatmap": {},
|
||||
"hideZeroBuckets": false,
|
||||
@@ -3104,7 +3105,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 36
|
||||
"y": 75
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 88,
|
||||
@@ -3124,7 +3125,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -3207,7 +3208,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 36
|
||||
"y": 75
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 90,
|
||||
@@ -3227,7 +3228,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -4567,7 +4568,7 @@
|
||||
}
|
||||
],
|
||||
"refresh": "",
|
||||
"schemaVersion": 36,
|
||||
"schemaVersion": 35,
|
||||
"style": "dark",
|
||||
"tags": [
|
||||
"vmagent",
|
||||
@@ -4577,7 +4578,9 @@
|
||||
"list": [
|
||||
{
|
||||
"current": {
|
||||
"selected": false
|
||||
"selected": true,
|
||||
"text": "VM",
|
||||
"value": "VM"
|
||||
},
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
DOCKER_NAMESPACE := victoriametrics
|
||||
|
||||
ROOT_IMAGE ?= alpine:3.16.0
|
||||
CERTS_IMAGE := alpine:3.16.0
|
||||
GO_BUILDER_IMAGE := golang:1.18.4-alpine
|
||||
ROOT_IMAGE ?= alpine:3.16.2
|
||||
CERTS_IMAGE := alpine:3.16.2
|
||||
GO_BUILDER_IMAGE := golang:1.19.3-alpine
|
||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
|
||||
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
||||
|
||||
|
||||
@@ -270,7 +270,9 @@ groups:
|
||||
Ensure that destination is up and reachable."
|
||||
|
||||
- alert: RemoteWriteConnectionIsSaturated
|
||||
expr: rate(vmagent_remotewrite_send_duration_seconds_total[5m]) > 0.9
|
||||
expr: |
|
||||
sum(rate(vmagent_remotewrite_send_duration_seconds_total[5m])) by(job, instance, url)
|
||||
> 0.9 * max(vmagent_remotewrite_queues) by(job, instance, url)
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
@@ -72,7 +72,8 @@ services:
|
||||
- "--rule=/etc/alerts/*.yml"
|
||||
# display source of alerts in grafana
|
||||
- "--external.url=http://127.0.0.1:3000" #grafana outside container
|
||||
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":"{{$$expr|quotesEscape|crlfEscape|queryEscape}}"},{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' ## when copypaste the line be aware of '$$' for escaping in '$expr'
|
||||
# when copypaste the line be aware of '$$' for escaping in '$expr'
|
||||
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
|
||||
networks:
|
||||
- vm_net
|
||||
restart: always
|
||||
|
||||
@@ -13,12 +13,102 @@ The following tip changes can be tested by building VictoriaMetrics components f
|
||||
* [How to build vmauth](https://docs.victoriametrics.com/vmauth.html#how-to-build-from-sources)
|
||||
* [How to build vmctl](https://docs.victoriametrics.com/vmctl.html#how-to-build)
|
||||
|
||||
## tip
|
||||
## v1.79.x long-time support release (LTS)
|
||||
|
||||
## [v1.79.5](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.5)
|
||||
|
||||
Released at 10-11-2022
|
||||
|
||||
**Update note 1:** [vmalert](https://docs.victoriametrics.com/vmalert.html): the `crlfEscape` [template function](https://docs.victoriametrics.com/vmalert.html#template-functions) becomes obsolete starting from this release. It can be safely removed from alerting templates, since `\n` chars are properly escaped with other `*Escape` functions now. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3139) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/890) issue for details.
|
||||
|
||||
* SECURITY: update Go builder to v1.19.3. This fixes [CVE-2022 security issue](https://github.com/golang/go/issues/56328). See [the changelog](https://github.com/golang/go/issues?q=milestone%3AGo1.19.3+label%3ACherryPickApproved).
|
||||
|
||||
* BUGFIX: properly register new time series in per-day inverted index if they were ingested during the last 10 seconds of the day. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3309). Thanks to @lmarszal for the bugreport and for the [initial fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3320).
|
||||
* BUGFIX: properly accept [OpenTSDB telnet put lines](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol) without tags without the need to specify the trailing whitespace. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3290).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly merge buckets with identical `le` values, but with different string representation of these values when calculating [histogram_quantile](https://docs.victoriametrics.com/MetricsQL.html#histogram_quantile) and [histogram_share](https://docs.victoriametrics.com/MetricsQL.html#histogram_share). For example, `http_request_duration_seconds_bucket{le="5"}` and `http_requests_duration_seconds_bucket{le="5.0"}`. Such buckets may be returned from distinct targets. Thanks to @647-coder for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225).
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): change severity level for log messages about failed attempts for sending data to remote storage from `error` to `warn`. The message about all failed send attempts remains at `error` severity level.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly escape string passed to `quotesEscape` [template function](https://docs.victoriametrics.com/vmalert.html#template-functions), so it can be safely embedded into JSON string. This makes obsolete the `crlfEscape` function. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3139) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/890) issue.
|
||||
* BUGFIX: `vmselect`: expose missing metric `vm_cache_size_max_bytes{type="promql/rollupResult"}` . This metric is used for monitoring rollup cache usage with the query `vm_cache_size_bytes{type="promql/rollupResult"} / vm_cache_size_max_bytes{type="promql/rollupResult"}` in the same way as this is done for other cache types.
|
||||
|
||||
|
||||
## [v1.79.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.4)
|
||||
|
||||
Released at 07-10-2022
|
||||
|
||||
**v1.79.x is a line of LTS releases (e.g. long-time support). It contains important up-to-date bugfixes.
|
||||
The v1.79.x line will be supported for at least 12 months since [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
|
||||
|
||||
**Update note 1:** [vmalert](https://docs.victoriametrics.com/vmalert.html) changes default value for command-line flag `-datasource.queryStep` from `0s` to `5m`. The change is supposed to improve reliability of the rules evaluation when evaluation interval is lower than scraping interval.
|
||||
|
||||
* FEATURE: expose `vmagent_remotewrite_queues` metric and use it in alerting rules in order to improve the detection of remote storage connection saturation. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2871).
|
||||
|
||||
* BUGFIX: do not export stale metrics via [/federate api](https://docs.victoriametrics.com/#federation) after the staleness markers. Previously such metrics were exported with `NaN` values. This could break some setups. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3185).
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): properly handle request paths ending with `/` such as `/vmui/`. Previously `vmui` was dropping the trailing `/`, which could prevent from using `vmui` via `vmauth`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly encode query params for aws signed requests, use `%20` instead of `+` as api requires. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3171).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `rate_over_sum(m[d])` as `sum_over_time(m[d])/d`. Previously the `sum_over_time(m[d])` could be improperly divided by smaller than `d` time range. See [rate_over_sum() docs](https://docs.victoriametrics.com/MetricsQL.html#rate_over_sum) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3045).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `increase(m[d])` over slow-changing counters with values smaller than 100. Previously [increase](https://docs.victoriametrics.com/MetricsQL.html#increase) could return unexpectedly big results in this case. See [the related issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3163).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): ignore empty series when applying [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset). It should improve queries with additional filters by value in expressions like `limit_offset(1,1, foo > 1)`.
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate [quantiles_over_time](https://docs.victoriametrics.com/MetricsQL.html#quantiles_over_time) when the lookbehind window contains only a single sample. Previously an empty result was incorrectly returned in this case.
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix `RangeError: Maximum call stack size exceeded` error when the query returns too many data points at `Table` view. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3092/files).
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): re-evaluate annotations per each alert evaluation. Previously, annotations were evaluated only on alert's value change. This could result in stale annotations in some cases described in [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3119).
|
||||
* BUGFIX: prevent from excessive CPU usage when the storage enters [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode). The previous fix in [v1.79.3](https://docs.victoriametrics.com/CHANGELOG.html#v1793) wasn't complete.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): change default value for command-line flag `-datasource.queryStep` from `0s` to `5m`. Param `step` is added by vmalert to every rule evaluation request sent to datasource. Before this change, `step` was equal to group's evaluation interval by default. Param `step` for instant queries defines how far VM can look back for the last written data point. The change is supposed to improve reliability of the rules evaluation when evaluation interval is lower than scraping interval.
|
||||
* BUGFIX: properly calculate `vm_rows_scanned_per_query` histogram exported at `/metrics` page of `vmselect` and single-node VictoriaMetrics. Previously it could return misleadingly high numbers for [rollup functions](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions), which scan only a few samples on the provided lookbehind window in square brackets. For example, `increase(m[1d])` always scans only 2 rows (aka `raw samples`) per each returned time series.
|
||||
|
||||
|
||||
## [v1.79.3](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.3)
|
||||
|
||||
Released at 30-08-2022
|
||||
|
||||
**v1.79.x is a line of LTS releases (e.g. long-time support). It contains important up-to-date bugfixes.
|
||||
The v1.79.x line will be supported for at least 12 months since [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
|
||||
|
||||
* SECURITY: [vmalert](https://docs.victoriametrics.com/vmalert.html): do not expose `-remoteWrite.url`, `-remoteRead.url` and `-datasource.url` command-line flag values in logs and at `http://vmalert:8880/flags` page by default, since they may contain sensitive data such as auth keys. This aligns `vmalert` behaviour with [vmagent](), which doesn't expose `-remoteWrite.url` command-line flag value in logs and at `http://vmagent:8429/flags` page by default. Specify `-remoteWrite.showURL`, `-remoteRead.showURL` and `-datasource.showURL` command-line flags for showing values for the corresponding `-*.url` flags in logs. Thanks to @mble for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2965).
|
||||
* SECURITY: upgrade base docker image (alpine) from 3.16.1 to 3.16.2. See [alpine 3.16.2 release notes](https://alpinelinux.org/posts/Alpine-3.13.12-3.14.8-3.15.6-3.16.2-released.html).
|
||||
|
||||
* BUGFIX: prevent from excess CPU usage when the storage enters [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
|
||||
* BUGFIX: improve performance for requests to [/api/v1/labels](https://docs.victoriametrics.com/url-examples.html#apiv1labels) and [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples.html#apiv1labelvalues) when the filter in the `match[]` query arg matches small number of time series. The performance for this case has been reduced in [v1.78.0](https://docs.victoriametrics.com/CHANGELOG.html#v1780). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1533) issues.
|
||||
* BUGFIX: increase the default limit on the number of concurrent merges for small parts from 8 to 16. This should help resolving potential issues with heavy data ingestion. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2673#issuecomment-1218185978) from @lukepalmer .
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): fix panic when incorrect arg is passed as `phi` into [histogram_quantiles](https://docs.victoriametrics.com/MetricsQL.html#histogram_quantiles) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3026).
|
||||
|
||||
## [v1.79.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.2)
|
||||
|
||||
Released at 08-08-2022
|
||||
|
||||
**v1.79.x is a line of LTS releases (e.g. long-time support). It contains important up-to-date bugfixes.
|
||||
The v1.79.x line will be supported for at least 12 months since [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
|
||||
|
||||
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix potential panic in [multi-level cluster setup](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup) when top-level `vmselect` is configured with `-replicationFactor` bigger than 1. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2961).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly handle custom `endpoint` value in [ec2_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config). It was ignored since [v1.77.0](https://docs.victoriametrics.com/CHANGELOG.html#v1770) because of a bug in the implementation of [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1287).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): add missing `__meta_kubernetes_ingress_class_name` meta-label for `role: ingress` service discovery in Kubernetes. See [this commit from Prometheus](https://github.com/prometheus/prometheus/commit/7e65ad3e432bd2837c17e3e63e85dcbcc30f4a8a).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow stale responses from Consul service discovery (aka [consul_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config)) by default in the same way as Prometheus does. This should reduce load on Consul when discovering big number of targets. Stale responses can be disabled by specifying `allow_stale: false` option in `consul_sd_config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2940).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): [dockerswarm_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config): properly set `__meta_dockerswarm_container_label_*` labels instead of `__meta_dockerswarm_task_label_*` labels as Prometheus does. See [this issue](https://github.com/prometheus/prometheus/issues/9187).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): set `up` metric to `0` for partial scrapes in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously the `up` metric was set to `1` when at least a single metric has been scraped before the error. This aligns the behaviour of `vmselect` with Prometheus.
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): restart all the scrape jobs during [config reload](https://docs.victoriametrics.com/vmagent.html#configuration-update) after `global` section is changed inside `-promscrape.config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with AWS ECS credentials. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2875). Thanks to @transacid for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2876).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not split regex in [relabeling rules](https://docs.victoriametrics.com/vmagent.html#relabeling) into multiple lines if it contains groups. This fixes [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): return series from `q1` if `q2` doesn't return matching time series in the query `q1 ifnot q2`. Previously series from `q1` weren't returned in this case.
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly show date picker at `Table` tab. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2874).
|
||||
* BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
|
||||
|
||||
|
||||
## [v1.79.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.1)
|
||||
|
||||
Released at 02-08-2022
|
||||
|
||||
**v1.79.x is a line of LTS releases (e.g. long-time support). It contains important up-to-date bugfixes.
|
||||
The v1.79.x line will be supported for at least 12 months since [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
|
||||
|
||||
* SECURITY: upgrade base docker image (alpine) from 3.16.0 to 3.16.1 . See [alpine 3.16.1 release notes](https://alpinelinux.org/posts/Alpine-3.16.1-released.html).
|
||||
|
||||
|
||||
## [v1.79.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.0)
|
||||
|
||||
Released at 14-07-2022
|
||||
|
||||
**v1.79.x is a line of LTS releases (e.g. long-time support). It contains important up-to-date bugfixes.
|
||||
The v1.79.x line will be supported for at least 12 months since [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
|
||||
|
||||
**Update note 1:** this release introduces backwards-incompatible changes to `vm_partial_results_total` metric by changing its labels to be consistent with `vm_requests_total` metric. If you use alerting rules or Grafana dashboards, which rely on this metric, then they must be updated. The official dashboards for VictoriaMetrics don't use this metric.
|
||||
|
||||
**Update note 2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) adds `/vmalert/` prefix to [web urls](https://docs.victoriametrics.com/vmalert.html#web) according to [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825). This may affect `vmalert` instances with non-empty `-http.pathPrefix` command-line flag. After the update, configuring this flag is no longer needed. Here's [why](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2799#issuecomment-1171392005).
|
||||
|
||||
@@ -487,7 +487,7 @@ See also [implicit query conversions](#implicit-query-conversions).
|
||||
|
||||
#### histogram_quantiles
|
||||
|
||||
`histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` calculates the given `phi*`-quantiles over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). `phi*` must be in the range `[0...1]`. Each calculated quantile is returned in a separate time series with the corresponding `{phiLabel="phi*"}` label. See also [histogram_quantile](#histogram_quantile).
|
||||
`histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` calculates the given `phi*`-quantiles over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). Argument `phi*` must be in the range `[0...1]`. For example, `histogram_quantiles('le', 0.3, 0.5, sum(rate(http_request_duration_seconds_bucket[5m]) by (le))`. Each calculated quantile is returned in a separate time series with the corresponding `{phiLabel="phi*"}` label. See also [histogram_quantile](#histogram_quantile).
|
||||
|
||||
#### histogram_share
|
||||
|
||||
|
||||
@@ -4,21 +4,59 @@ sort: 18
|
||||
|
||||
# Release process guidance
|
||||
|
||||
## Prereqs
|
||||
1. Make sure you have enterprise remote configured
|
||||
```
|
||||
git remote add enterprise <url>
|
||||
```
|
||||
2. Make sure you have signing key configured
|
||||
3. Make sure you have github token with at least `read:org, repo, write:packages` permissions exported under `GITHUB_TOKEN` env variable.
|
||||
You can create token [here](https://github.com/settings/tokens)
|
||||
|
||||
## Release version and Docker images
|
||||
|
||||
0. Make sure that the release commits have no security issues.
|
||||
1. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
|
||||
1a. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
|
||||
1b. Add `(available starting from v1.xx.y)` line to feature docs introduced in the upcoming release.
|
||||
2. Create the following release tags:
|
||||
* `git tag -s v1.xx.y` in `master` branch
|
||||
* `git tag -s v1.xx.y-cluster` in `cluster` branch
|
||||
* `git tag -s v1.xx.y-enterprise` in `enterprise` branch
|
||||
* `git tag -s v1.xx.y-enterprise-cluster` in `enterprise-cluster` branch
|
||||
3. Run `TAG=v1.xx.y make publish-release`. It will create `*.tar.gz` release archives with the corresponding `_checksums.txt` files inside `bin` directory and publish Docker images for the given `TAG`, `TAG-cluster`, `TAG-enterprise` and `TAG-enterprise-cluster`.
|
||||
4. Push release tags to <https://github.com/VictoriaMetrics/VictoriaMetrics> : `git push origin v1.xx.y` and `git push origin v1.xx.y-cluster`. Do not push `-enterprise` tags to public repository.
|
||||
5. Go to <https://github.com/VictoriaMetrics/VictoriaMetrics/releases> , create new release from the pushed tag on step 4 and upload `*.tar.gz` archive with the corresponding `_checksums.txt` from step 3.
|
||||
6. Copy the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release to [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page.
|
||||
7. Bump version of the VictoriaMetrics cluster setup in for [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
||||
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
|
||||
3. Run `TAG=v1.xx.y make publish-release`. This command performs the following tasks:
|
||||
a) Build and package binaries in `*.tar.gz` release archives with the corresponding `_checksums.txt` files inside `bin` directory.
|
||||
This step can be run manually with the command `make release` from the needed git tag.
|
||||
b) Build and publish [multi-platform Docker images](https://docs.docker.com/build/buildx/multiplatform-images/)
|
||||
for the given `TAG`, `TAG-cluster`, `TAG-enterprise` and `TAG-enterprise-cluster`.
|
||||
The multi-platform Docker image is built for the following platforms:
|
||||
* linux/amd64
|
||||
* linux/arm64
|
||||
* linux/arm
|
||||
* linux/ppc64le
|
||||
* linux/386
|
||||
This step can be run manually with the command `make publish` from the needed git tag.
|
||||
4. Push the tags created `v1.xx.y` and `v1.xx.y-cluster` at step 2 to public GitHub repository at https://github.com/VictoriaMetrics/VictoriaMetrics .
|
||||
**Important note:** do not push enterprise tags to public GitHub repository - they must be pushed only to private repository.
|
||||
5. Run `TAG=v1.xx.yy make github-create-release github-upload-assets`. This command performs the following tasks:
|
||||
a) Create draft GitHub release with the name `TAG`. This step can be run manually
|
||||
with the command `TAG=v1.xx.y make github-create-release`.
|
||||
The release id is stored at `/tmp/vm-github-release` file.
|
||||
b) Upload all the binaries and checksums created at step `3a` to that release.
|
||||
This step can be run manually with the command `make github-upload-assets`.
|
||||
It is expected that the needed release id is stored at `/tmp/vm-github-release` file,
|
||||
which must be created at the step `a`.
|
||||
If the upload process is interrupted by any reason, then the following recovery steps must be performed:
|
||||
- To delete the created draft release by running the command `make github-delete-release`.
|
||||
This command expects that the id of the release to delete is located at `/tmp/vm-github-release`
|
||||
file created at the step `a`.
|
||||
- To run the command `TAG=v1.xx.y make github-create-release github-upload-assets`, so new release is created
|
||||
and all the needed assets are re-uploaded to it.
|
||||
6. Go to <https://github.com/VictoriaMetrics/VictoriaMetrics/releases> and verify that draft release with the name `TAG` has been created
|
||||
and this release contains all the needed binaries and checksums.
|
||||
7. Update the release description with the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release.
|
||||
8. Remove the `draft` checkbox for the `TAG` release and manually publish it.
|
||||
9. Bump version of the VictoriaMetrics cluster in the [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
||||
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
|
||||
|
||||
## Building snap package
|
||||
|
||||
@@ -74,7 +112,3 @@ Repository [https://github.com/VictoriaMetrics/ansible-playbooks](https://github
|
||||
5. Commit changes
|
||||
6. Create a new tag
|
||||
7. Create a new release. This automatically publishes the new versions to galaxy.ansible.com
|
||||
|
||||
## Github pages
|
||||
|
||||
All changes in `README.md`, `docs` folder and `.md` extension automatically push to Wiki
|
||||
|
||||
@@ -641,11 +641,13 @@ The shortlist of configuration flags is the following:
|
||||
-datasource.oauth2.tokenUrl string
|
||||
Optional OAuth2 tokenURL to use for -datasource.url.
|
||||
-datasource.queryStep duration
|
||||
queryStep defines how far a value can fallback to when evaluating queries. For example, if datasource.queryStep=15s then param "step" with value "15s" will be added to every query.If queryStep isn't specified, rule's evaluationInterval will be used instead.
|
||||
How far a value can fallback to when evaluating queries. For example, if -datasource.queryStep=15s then param "step" with value "15s" will be added to every query. If set to 0, rule's evaluation interval will be used instead. (default 5m0s)
|
||||
-datasource.queryTimeAlignment
|
||||
Whether to align "time" parameter with evaluation interval.Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
|
||||
-datasource.roundDigits int
|
||||
Adds "round_digits" GET param to datasource requests. In VM "round_digits" limits the number of digits after the decimal point in response values.
|
||||
-datasource.showURL
|
||||
Whether to show -datasource.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
|
||||
-datasource.tlsCAFile string
|
||||
Optional path to TLS CA file to use for verifying connections to -datasource.url. By default, system CA is used
|
||||
-datasource.tlsCertFile string
|
||||
@@ -657,7 +659,7 @@ The shortlist of configuration flags is the following:
|
||||
-datasource.tlsServerName string
|
||||
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
|
||||
-datasource.url string
|
||||
VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend
|
||||
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
|
||||
-defaultTenant.graphite string
|
||||
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
|
||||
-defaultTenant.prometheus string
|
||||
@@ -677,8 +679,8 @@ The shortlist of configuration flags is the following:
|
||||
-evaluationInterval duration
|
||||
How often to evaluate the rules (default 1m0s)
|
||||
-external.alert.source string
|
||||
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
|
||||
eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/vmalert/api/v1/alert?group_id=&alert_id=' is used
|
||||
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' . If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used
|
||||
If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.
|
||||
-external.label array
|
||||
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
@@ -817,6 +819,8 @@ The shortlist of configuration flags is the following:
|
||||
Optional OAuth2 scopes to use for -remoteRead.url. Scopes must be delimited by ';'.
|
||||
-remoteRead.oauth2.tokenUrl string
|
||||
Optional OAuth2 tokenURL to use for -remoteRead.url.
|
||||
-remoteRead.showURL
|
||||
Whether to show -remoteRead.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
|
||||
-remoteRead.tlsCAFile string
|
||||
Optional path to TLS CA file to use for verifying connections to -remoteRead.url. By default system CA is used
|
||||
-remoteRead.tlsCertFile string
|
||||
@@ -828,7 +832,7 @@ The shortlist of configuration flags is the following:
|
||||
-remoteRead.tlsServerName string
|
||||
Optional TLS server name to use for connections to -remoteRead.url. By default the server name from -remoteRead.url is used
|
||||
-remoteRead.url vmalert
|
||||
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend
|
||||
Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect.Remote read is used to restore alerts state.This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.
|
||||
-remoteWrite.basicAuth.password string
|
||||
Optional basic auth password for -remoteWrite.url
|
||||
-remoteWrite.basicAuth.passwordFile string
|
||||
@@ -859,6 +863,8 @@ The shortlist of configuration flags is the following:
|
||||
Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'.
|
||||
-remoteWrite.oauth2.tokenUrl string
|
||||
Optional OAuth2 tokenURL to use for -notifier.url.
|
||||
-remoteWrite.showURL
|
||||
Whether to show -remoteWrite.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
|
||||
-remoteWrite.tlsCAFile string
|
||||
Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. By default system CA is used
|
||||
-remoteWrite.tlsCertFile string
|
||||
@@ -870,7 +876,7 @@ The shortlist of configuration flags is the following:
|
||||
-remoteWrite.tlsServerName string
|
||||
Optional TLS server name to use for connections to -remoteWrite.url. By default the server name from -remoteWrite.url is used
|
||||
-remoteWrite.url string
|
||||
Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend
|
||||
Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.
|
||||
-replay.disableProgressBar
|
||||
Whether to disable rendering progress bars during the replay. Progress bar rendering might be verbose or break the logs parsing, so it is recommended to be disabled when not used in interactive mode.
|
||||
-replay.maxDatapointsPerQuery int
|
||||
|
||||
76
go.mod
76
go.mod
@@ -1,17 +1,17 @@
|
||||
module github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
go 1.17
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.23.0
|
||||
github.com/VictoriaMetrics/fastcache v1.10.0
|
||||
cloud.google.com/go/storage v1.28.0
|
||||
github.com/VictoriaMetrics/fastcache v1.12.0
|
||||
|
||||
// Do not use the original github.com/valyala/fasthttp because of issues
|
||||
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0
|
||||
github.com/VictoriaMetrics/metrics v1.18.1
|
||||
github.com/VictoriaMetrics/metricsql v0.44.1
|
||||
github.com/aws/aws-sdk-go v1.44.53
|
||||
github.com/VictoriaMetrics/metrics v1.23.0
|
||||
github.com/VictoriaMetrics/metricsql v0.45.0
|
||||
github.com/aws/aws-sdk-go v1.44.134
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
|
||||
// TODO: switch back to https://github.com/cheggaaa/pb/v3 when v3-pooling branch
|
||||
@@ -19,26 +19,27 @@ require (
|
||||
// See https://github.com/cheggaaa/pb/pull/192#issuecomment-1121285954 for details.
|
||||
github.com/dmitryk-dk/pb/v3 v3.0.9
|
||||
github.com/golang/snappy v0.0.4
|
||||
github.com/influxdata/influxdb v1.9.8
|
||||
github.com/klauspost/compress v1.15.8
|
||||
github.com/influxdata/influxdb v1.10.0
|
||||
github.com/klauspost/compress v1.15.12
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
|
||||
github.com/urfave/cli/v2 v2.11.0
|
||||
github.com/urfave/cli/v2 v2.23.5
|
||||
github.com/valyala/fastjson v1.6.3
|
||||
github.com/valyala/fastrand v1.1.0
|
||||
github.com/valyala/fasttemplate v1.2.1
|
||||
github.com/valyala/fasttemplate v1.2.2
|
||||
github.com/valyala/gozstd v1.17.0
|
||||
github.com/valyala/quicktemplate v1.7.0
|
||||
golang.org/x/net v0.0.0-20220708220712-1185a9018129
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e
|
||||
google.golang.org/api v0.87.0
|
||||
golang.org/x/net v0.2.0
|
||||
golang.org/x/oauth2 v0.2.0
|
||||
golang.org/x/sys v0.2.0
|
||||
google.golang.org/api v0.103.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.103.0 // indirect
|
||||
cloud.google.com/go/compute v1.7.0 // indirect
|
||||
cloud.google.com/go/iam v0.3.0 // indirect
|
||||
cloud.google.com/go v0.106.0 // indirect
|
||||
cloud.google.com/go/compute v1.12.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.1 // indirect
|
||||
cloud.google.com/go/iam v0.7.0 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
@@ -48,35 +49,34 @@ require (
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
|
||||
github.com/googleapis/go-type-adapters v1.0.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.12.2 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.36.0 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.2 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/histogram v1.2.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/text v0.4.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d // indirect
|
||||
google.golang.org/grpc v1.48.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66 // indirect
|
||||
google.golang.org/grpc v1.50.1 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
)
|
||||
|
||||
374
go.sum
374
go.sum
@@ -15,24 +15,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
|
||||
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
|
||||
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
|
||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
|
||||
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
|
||||
cloud.google.com/go v0.103.0 h1:YXtxp9ymmZjlGzxV7VrYQ8aaQuAgcqxSy6YhDX4I458=
|
||||
cloud.google.com/go v0.103.0/go.mod h1:vwLx1nqLrzLX/fpwSMOXmFIqBOyHsvHbnAdbGSJ+mKk=
|
||||
cloud.google.com/go v0.106.0 h1:AWaMWuZb2oFeiV91OfNHZbmwUhMVuXEaLPm9sqDAOl8=
|
||||
cloud.google.com/go v0.106.0/go.mod h1:5NEGxGuIeMQiPaWLwLYZ7kfNWiP6w1+QJK+xqyIT+dw=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
@@ -40,17 +24,15 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
|
||||
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
|
||||
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
|
||||
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
|
||||
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
|
||||
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
|
||||
cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk=
|
||||
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
|
||||
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
|
||||
cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
|
||||
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
|
||||
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
|
||||
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
|
||||
cloud.google.com/go/iam v0.7.0 h1:k4MuwOsS7zGJJ+QfZ5vBK8SgHBAvYN/23BWsiihJ1vs=
|
||||
cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
|
||||
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
@@ -60,9 +42,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
|
||||
cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY=
|
||||
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
|
||||
cloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY=
|
||||
cloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI=
|
||||
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
@@ -103,14 +84,15 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY=
|
||||
github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.0 h1:vnVi/y9yKDcD9akmc4NqAoqgQhJrOwUF+j9LTgn4QDE=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8=
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
||||
github.com/VictoriaMetrics/metricsql v0.44.1 h1:qGoRt0g84uMUscVjS7P3uDZKmjJubWKaIx9v0iHKgck=
|
||||
github.com/VictoriaMetrics/metricsql v0.44.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
|
||||
github.com/VictoriaMetrics/metrics v1.23.0 h1:WzfqyzCaxUZip+OBbg1+lV33WChDSu4ssYII3nxtpeA=
|
||||
github.com/VictoriaMetrics/metrics v1.23.0/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
|
||||
github.com/VictoriaMetrics/metricsql v0.45.0 h1:kVQHnkDJm4qyJ8f5msTclmwqAtlUdPbbEJ7zoa/FTNs=
|
||||
github.com/VictoriaMetrics/metricsql v0.45.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
@@ -146,8 +128,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.44.53 h1:2MErE8gRyBLuE1fuH2Sqlj1xoN3S6/jXb0aO/A1jGfk=
|
||||
github.com/aws/aws-sdk-go v1.44.53/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.134 h1:TzFxjVHPPsibtkD7y6KHI4V00rEKg4yzNlMNGy2ZHeg=
|
||||
github.com/aws/aws-sdk-go v1.44.134/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
@@ -173,14 +155,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
@@ -221,12 +195,6 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
@@ -388,8 +356,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -405,7 +371,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
@@ -428,19 +393,15 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
@@ -448,34 +409,20 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
|
||||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
|
||||
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
||||
github.com/gophercloud/gophercloud v0.14.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
@@ -530,8 +477,8 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
|
||||
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
|
||||
github.com/influxdata/influxdb v1.9.8 h1:wuw8ZwyIZgg/jn//9cwr4OpKIF5z9o83lIfpb19aO2Q=
|
||||
github.com/influxdata/influxdb v1.9.8/go.mod h1:8Ft9mikW2GELpV154RV+F7ocPa5FS5G/rl4rH9INT/I=
|
||||
github.com/influxdata/influxdb v1.10.0 h1:8xDpt8KO3lzrzf/ss+l8r42AGUZvoITu5824berK7SE=
|
||||
github.com/influxdata/influxdb v1.10.0/go.mod h1:IVPuoA2pOOxau/NguX7ipW0Jp9Bn+dMWlo0+VOscevU=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
|
||||
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
|
||||
@@ -573,8 +520,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
|
||||
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
|
||||
github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
|
||||
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
@@ -609,24 +556,26 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
@@ -724,15 +673,16 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
|
||||
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
@@ -743,8 +693,8 @@ github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16
|
||||
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.36.0 h1:78hJTing+BLYLjhXE+Z2BubeEymH5Lr0/Mt8FKkxxYo=
|
||||
github.com/prometheus/common v0.36.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
@@ -753,14 +703,16 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PGVYqn3P7oWbrSmSlJHae9y6wwpAdoWb/pZi6Q=
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
|
||||
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
@@ -806,14 +758,19 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
@@ -822,8 +779,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
|
||||
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli/v2 v2.11.0 h1:c6bD90aLd2iEsokxhxkY5Er0zA2V9fId2aJfwmrF+do=
|
||||
github.com/urfave/cli/v2 v2.11.0/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
|
||||
github.com/urfave/cli/v2 v2.23.5 h1:xbrU7tAYviSpqeR3X4nEFWUdB/uDZ6DE+HxmRU7Xtyw=
|
||||
github.com/urfave/cli/v2 v2.23.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||
@@ -831,8 +788,8 @@ github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4x
|
||||
github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
|
||||
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
||||
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
|
||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/gozstd v1.17.0 h1:M4Ds4MIrw+pD+s6vYtuFZ8D3iEw9htzfdytOV3C3iQU=
|
||||
github.com/valyala/gozstd v1.17.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
||||
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
|
||||
@@ -854,6 +811,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
@@ -869,15 +827,13 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
@@ -905,6 +861,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -931,8 +888,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
@@ -941,9 +896,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -984,26 +938,17 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0=
|
||||
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1011,22 +956,10 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw=
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=
|
||||
golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1039,8 +972,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1100,57 +1034,36 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew=
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1219,25 +1132,15 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
||||
@@ -1262,32 +1165,8 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
|
||||
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
|
||||
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
|
||||
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
|
||||
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
|
||||
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
|
||||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
|
||||
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
|
||||
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
|
||||
google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
|
||||
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.87.0 h1:pUQVF/F+X7Tl1lo4LJoJf5BOpjtmINU80p9XpYTU2p4=
|
||||
google.golang.org/api v0.87.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=
|
||||
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1331,58 +1210,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
|
||||
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d h1:YbuF5+kdiC516xIP60RvlHeFbY9sRDR73QsAGHpkeVw=
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66 h1:wx7sJ5GRBQLRcslTNcrTklsHhHevQvxgztW18txbbZM=
|
||||
google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
@@ -1403,25 +1232,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
|
||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -1434,9 +1246,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -1466,6 +1277,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@@ -43,8 +43,8 @@ type credentials struct {
|
||||
Expiration time.Time
|
||||
}
|
||||
|
||||
// NewConfig returns new AWS Config.
|
||||
func NewConfig(region, roleARN, accessKey, secretKey, service string) (*Config, error) {
|
||||
// NewConfig returns new AWS Config from the given args.
|
||||
func NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey, service string) (*Config, error) {
|
||||
cfg := &Config{
|
||||
client: http.DefaultClient,
|
||||
region: region,
|
||||
@@ -65,8 +65,8 @@ func NewConfig(region, roleARN, accessKey, secretKey, service string) (*Config,
|
||||
}
|
||||
cfg.region = r
|
||||
}
|
||||
cfg.ec2Endpoint = buildAPIEndpoint(cfg.ec2Endpoint, cfg.region, "ec2")
|
||||
cfg.stsEndpoint = buildAPIEndpoint(cfg.stsEndpoint, cfg.region, "sts")
|
||||
cfg.ec2Endpoint = buildAPIEndpoint(ec2Endpoint, cfg.region, "ec2")
|
||||
cfg.stsEndpoint = buildAPIEndpoint(stsEndpoint, cfg.region, "sts")
|
||||
if cfg.roleARN == "" {
|
||||
cfg.roleARN = os.Getenv("AWS_ROLE_ARN")
|
||||
}
|
||||
@@ -204,7 +204,11 @@ func (cfg *Config) getAPICredentials() (*credentials, error) {
|
||||
}
|
||||
if ecsMetaURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); len(ecsMetaURI) > 0 {
|
||||
path := "http://169.254.170.2" + ecsMetaURI
|
||||
return getECSRoleCredentialsByPath(cfg.client, path)
|
||||
ac, err := getECSRoleCredentialsByPath(cfg.client, path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain ECS role credentials: %w", err)
|
||||
}
|
||||
acNew = ac
|
||||
}
|
||||
|
||||
// we need instance credentials if dont have access keys
|
||||
|
||||
@@ -41,6 +41,9 @@ func signRequestWithTime(req *http.Request, service, region, payloadHash string,
|
||||
datestamp := t.Format("20060102")
|
||||
canonicalURL := uri.Path
|
||||
canonicalQS := uri.Query().Encode()
|
||||
// Replace "%20" with "+" according to AWS requirements.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3171
|
||||
canonicalQS = strings.ReplaceAll(canonicalQS, "+", "%20")
|
||||
|
||||
canonicalHeaders := fmt.Sprintf("host:%s\nx-amz-date:%s\n", uri.Host, amzdate)
|
||||
signedHeaders := "host;x-amz-date"
|
||||
|
||||
@@ -235,13 +235,27 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
|
||||
connTimeoutClosedConns.Inc()
|
||||
w.Header().Set("Connection", "close")
|
||||
}
|
||||
path, err := getCanonicalPath(r.URL.Path)
|
||||
if err != nil {
|
||||
Errorf(w, r, "cannot get canonical path: %s", err)
|
||||
unsupportedRequestErrors.Inc()
|
||||
return
|
||||
path := r.URL.Path
|
||||
prefix := GetPathPrefix()
|
||||
if prefix != "" {
|
||||
// Trim -http.pathPrefix from path
|
||||
prefixNoTrailingSlash := strings.TrimSuffix(prefix, "/")
|
||||
if path == prefixNoTrailingSlash {
|
||||
// Redirect to url with / at the end.
|
||||
// This is needed for proper handling of relative urls in web browsers.
|
||||
// Intentionally ignore query args, since it is expected that the requested url
|
||||
// is composed by a human, so it doesn't contain query args.
|
||||
RedirectPermanent(w, prefix)
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(path, prefix) {
|
||||
Errorf(w, r, "missing -http.pathPrefix=%q in the requested path %q", *pathPrefix, path)
|
||||
unsupportedRequestErrors.Inc()
|
||||
return
|
||||
}
|
||||
path = path[len(prefix)-1:]
|
||||
r.URL.Path = path
|
||||
}
|
||||
r.URL.Path = path
|
||||
switch r.URL.Path {
|
||||
case "/health":
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
@@ -327,24 +341,6 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
|
||||
}
|
||||
}
|
||||
|
||||
func getCanonicalPath(path string) (string, error) {
|
||||
if len(*pathPrefix) == 0 || path == "/" {
|
||||
return path, nil
|
||||
}
|
||||
if *pathPrefix == path {
|
||||
return "/", nil
|
||||
}
|
||||
prefix := *pathPrefix
|
||||
if !strings.HasSuffix(prefix, "/") {
|
||||
prefix = prefix + "/"
|
||||
}
|
||||
if !strings.HasPrefix(path, prefix) {
|
||||
return "", fmt.Errorf("missing `-pathPrefix=%q` in the requested path: %q", *pathPrefix, path)
|
||||
}
|
||||
path = path[len(prefix)-1:]
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func checkBasicAuth(w http.ResponseWriter, r *http.Request) bool {
|
||||
if len(*httpAuthUsername) == 0 {
|
||||
// HTTP Basic Auth is disabled.
|
||||
@@ -643,7 +639,17 @@ func IsTLS() bool {
|
||||
|
||||
// GetPathPrefix - returns http server path prefix.
|
||||
func GetPathPrefix() string {
|
||||
return *pathPrefix
|
||||
prefix := *pathPrefix
|
||||
if prefix == "" {
|
||||
return ""
|
||||
}
|
||||
if !strings.HasPrefix(prefix, "/") {
|
||||
prefix = "/" + prefix
|
||||
}
|
||||
if !strings.HasSuffix(prefix, "/") {
|
||||
prefix += "/"
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
// WriteAPIHelp writes pathList to w in HTML format.
|
||||
@@ -671,3 +677,12 @@ func GetRequestURI(r *http.Request) string {
|
||||
}
|
||||
return requestURI + delimiter + queryArgs
|
||||
}
|
||||
|
||||
// RedirectPermanent redirects to the given url using 301 status code.
|
||||
func RedirectPermanent(w http.ResponseWriter, url string) {
|
||||
// Do not use http.Redirect, since it breaks relative redirects
|
||||
// if the http.Request.URL contains unexpected url.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918
|
||||
w.Header().Set("Location", url)
|
||||
w.WriteHeader(http.StatusMovedPermanently)
|
||||
}
|
||||
|
||||
@@ -710,7 +710,7 @@ func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) {
|
||||
atomic.AddUint64(&tb.assistedMerges, 1)
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
|
||||
if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
|
||||
return
|
||||
}
|
||||
logger.Panicf("FATAL: cannot merge small parts: %s", err)
|
||||
@@ -805,12 +805,14 @@ func (tb *Table) canBackgroundMerge() bool {
|
||||
return atomic.LoadUint32(tb.isReadOnly) == 0
|
||||
}
|
||||
|
||||
var errReadOnlyMode = fmt.Errorf("storage is in readonly mode")
|
||||
|
||||
func (tb *Table) mergeExistingParts(isFinal bool) error {
|
||||
if !tb.canBackgroundMerge() {
|
||||
// Do not perform background merge in read-only mode
|
||||
// in order to prevent from disk space shortage.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
|
||||
return nil
|
||||
return errReadOnlyMode
|
||||
}
|
||||
n := fs.MustGetFreeSpace(tb.path)
|
||||
// Divide free space by the max number of concurrent merges.
|
||||
@@ -849,7 +851,7 @@ func (tb *Table) partMerger() error {
|
||||
// The merger has been stopped.
|
||||
return nil
|
||||
}
|
||||
if !errors.Is(err, errNothingToMerge) {
|
||||
if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) {
|
||||
return err
|
||||
}
|
||||
if fasttime.UnixTimestamp()-lastMergeTime > 30 {
|
||||
|
||||
@@ -104,6 +104,11 @@ func stringValue(v interface{}) (string, error) {
|
||||
|
||||
// MarshalYAML marshals mlr to YAML.
|
||||
func (mlr *MultiLineRegex) MarshalYAML() (interface{}, error) {
|
||||
if strings.ContainsAny(mlr.S, "([") {
|
||||
// The mlr.S contains groups. Fall back to returning the regexp as is without splitting it into parts.
|
||||
// This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928 .
|
||||
return mlr.S, nil
|
||||
}
|
||||
a := strings.Split(mlr.S, "|")
|
||||
if len(a) == 1 {
|
||||
return a[0], nil
|
||||
|
||||
@@ -7,6 +7,30 @@ import (
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func TestMultiLineRegexUnmarshalMarshal(t *testing.T) {
|
||||
f := func(data, resultExpected string) {
|
||||
t.Helper()
|
||||
var mlr MultiLineRegex
|
||||
if err := yaml.UnmarshalStrict([]byte(data), &mlr); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", data, err)
|
||||
}
|
||||
result, err := yaml.Marshal(&mlr)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot marshal %q: %s", data, err)
|
||||
}
|
||||
if string(result) != resultExpected {
|
||||
t.Fatalf("unexpected marshaled data; got\n%q\nwant\n%q", result, resultExpected)
|
||||
}
|
||||
}
|
||||
f(``, `""`+"\n")
|
||||
f(`foo`, "foo\n")
|
||||
f(`a|b||c`, "- a\n- b\n- \"\"\n- c\n")
|
||||
f(`(a|b)`, "(a|b)\n")
|
||||
f(`a|b[c|d]`, "a|b[c|d]\n")
|
||||
f("- a\n- b", "- a\n- b\n")
|
||||
f("- a\n- (b)", "a|(b)\n")
|
||||
}
|
||||
|
||||
func TestRelabelConfigMarshalUnmarshal(t *testing.T) {
|
||||
f := func(data, resultExpected string) {
|
||||
t.Helper()
|
||||
@@ -31,7 +55,7 @@ func TestRelabelConfigMarshalUnmarshal(t *testing.T) {
|
||||
- regex:
|
||||
- 'fo.+'
|
||||
- '.*ba[r-z]a'
|
||||
`, "- regex:\n - fo.+\n - .*ba[r-z]a\n")
|
||||
`, "- regex: fo.+|.*ba[r-z]a\n")
|
||||
f(`- regex: foo|bar`, "- regex:\n - foo\n - bar\n")
|
||||
f(`- regex: True`, `- regex: "true"`+"\n")
|
||||
f(`- regex: true`, `- regex: "true"`+"\n")
|
||||
|
||||
@@ -130,6 +130,10 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
|
||||
prevScrapeCfgByName[scPrev.JobName] = scPrev
|
||||
}
|
||||
|
||||
// Restart all the scrape jobs on Global config change.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884
|
||||
needGlobalRestart := !areEqualGlobalConfigs(&cfg.Global, &prevCfg.Global)
|
||||
|
||||
// Loop over the the new jobs, start new ones and restart updated ones.
|
||||
var started, stopped, restarted int
|
||||
currentJobNames := make(map[string]struct{}, len(cfg.ScrapeConfigs))
|
||||
@@ -142,7 +146,7 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
|
||||
started++
|
||||
continue
|
||||
}
|
||||
if areEqualScrapeConfigs(scPrev, sc) {
|
||||
if !needGlobalRestart && areEqualScrapeConfigs(scPrev, sc) {
|
||||
// The scrape config didn't change, so no need to restart it.
|
||||
// Use the reference to the previous job, so it could be stopped properly later.
|
||||
cfg.ScrapeConfigs[i] = scPrev
|
||||
@@ -165,6 +169,12 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
|
||||
logger.Infof("restarted service discovery routines in %.3f seconds, stopped=%d, started=%d, restarted=%d", time.Since(startTime).Seconds(), stopped, started, restarted)
|
||||
}
|
||||
|
||||
func areEqualGlobalConfigs(a, b *GlobalConfig) bool {
|
||||
sa := a.marshalJSON()
|
||||
sb := b.marshalJSON()
|
||||
return string(sa) == string(sb)
|
||||
}
|
||||
|
||||
func areEqualScrapeConfigs(a, b *ScrapeConfig) bool {
|
||||
sa := a.marshalJSON()
|
||||
sb := b.marshalJSON()
|
||||
@@ -183,6 +193,14 @@ func (sc *ScrapeConfig) marshalJSON() []byte {
|
||||
return data
|
||||
}
|
||||
|
||||
func (gc *GlobalConfig) marshalJSON() []byte {
|
||||
data, err := json.Marshal(gc)
|
||||
if err != nil {
|
||||
logger.Panicf("BUG: cannot marshal GlobalConfig: %s", err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func (cfg *Config) mustStop() {
|
||||
startTime := time.Now()
|
||||
logger.Infof("stopping service discovery routines...")
|
||||
|
||||
@@ -27,7 +27,7 @@ type SDConfig struct {
|
||||
Tags []string `yaml:"tags,omitempty"`
|
||||
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
|
||||
TagSeparator *string `yaml:"tag_separator,omitempty"`
|
||||
AllowStale bool `yaml:"allow_stale,omitempty"`
|
||||
AllowStale *bool `yaml:"allow_stale,omitempty"`
|
||||
// RefreshInterval time.Duration `yaml:"refresh_interval"`
|
||||
// refresh_interval is obtained from `-promscrape.consulSDCheckInterval` command-line option.
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ type serviceWatcher struct {
|
||||
// newConsulWatcher creates new watcher and starts background service discovery for Consul.
|
||||
func newConsulWatcher(client *discoveryutils.Client, sdc *SDConfig, datacenter, namespace string) *consulWatcher {
|
||||
baseQueryArgs := "?dc=" + url.QueryEscape(datacenter)
|
||||
if sdc.AllowStale {
|
||||
if sdc.AllowStale == nil || *sdc.AllowStale {
|
||||
baseQueryArgs += "&stale"
|
||||
}
|
||||
if namespace != "" {
|
||||
|
||||
@@ -15,7 +15,6 @@ type task struct {
|
||||
ID string
|
||||
ServiceID string
|
||||
NodeID string
|
||||
Labels map[string]string
|
||||
DesiredState string
|
||||
NetworksAttachments []struct {
|
||||
Addresses []string
|
||||
@@ -32,6 +31,11 @@ type task struct {
|
||||
Ports []portConfig
|
||||
}
|
||||
}
|
||||
Spec struct {
|
||||
ContainerSpec struct {
|
||||
Labels map[string]string
|
||||
}
|
||||
}
|
||||
Slot int
|
||||
}
|
||||
|
||||
@@ -82,8 +86,8 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []map[string]strin
|
||||
"__meta_dockerswarm_task_slot": strconv.Itoa(task.Slot),
|
||||
"__meta_dockerswarm_task_state": task.Status.State,
|
||||
}
|
||||
for k, v := range task.Labels {
|
||||
commonLabels["__meta_dockerswarm_task_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
for k, v := range task.Spec.ContainerSpec.Labels {
|
||||
commonLabels["__meta_dockerswarm_container_label_"+discoveryutils.SanitizeLabelName(k)] = v
|
||||
}
|
||||
var svcPorts []portConfig
|
||||
for i, v := range services {
|
||||
|
||||
@@ -27,13 +27,13 @@ func Test_parseTasks(t *testing.T) {
|
||||
"Version": {
|
||||
"Index": 23
|
||||
},
|
||||
"Labels": {
|
||||
"label1": "value1"
|
||||
},
|
||||
"Spec": {
|
||||
"ContainerSpec": {
|
||||
"Image": "redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842",
|
||||
"Init": false
|
||||
"Init": false,
|
||||
"Labels": {
|
||||
"label1": "value1"
|
||||
}
|
||||
},
|
||||
"Resources": {
|
||||
"Limits": {},
|
||||
@@ -70,8 +70,18 @@ func Test_parseTasks(t *testing.T) {
|
||||
ID: "t4rdm7j2y9yctbrksiwvsgpu5",
|
||||
ServiceID: "t91nf284wzle1ya09lqvyjgnq",
|
||||
NodeID: "qauwmifceyvqs0sipvzu8oslu",
|
||||
Labels: map[string]string{
|
||||
"label1": "value1",
|
||||
Spec: struct {
|
||||
ContainerSpec struct {
|
||||
Labels map[string]string
|
||||
}
|
||||
}{
|
||||
ContainerSpec: struct {
|
||||
Labels map[string]string
|
||||
}{
|
||||
Labels: map[string]string{
|
||||
"label1": "value1",
|
||||
},
|
||||
},
|
||||
},
|
||||
DesiredState: "running",
|
||||
Slot: 1,
|
||||
@@ -97,7 +107,7 @@ func Test_parseTasks(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("parseTasks() got = %v, want %v", got, tt.want)
|
||||
t.Errorf("parseTasks() got\n%v\nwant\n%v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -126,7 +136,6 @@ func Test_addTasksLabels(t *testing.T) {
|
||||
ID: "t4rdm7j2y9yctbrksiwvsgpu5",
|
||||
ServiceID: "t91nf284wzle1ya09lqvyjgnq",
|
||||
NodeID: "qauwmifceyvqs0sipvzu8oslu",
|
||||
Labels: map[string]string{},
|
||||
DesiredState: "running",
|
||||
Slot: 1,
|
||||
Status: struct {
|
||||
@@ -194,7 +203,6 @@ func Test_addTasksLabels(t *testing.T) {
|
||||
ID: "t4rdm7j2y9yctbrksiwvsgpu5",
|
||||
ServiceID: "tgsci5gd31aai3jyudv98pqxf",
|
||||
NodeID: "qauwmifceyvqs0sipvzu8oslu",
|
||||
Labels: map[string]string{},
|
||||
DesiredState: "running",
|
||||
Slot: 1,
|
||||
NetworksAttachments: []struct {
|
||||
|
||||
@@ -33,7 +33,11 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
|
||||
if sdc.Port != nil {
|
||||
port = *sdc.Port
|
||||
}
|
||||
awsCfg, err := awsapi.NewConfig(sdc.Region, sdc.RoleARN, sdc.AccessKey, sdc.SecretKey.String(), "ec2")
|
||||
stsEndpoint := sdc.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = sdc.Endpoint
|
||||
}
|
||||
awsCfg, err := awsapi.NewConfig(sdc.Endpoint, stsEndpoint, sdc.Region, sdc.RoleARN, sdc.AccessKey, sdc.SecretKey.String(), "ec2")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -18,12 +18,13 @@ var SDCheckInterval = flag.Duration("promscrape.ec2SDCheckInterval", time.Minute
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config
|
||||
type SDConfig struct {
|
||||
Region string `yaml:"region,omitempty"`
|
||||
Endpoint string `yaml:"endpoint,omitempty"`
|
||||
AccessKey string `yaml:"access_key,omitempty"`
|
||||
SecretKey *promauth.Secret `yaml:"secret_key,omitempty"`
|
||||
Region string `yaml:"region,omitempty"`
|
||||
Endpoint string `yaml:"endpoint,omitempty"`
|
||||
STSEndpoint string `yaml:"sts_endpoint,omitempty"`
|
||||
AccessKey string `yaml:"access_key,omitempty"`
|
||||
SecretKey *promauth.Secret `yaml:"secret_key,omitempty"`
|
||||
// TODO add support for Profile, not working atm
|
||||
Profile string `yaml:"profile,omitempty"`
|
||||
// Profile string `yaml:"profile,omitempty"`
|
||||
RoleARN string `yaml:"role_arn,omitempty"`
|
||||
// RefreshInterval time.Duration `yaml:"refresh_interval"`
|
||||
// refresh_interval is obtained from `-promscrape.ec2SDCheckInterval` command-line option.
|
||||
|
||||
@@ -52,8 +52,9 @@ type Ingress struct {
|
||||
//
|
||||
// See https://v1-21.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#ingressspec-v1-networking-k8s-io
|
||||
type IngressSpec struct {
|
||||
TLS []IngressTLS `json:"tls"`
|
||||
Rules []IngressRule
|
||||
TLS []IngressTLS `json:"tls"`
|
||||
Rules []IngressRule
|
||||
IngressClassName string
|
||||
}
|
||||
|
||||
// IngressTLS represents ingress TLS spec in k8s.
|
||||
@@ -130,12 +131,13 @@ func matchesHostPattern(pattern, host string) bool {
|
||||
|
||||
func getLabelsForIngressPath(ig *Ingress, scheme, host, path string) map[string]string {
|
||||
m := map[string]string{
|
||||
"__address__": host,
|
||||
"__meta_kubernetes_namespace": ig.Metadata.Namespace,
|
||||
"__meta_kubernetes_ingress_name": ig.Metadata.Name,
|
||||
"__meta_kubernetes_ingress_scheme": scheme,
|
||||
"__meta_kubernetes_ingress_host": host,
|
||||
"__meta_kubernetes_ingress_path": path,
|
||||
"__address__": host,
|
||||
"__meta_kubernetes_namespace": ig.Metadata.Namespace,
|
||||
"__meta_kubernetes_ingress_name": ig.Metadata.Name,
|
||||
"__meta_kubernetes_ingress_scheme": scheme,
|
||||
"__meta_kubernetes_ingress_host": host,
|
||||
"__meta_kubernetes_ingress_path": path,
|
||||
"__meta_kubernetes_ingress_class_name": ig.Spec.IngressClassName,
|
||||
}
|
||||
ig.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_ingress", m)
|
||||
return m
|
||||
|
||||
@@ -78,7 +78,8 @@ func TestParseIngressListSuccess(t *testing.T) {
|
||||
{
|
||||
"host": "foobar"
|
||||
}
|
||||
]
|
||||
],
|
||||
"ingressClassName": "foo-class"
|
||||
},
|
||||
"status": {
|
||||
"loadBalancer": {
|
||||
@@ -107,11 +108,12 @@ func TestParseIngressListSuccess(t *testing.T) {
|
||||
"__address__": "foobar",
|
||||
"__meta_kubernetes_ingress_annotation_kubectl_kubernetes_io_last_applied_configuration": `{"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{},"name":"test-ingress","namespace":"default"},"spec":{"backend":{"serviceName":"testsvc","servicePort":80}}}` + "\n",
|
||||
"__meta_kubernetes_ingress_annotationpresent_kubectl_kubernetes_io_last_applied_configuration": "true",
|
||||
"__meta_kubernetes_ingress_host": "foobar",
|
||||
"__meta_kubernetes_ingress_name": "test-ingress",
|
||||
"__meta_kubernetes_ingress_path": "/",
|
||||
"__meta_kubernetes_ingress_scheme": "http",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_ingress_host": "foobar",
|
||||
"__meta_kubernetes_ingress_name": "test-ingress",
|
||||
"__meta_kubernetes_ingress_path": "/",
|
||||
"__meta_kubernetes_ingress_scheme": "http",
|
||||
"__meta_kubernetes_ingress_class_name": "foo-class",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
}),
|
||||
}
|
||||
if !areEqualLabelss(sortedLabelss, expectedLabelss) {
|
||||
|
||||
@@ -585,9 +585,9 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
|
||||
scrapeResponseSize.Update(float64(sbr.bodyLen))
|
||||
up := 1
|
||||
if err != nil {
|
||||
if samplesScraped == 0 {
|
||||
up = 0
|
||||
}
|
||||
// Mark the scrape as failed even if it already read and pushed some samples
|
||||
// to remote storage. This makes the logic compatible with Prometheus.
|
||||
up = 0
|
||||
scrapesFailed.Inc()
|
||||
}
|
||||
seriesAdded := 0
|
||||
|
||||
@@ -82,17 +82,26 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
|
||||
}
|
||||
r.Timestamp = int64(timestamp)
|
||||
tail = trimLeadingSpaces(tail[n+1:])
|
||||
valueStr := ""
|
||||
tagsStr := ""
|
||||
n = strings.IndexByte(tail, ' ')
|
||||
if n < 0 {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between value and the first tag in %q", s)
|
||||
// Missing tags.
|
||||
// Accept this case even if OpenTSDB forbids it according to http://opentsdb.net/docs/build/html/api_telnet/put.html:
|
||||
// > At least one tag pair must be present.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3290
|
||||
valueStr = tail
|
||||
} else {
|
||||
valueStr = tail[:n]
|
||||
tagsStr = tail[n+1:]
|
||||
}
|
||||
v, err := fastfloat.Parse(tail[:n])
|
||||
v, err := fastfloat.Parse(valueStr)
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("cannot parse value from %q: %w", tail[:n], err)
|
||||
return tagsPool, fmt.Errorf("cannot parse value from %q: %w", valueStr, err)
|
||||
}
|
||||
r.Value = v
|
||||
tagsStart := len(tagsPool)
|
||||
tagsPool, err = unmarshalTags(tagsPool, tail[n+1:])
|
||||
tagsPool, err = unmarshalTags(tagsPool, tagsStr)
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("cannot unmarshal tags in %q: %w", s, err)
|
||||
}
|
||||
|
||||
@@ -37,9 +37,6 @@ func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f("put aaa timestamp")
|
||||
f("put foobar 3df4 -123456 a=b")
|
||||
|
||||
// Missing first tag
|
||||
f("put aaa 123 43")
|
||||
|
||||
// Invalid value
|
||||
f("put aaa 123 invalid-value")
|
||||
f("put foobar 789 -123foo456 a=b")
|
||||
@@ -104,6 +101,23 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
},
|
||||
}},
|
||||
})
|
||||
// Missing first tag
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3290
|
||||
f("put aaa 123 43", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "aaa",
|
||||
Value: 43,
|
||||
Timestamp: 123,
|
||||
}},
|
||||
})
|
||||
f("put aaa 123 43 ", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "aaa",
|
||||
Value: 43,
|
||||
Timestamp: 123,
|
||||
}},
|
||||
})
|
||||
|
||||
// Fractional timestamp that is supported by Akumuli.
|
||||
f("put foobar 789.4 -123.456 a=b", &Rows{
|
||||
Rows: []Row{{
|
||||
|
||||
@@ -815,9 +815,13 @@ func (is *indexSearch) searchLabelNamesWithFiltersOnDate(qt *querytracer.Tracer,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if filter != nil && filter.Len() == 0 {
|
||||
qt.Printf("found zero label names for filter=%s", tfss)
|
||||
return nil
|
||||
if filter != nil && filter.Len() <= 100e3 {
|
||||
// It is faster to obtain label names by metricIDs from the filter
|
||||
// instead of scanning the inverted index for the matching filters.
|
||||
// This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978
|
||||
metricIDs := filter.AppendTo(nil)
|
||||
qt.Printf("sort %d metricIDs", len(metricIDs))
|
||||
return is.getLabelNamesForMetricIDs(qt, metricIDs, lns, maxLabelNames)
|
||||
}
|
||||
var prevLabelName []byte
|
||||
ts := &is.ts
|
||||
@@ -877,6 +881,41 @@ func (is *indexSearch) searchLabelNamesWithFiltersOnDate(qt *querytracer.Tracer,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *indexSearch) getLabelNamesForMetricIDs(qt *querytracer.Tracer, metricIDs []uint64, lns map[string]struct{}, maxLabelNames int) error {
|
||||
lns["__name__"] = struct{}{}
|
||||
var mn MetricName
|
||||
foundLabelNames := 0
|
||||
var buf []byte
|
||||
for _, metricID := range metricIDs {
|
||||
var err error
|
||||
buf, err = is.searchMetricNameWithCache(buf[:0], metricID)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
// It is likely the metricID->metricName entry didn't propagate to inverted index yet.
|
||||
// Skip this metricID for now.
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
|
||||
}
|
||||
if err := mn.Unmarshal(buf); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal metricName %q: %w", buf, err)
|
||||
}
|
||||
for _, tag := range mn.Tags {
|
||||
_, ok := lns[string(tag.Key)]
|
||||
if !ok {
|
||||
foundLabelNames++
|
||||
lns[string(tag.Key)] = struct{}{}
|
||||
if len(lns) >= maxLabelNames {
|
||||
qt.Printf("hit the limit on the number of unique label names: %d", maxLabelNames)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
qt.Printf("get %d distinct label names from %d metricIDs", foundLabelNames, len(metricIDs))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SearchLabelValuesWithFiltersOnTimeRange returns label values for the given labelName, tfss and tr.
|
||||
func (db *indexDB) SearchLabelValuesWithFiltersOnTimeRange(qt *querytracer.Tracer, labelName string, tfss []*TagFilters, tr TimeRange,
|
||||
maxLabelValues, maxMetrics int, deadline uint64) ([]string, error) {
|
||||
@@ -972,9 +1011,13 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnDate(qt *querytracer.Tracer
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if filter != nil && filter.Len() == 0 {
|
||||
qt.Printf("found zero label values for filter=%s", tfss)
|
||||
return nil
|
||||
if filter != nil && filter.Len() < 100e3 {
|
||||
// It is faster to obtain label values by metricIDs from the filter
|
||||
// instead of scanning the inverted index for the matching filters.
|
||||
// This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978
|
||||
metricIDs := filter.AppendTo(nil)
|
||||
qt.Printf("sort %d metricIDs", len(metricIDs))
|
||||
return is.getLabelValuesForMetricIDs(qt, lvs, labelName, metricIDs, maxLabelValues)
|
||||
}
|
||||
if labelName == "__name__" {
|
||||
// __name__ label is encoded as empty string in indexdb.
|
||||
@@ -1033,6 +1076,42 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnDate(qt *querytracer.Tracer
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *indexSearch) getLabelValuesForMetricIDs(qt *querytracer.Tracer, lvs map[string]struct{}, labelName string, metricIDs []uint64, maxLabelValues int) error {
|
||||
if labelName == "" {
|
||||
labelName = "__name__"
|
||||
}
|
||||
var mn MetricName
|
||||
foundLabelValues := 0
|
||||
var buf []byte
|
||||
for _, metricID := range metricIDs {
|
||||
var err error
|
||||
buf, err = is.searchMetricNameWithCache(buf[:0], metricID)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
// It is likely the metricID->metricName entry didn't propagate to inverted index yet.
|
||||
// Skip this metricID for now.
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
|
||||
}
|
||||
if err := mn.Unmarshal(buf); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal metricName %q: %w", buf, err)
|
||||
}
|
||||
tagValue := mn.GetTagValue(labelName)
|
||||
_, ok := lvs[string(tagValue)]
|
||||
if !ok {
|
||||
foundLabelValues++
|
||||
lvs[string(tagValue)] = struct{}{}
|
||||
if len(lvs) >= maxLabelValues {
|
||||
qt.Printf("hit the limit on the number of unique label values for label %q: %d", labelName, maxLabelValues)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
qt.Printf("get %d distinct values for label %q from %d metricIDs", foundLabelValues, labelName, len(metricIDs))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SearchTagValueSuffixes returns all the tag value suffixes for the given tagKey and tagValuePrefix on the given tr.
|
||||
//
|
||||
// This allows implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find or similar APIs.
|
||||
|
||||
@@ -569,7 +569,7 @@ func (pt *partition) addRowsPart(rows []rawRow) {
|
||||
atomic.AddUint64(&pt.smallAssistedMerges, 1)
|
||||
return
|
||||
}
|
||||
if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
|
||||
if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
|
||||
return
|
||||
}
|
||||
logger.Panicf("FATAL: cannot merge small parts: %s", err)
|
||||
@@ -871,7 +871,7 @@ func hasActiveMerges(pws []*partWrapper) bool {
|
||||
|
||||
var (
|
||||
bigMergeWorkersCount = getDefaultMergeConcurrency(4)
|
||||
smallMergeWorkersCount = getDefaultMergeConcurrency(8)
|
||||
smallMergeWorkersCount = getDefaultMergeConcurrency(16)
|
||||
)
|
||||
|
||||
func getDefaultMergeConcurrency(max int) int {
|
||||
@@ -956,7 +956,7 @@ func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error {
|
||||
// The merger has been stopped.
|
||||
return nil
|
||||
}
|
||||
if !errors.Is(err, errNothingToMerge) {
|
||||
if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) {
|
||||
return err
|
||||
}
|
||||
if finalMergeDelaySeconds > 0 && fasttime.UnixTimestamp()-lastMergeTime > finalMergeDelaySeconds {
|
||||
@@ -1012,11 +1012,13 @@ func (pt *partition) canBackgroundMerge() bool {
|
||||
return atomic.LoadUint32(pt.isReadOnly) == 0
|
||||
}
|
||||
|
||||
var errReadOnlyMode = fmt.Errorf("storage is in readonly mode")
|
||||
|
||||
func (pt *partition) mergeBigParts(isFinal bool) error {
|
||||
if !pt.canBackgroundMerge() {
|
||||
// Do not perform merge in read-only mode, since this may result in disk space shortage.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
|
||||
return nil
|
||||
return errReadOnlyMode
|
||||
}
|
||||
maxOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount)
|
||||
|
||||
@@ -1032,7 +1034,7 @@ func (pt *partition) mergeSmallParts(isFinal bool) error {
|
||||
if !pt.canBackgroundMerge() {
|
||||
// Do not perform merge in read-only mode, since this may result in disk space shortage.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
|
||||
return nil
|
||||
return errReadOnlyMode
|
||||
}
|
||||
// Try merging small parts to a big part at first.
|
||||
maxBigPartOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount)
|
||||
|
||||
@@ -699,10 +699,12 @@ func (s *Storage) currHourMetricIDsUpdater() {
|
||||
for {
|
||||
select {
|
||||
case <-s.stop:
|
||||
s.updateCurrHourMetricIDs()
|
||||
hour := fasttime.UnixHour()
|
||||
s.updateCurrHourMetricIDs(hour)
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.updateCurrHourMetricIDs()
|
||||
hour := fasttime.UnixHour()
|
||||
s.updateCurrHourMetricIDs(hour)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -715,10 +717,12 @@ func (s *Storage) nextDayMetricIDsUpdater() {
|
||||
for {
|
||||
select {
|
||||
case <-s.stop:
|
||||
s.updateNextDayMetricIDs()
|
||||
date := fasttime.UnixDate()
|
||||
s.updateNextDayMetricIDs(date)
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.updateNextDayMetricIDs()
|
||||
date := fasttime.UnixDate()
|
||||
s.updateNextDayMetricIDs(date)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2351,8 +2355,7 @@ type byDateMetricIDEntry struct {
|
||||
v uint64set.Set
|
||||
}
|
||||
|
||||
func (s *Storage) updateNextDayMetricIDs() {
|
||||
date := fasttime.UnixDate()
|
||||
func (s *Storage) updateNextDayMetricIDs(date uint64) {
|
||||
e := s.nextDayMetricIDs.Load().(*byDateMetricIDEntry)
|
||||
s.pendingNextDayMetricIDsLock.Lock()
|
||||
pendingMetricIDs := s.pendingNextDayMetricIDs
|
||||
@@ -2366,6 +2369,11 @@ func (s *Storage) updateNextDayMetricIDs() {
|
||||
// Slow path: union pendingMetricIDs with e.v
|
||||
if e.date == date {
|
||||
pendingMetricIDs.Union(&e.v)
|
||||
} else {
|
||||
// Do not add pendingMetricIDs from the previous day to the cyrrent day,
|
||||
// since this may result in missing registration of the metricIDs in the per-day inverted index.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3309
|
||||
pendingMetricIDs = &uint64set.Set{}
|
||||
}
|
||||
eNew := &byDateMetricIDEntry{
|
||||
date: date,
|
||||
@@ -2374,14 +2382,13 @@ func (s *Storage) updateNextDayMetricIDs() {
|
||||
s.nextDayMetricIDs.Store(eNew)
|
||||
}
|
||||
|
||||
func (s *Storage) updateCurrHourMetricIDs() {
|
||||
func (s *Storage) updateCurrHourMetricIDs(hour uint64) {
|
||||
hm := s.currHourMetricIDs.Load().(*hourMetricIDs)
|
||||
s.pendingHourEntriesLock.Lock()
|
||||
newMetricIDs := s.pendingHourEntries
|
||||
s.pendingHourEntries = &uint64set.Set{}
|
||||
s.pendingHourEntriesLock.Unlock()
|
||||
|
||||
hour := fasttime.UnixHour()
|
||||
if newMetricIDs.Len() == 0 && hm.hour == hour {
|
||||
// Fast path: nothing to update.
|
||||
return
|
||||
@@ -2396,7 +2403,12 @@ func (s *Storage) updateCurrHourMetricIDs() {
|
||||
m = &uint64set.Set{}
|
||||
isFull = true
|
||||
}
|
||||
m.Union(newMetricIDs)
|
||||
if hour%24 != 0 {
|
||||
// Do not add pending metricIDs from the previous hour to the current hour on the next day,
|
||||
// since this may result in missing registration of the metricIDs in the per-day inverted index.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3309
|
||||
m.Union(newMetricIDs)
|
||||
}
|
||||
hmNew := &hourMetricIDs{
|
||||
m: m,
|
||||
hour: hour,
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"testing/quick"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
|
||||
)
|
||||
|
||||
@@ -155,7 +156,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
}
|
||||
t.Run("empty_pending_metric_ids_stale_curr_hour", func(t *testing.T) {
|
||||
s := newStorage()
|
||||
hour := uint64(timestampFromTime(time.Now())) / msecPerHour
|
||||
hour := fasttime.UnixHour()
|
||||
hmOrig := &hourMetricIDs{
|
||||
m: &uint64set.Set{},
|
||||
hour: 123,
|
||||
@@ -163,7 +164,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
hmOrig.m.Add(12)
|
||||
hmOrig.m.Add(34)
|
||||
s.currHourMetricIDs.Store(hmOrig)
|
||||
s.updateCurrHourMetricIDs()
|
||||
s.updateCurrHourMetricIDs(hour)
|
||||
hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
|
||||
if hmCurr.hour != hour {
|
||||
// It is possible new hour occurred. Update the hour and verify it again.
|
||||
@@ -190,7 +191,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
})
|
||||
t.Run("empty_pending_metric_ids_valid_curr_hour", func(t *testing.T) {
|
||||
s := newStorage()
|
||||
hour := uint64(timestampFromTime(time.Now())) / msecPerHour
|
||||
hour := fasttime.UnixHour()
|
||||
hmOrig := &hourMetricIDs{
|
||||
m: &uint64set.Set{},
|
||||
hour: hour,
|
||||
@@ -198,7 +199,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
hmOrig.m.Add(12)
|
||||
hmOrig.m.Add(34)
|
||||
s.currHourMetricIDs.Store(hmOrig)
|
||||
s.updateCurrHourMetricIDs()
|
||||
s.updateCurrHourMetricIDs(hour)
|
||||
hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
|
||||
if hmCurr.hour != hour {
|
||||
// It is possible new hour occurred. Update the hour and verify it again.
|
||||
@@ -234,7 +235,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
pendingHourEntries.Add(8293432)
|
||||
s.pendingHourEntries = pendingHourEntries
|
||||
|
||||
hour := uint64(timestampFromTime(time.Now())) / msecPerHour
|
||||
hour := fasttime.UnixHour()
|
||||
hmOrig := &hourMetricIDs{
|
||||
m: &uint64set.Set{},
|
||||
hour: 123,
|
||||
@@ -242,7 +243,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
hmOrig.m.Add(12)
|
||||
hmOrig.m.Add(34)
|
||||
s.currHourMetricIDs.Store(hmOrig)
|
||||
s.updateCurrHourMetricIDs()
|
||||
s.updateCurrHourMetricIDs(hour)
|
||||
hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
|
||||
if hmCurr.hour != hour {
|
||||
// It is possible new hour occurred. Update the hour and verify it again.
|
||||
@@ -275,7 +276,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
pendingHourEntries.Add(8293432)
|
||||
s.pendingHourEntries = pendingHourEntries
|
||||
|
||||
hour := uint64(timestampFromTime(time.Now())) / msecPerHour
|
||||
hour := fasttime.UnixHour()
|
||||
hmOrig := &hourMetricIDs{
|
||||
m: &uint64set.Set{},
|
||||
hour: hour,
|
||||
@@ -283,7 +284,7 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
hmOrig.m.Add(12)
|
||||
hmOrig.m.Add(34)
|
||||
s.currHourMetricIDs.Store(hmOrig)
|
||||
s.updateCurrHourMetricIDs()
|
||||
s.updateCurrHourMetricIDs(hour)
|
||||
hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
|
||||
if hmCurr.hour != hour {
|
||||
// It is possible new hour occurred. Update the hour and verify it again.
|
||||
@@ -318,6 +319,39 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
|
||||
t.Fatalf("unexpected s.pendingHourEntries.Len(); got %d; want %d", s.pendingHourEntries.Len(), 0)
|
||||
}
|
||||
})
|
||||
t.Run("nonempty_pending_metric_ids_from_previous_hour_new_day", func(t *testing.T) {
|
||||
s := newStorage()
|
||||
|
||||
hour := fasttime.UnixHour()
|
||||
hour -= hour % 24
|
||||
|
||||
pendingHourEntries := &uint64set.Set{}
|
||||
pendingHourEntries.Add(343)
|
||||
pendingHourEntries.Add(32424)
|
||||
pendingHourEntries.Add(8293432)
|
||||
s.pendingHourEntries = pendingHourEntries
|
||||
|
||||
hmOrig := &hourMetricIDs{
|
||||
m: &uint64set.Set{},
|
||||
hour: hour - 1,
|
||||
}
|
||||
s.currHourMetricIDs.Store(hmOrig)
|
||||
s.updateCurrHourMetricIDs(hour)
|
||||
hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
|
||||
if hmCurr.hour != hour {
|
||||
t.Fatalf("unexpected hmCurr.hour; got %d; want %d", hmCurr.hour, hour)
|
||||
}
|
||||
if hmCurr.m.Len() != 0 {
|
||||
t.Fatalf("unexpected non-empty hmCurr.m; got %v", hmCurr.m.AppendTo(nil))
|
||||
}
|
||||
hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
|
||||
if !reflect.DeepEqual(hmPrev, hmOrig) {
|
||||
t.Fatalf("unexpected hmPrev; got %v; want %v", hmPrev, hmOrig)
|
||||
}
|
||||
if s.pendingHourEntries.Len() != 0 {
|
||||
t.Fatalf("unexpected s.pendingHourEntries.Len(); got %d; want %d", s.pendingHourEntries.Len(), 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestMetricRowMarshalUnmarshal(t *testing.T) {
|
||||
|
||||
71
package/release/Makefile
Normal file
71
package/release/Makefile
Normal file
@@ -0,0 +1,71 @@
|
||||
GITHUB_RELEASE_SPEC_FILE="/tmp/vm-github-release"
|
||||
GITHUB_DEBUG_FILE="/tmp/vm-github-debug"
|
||||
|
||||
github-token-check:
|
||||
ifndef GITHUB_TOKEN
|
||||
$(error missing GITHUB_TOKEN env var. It must contain github token for VictoriaMetrics project obtained from https://github.com/settings/tokens)
|
||||
endif
|
||||
|
||||
github-tag-check:
|
||||
ifndef TAG
|
||||
$(error missing TAG env var. It must contain github release tag to create)
|
||||
endif
|
||||
|
||||
github-create-release: github-token-check github-tag-check
|
||||
@result=$$(curl -o $(GITHUB_RELEASE_SPEC_FILE) -s -w "%{http_code}" \
|
||||
-X POST \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "Authorization: token $(GITHUB_TOKEN)" \
|
||||
https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases \
|
||||
-d '{"tag_name":"$(TAG)","name":"$(TAG)","body":"TODO: put here the changelog for $(TAG) release from docs/CHANGELOG.md","draft":true,"prerelease":false,"generate_release_notes":false}'); \
|
||||
if [ $${result} = 201 ]; then \
|
||||
release_id=$$(cat $(GITHUB_RELEASE_SPEC_FILE) | grep '"id"' -m 1 | sed -E 's/.* ([[:digit:]]+)\,/\1/'); \
|
||||
printf "Created release $(TAG) with id=$${release_id}\n"; \
|
||||
else \
|
||||
printf "Failed to create release $(TAG)\n"; \
|
||||
cat $(GITHUB_RELEASE_SPEC_FILE); \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
github-upload-assets:
|
||||
@release_id=$$(cat $(GITHUB_RELEASE_SPEC_FILE) | grep '"id"' -m 1 | sed -E 's/.* ([[:digit:]]+)\,/\1/'); \
|
||||
$(foreach file, $(wildcard bin/*.zip), FILE=$(file) RELEASE_ID=$${release_id} CONTENT_TYPE="application/zip" $(MAKE) github-upload-asset || exit 1;) \
|
||||
$(foreach file, $(wildcard bin/*.tar.gz), FILE=$(file) RELEASE_ID=$${release_id} CONTENT_TYPE="application/x-gzip" $(MAKE) github-upload-asset || exit 1;) \
|
||||
$(foreach file, $(wildcard bin/*_checksums.txt), FILE=$(file) RELEASE_ID=$${release_id} CONTENT_TYPE="text/plain" $(MAKE) github-upload-asset || exit 1;)
|
||||
|
||||
github-upload-asset: github-token-check
|
||||
ifndef FILE
|
||||
$(error missing FILE env var. It must contain path to file to upload to github release)
|
||||
endif
|
||||
@printf "Uploading $(FILE)\n"
|
||||
@result=$$(curl -o $(GITHUB_DEBUG_FILE) -w "%{http_code}" \
|
||||
-X POST \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "Authorization: token $(GITHUB_TOKEN)" \
|
||||
-H "Content-Type: $(CONTENT_TYPE)" \
|
||||
--data-binary "@$(FILE)" \
|
||||
https://uploads.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/$(RELEASE_ID)/assets?name=$(notdir $(FILE))); \
|
||||
if [ $${result} = 201 ]; then \
|
||||
printf "Upload OK: $${result}\n"; \
|
||||
elif [ $${result} = 422 ]; then \
|
||||
printf "Asset already uploaded, you need to delete it from UI if you want to re-upload it\n"; \
|
||||
else \
|
||||
printf "Upload failed: $${result}\n"; \
|
||||
cat $(GITHUB_DEBUG_FILE); \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
github-delete-release: github-token-check
|
||||
@release_id=$$(cat $(GITHUB_RELEASE_SPEC_FILE) | grep '"id"' -m 1 | sed -E 's/.* ([[:digit:]]+)\,/\1/'); \
|
||||
result=$$(curl -o $(GITHUB_DEBUG_FILE) -s -w "%{http_code}" \
|
||||
-X DELETE \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "Authorization: token $(GITHUB_TOKEN)" \
|
||||
https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/$${release_id}); \
|
||||
if [ $${result} = 204 ]; then \
|
||||
printf "Deleted release with id=$${release_id}\n"; \
|
||||
else \
|
||||
printf "Failed to delete release with id=$${release_id}\n"; \
|
||||
cat $(GITHUB_DEBUG_FILE); \
|
||||
exit 1; \
|
||||
fi
|
||||
@@ -1,4 +1,4 @@
|
||||
GO_VERSION ?=1.18.1
|
||||
GO_VERSION ?=1.19.3
|
||||
SNAP_BUILDER_IMAGE := local/snap-builder:2.0.0-$(shell echo $(GO_VERSION) | tr :/ __)
|
||||
|
||||
|
||||
|
||||
12
vendor/cloud.google.com/go/.gitignore
generated
vendored
12
vendor/cloud.google.com/go/.gitignore
generated
vendored
@@ -1,12 +0,0 @@
|
||||
# Editors
|
||||
.idea
|
||||
.vscode
|
||||
*.swp
|
||||
.history
|
||||
|
||||
# Test files
|
||||
*.test
|
||||
coverage.txt
|
||||
|
||||
# Other
|
||||
.DS_Store
|
||||
107
vendor/cloud.google.com/go/.release-please-manifest-submodules.json
generated
vendored
107
vendor/cloud.google.com/go/.release-please-manifest-submodules.json
generated
vendored
@@ -1,107 +0,0 @@
|
||||
{
|
||||
"accessapproval": "1.3.0",
|
||||
"accesscontextmanager": "1.2.0",
|
||||
"aiplatform": "1.14.0",
|
||||
"analytics": "0.8.0",
|
||||
"apigateway": "1.2.0",
|
||||
"apigeeconnect": "1.2.0",
|
||||
"appengine": "1.3.0",
|
||||
"area120": "0.4.0",
|
||||
"artifactregistry": "1.3.0",
|
||||
"asset": "1.3.0",
|
||||
"assuredworkloads": "1.0.0",
|
||||
"automl": "1.4.0",
|
||||
"baremetalsolution": "0.2.0",
|
||||
"batch": "0.1.0",
|
||||
"billing": "1.2.0",
|
||||
"binaryauthorization": "1.0.0",
|
||||
"certificatemanager": "0.2.0",
|
||||
"channel": "1.7.0",
|
||||
"cloudbuild": "1.2.0",
|
||||
"clouddms": "1.2.0",
|
||||
"cloudtasks": "1.4.0",
|
||||
"compute": "1.7.0",
|
||||
"contactcenterinsights": "1.2.0",
|
||||
"container": "1.2.0",
|
||||
"containeranalysis": "0.4.0",
|
||||
"datacatalog": "1.3.0",
|
||||
"dataflow": "0.5.0",
|
||||
"datafusion": "1.3.0",
|
||||
"datalabeling": "0.3.0",
|
||||
"dataplex": "1.0.0",
|
||||
"dataproc": "1.5.0",
|
||||
"dataqna": "0.4.0",
|
||||
"datastream": "1.0.0",
|
||||
"deploy": "1.2.0",
|
||||
"dialogflow": "1.11.0",
|
||||
"dlp": "1.4.0",
|
||||
"documentai": "1.4.0",
|
||||
"domains": "0.5.0",
|
||||
"essentialcontacts": "1.2.0",
|
||||
"eventarc": "1.6.0",
|
||||
"filestore": "1.2.0",
|
||||
"functions": "1.4.0",
|
||||
"gaming": "1.3.0",
|
||||
"gkebackup": "0.1.0",
|
||||
"gkeconnect": "0.3.0",
|
||||
"gkehub": "0.8.0",
|
||||
"gkemulticloud": "0.2.0",
|
||||
"grafeas": "0.2.0",
|
||||
"gsuiteaddons": "1.2.0",
|
||||
"iam": "0.3.0",
|
||||
"iap": "1.3.0",
|
||||
"ids": "1.0.0",
|
||||
"iot": "1.2.0",
|
||||
"kms": "1.4.0",
|
||||
"language": "1.3.0",
|
||||
"lifesciences": "0.4.0",
|
||||
"managedidentities": "1.2.0",
|
||||
"mediatranslation": "0.3.0",
|
||||
"memcache": "1.3.0",
|
||||
"metastore": "1.3.0",
|
||||
"monitoring": "1.5.0",
|
||||
"networkconnectivity": "1.2.0",
|
||||
"networkmanagement": "1.3.0",
|
||||
"networksecurity": "0.3.0",
|
||||
"notebooks": "1.0.0",
|
||||
"optimization": "1.0.0",
|
||||
"orchestration": "1.2.0",
|
||||
"orgpolicy": "1.3.0",
|
||||
"osconfig": "1.6.0",
|
||||
"oslogin": "1.3.0",
|
||||
"phishingprotection": "0.4.0",
|
||||
"policytroubleshooter": "1.2.0",
|
||||
"privatecatalog": "0.4.0",
|
||||
"recaptchaenterprise/v2": "2.0.1",
|
||||
"recommendationengine": "0.2.0",
|
||||
"recommender": "1.4.0",
|
||||
"redis": "1.6.0",
|
||||
"resourcemanager": "1.2.0",
|
||||
"resourcesettings": "1.2.0",
|
||||
"retail": "1.4.0",
|
||||
"run": "0.1.1",
|
||||
"scheduler": "1.3.0",
|
||||
"secretmanager": "1.5.0",
|
||||
"security": "1.4.0",
|
||||
"securitycenter": "1.8.0",
|
||||
"servicecontrol": "1.3.0",
|
||||
"servicedirectory": "1.3.0",
|
||||
"servicemanagement": "1.3.0",
|
||||
"serviceusage": "1.2.0",
|
||||
"shell": "1.2.0",
|
||||
"speech": "1.5.0",
|
||||
"storagetransfer": "1.3.0",
|
||||
"talent": "0.9.0",
|
||||
"texttospeech": "1.3.0",
|
||||
"tpu": "1.2.0",
|
||||
"trace": "1.2.0",
|
||||
"translate": "1.2.0",
|
||||
"video": "1.7.0",
|
||||
"videointelligence": "1.4.0",
|
||||
"vision/v2": "2.0.0",
|
||||
"vmmigration": "1.0.0",
|
||||
"vpcaccess": "1.2.0",
|
||||
"webrisk": "1.3.0",
|
||||
"websecurityscanner": "1.2.0",
|
||||
"workflows": "1.5.0"
|
||||
}
|
||||
3
vendor/cloud.google.com/go/.release-please-manifest.json
generated
vendored
3
vendor/cloud.google.com/go/.release-please-manifest.json
generated
vendored
@@ -1,3 +0,0 @@
|
||||
{
|
||||
".": "0.103.0"
|
||||
}
|
||||
2417
vendor/cloud.google.com/go/CHANGES.md
generated
vendored
2417
vendor/cloud.google.com/go/CHANGES.md
generated
vendored
File diff suppressed because it is too large
Load Diff
44
vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
generated
vendored
44
vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
generated
vendored
@@ -1,44 +0,0 @@
|
||||
# Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project,
|
||||
and in the interest of fostering an open and welcoming community,
|
||||
we pledge to respect all people who contribute through reporting issues,
|
||||
posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project
|
||||
a harassment-free experience for everyone,
|
||||
regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information,
|
||||
such as physical or electronic
|
||||
addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct.
|
||||
By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently
|
||||
applying these principles to every aspect of managing this project.
|
||||
Project maintainers who do not follow or enforce the Code of Conduct
|
||||
may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported by opening an issue
|
||||
or contacting one or more of the project maintainers.
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
|
||||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
|
||||
|
||||
327
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
327
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
@@ -1,327 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
|
||||
The issue will be used to discuss the bug or feature and should be created
|
||||
before sending a PR.
|
||||
|
||||
1. [Install Go](https://golang.org/dl/).
|
||||
1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
|
||||
is in your `PATH`.
|
||||
1. Check it's working by running `go version`.
|
||||
* If it doesn't work, check the install location, usually
|
||||
`/usr/local/go`, is on your `PATH`.
|
||||
|
||||
1. Sign one of the
|
||||
[contributor license agreements](#contributor-license-agreements) below.
|
||||
|
||||
1. Clone the repo:
|
||||
`git clone https://github.com/googleapis/google-cloud-go`
|
||||
|
||||
1. Change into the checked out source:
|
||||
`cd google-cloud-go`
|
||||
|
||||
1. Fork the repo.
|
||||
|
||||
1. Set your fork as a remote:
|
||||
`git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
|
||||
|
||||
1. Make changes, commit to your fork.
|
||||
|
||||
Commit messages should follow the
|
||||
[Conventional Commits Style](https://www.conventionalcommits.org). The scope
|
||||
portion should always be filled with the name of the package affected by the
|
||||
changes being made. For example:
|
||||
```
|
||||
feat(functions): add gophers codelab
|
||||
```
|
||||
|
||||
1. Send a pull request with your changes.
|
||||
|
||||
To minimize friction, consider setting `Allow edits from maintainers` on the
|
||||
PR, which will enable project committers and automation to update your PR.
|
||||
|
||||
1. A maintainer will review the pull request and make comments.
|
||||
|
||||
Prefer adding additional commits over amending and force-pushing since it can
|
||||
be difficult to follow code reviews when the commit history changes.
|
||||
|
||||
Commits will be squashed when they're merged.
|
||||
|
||||
## Testing
|
||||
|
||||
We test code against two versions of Go, the minimum and maximum versions
|
||||
supported by our clients. To see which versions these are checkout our
|
||||
[README](README.md#supported-versions).
|
||||
|
||||
### Integration Tests
|
||||
|
||||
In addition to the unit tests, you may run the integration test suite. These
|
||||
directions describe setting up your environment to run integration tests for
|
||||
_all_ packages: note that many of these instructions may be redundant if you
|
||||
intend only to run integration tests on a single package.
|
||||
|
||||
#### GCP Setup
|
||||
|
||||
To run the integrations tests, creation and configuration of two projects in
|
||||
the Google Developers Console is required: one specifically for Firestore
|
||||
integration tests, and another for all other integration tests. We'll refer to
|
||||
these projects as "general project" and "Firestore project".
|
||||
|
||||
After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
|
||||
for each project. Ensure the project-level **Owner**
|
||||
[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to
|
||||
each service account. During the creation of the service account, you should
|
||||
download the JSON credential file for use later.
|
||||
|
||||
Next, ensure the following APIs are enabled in the general project:
|
||||
|
||||
- BigQuery API
|
||||
- BigQuery Data Transfer API
|
||||
- Cloud Dataproc API
|
||||
- Cloud Dataproc Control API Private
|
||||
- Cloud Datastore API
|
||||
- Cloud Firestore API
|
||||
- Cloud Key Management Service (KMS) API
|
||||
- Cloud Natural Language API
|
||||
- Cloud OS Login API
|
||||
- Cloud Pub/Sub API
|
||||
- Cloud Resource Manager API
|
||||
- Cloud Spanner API
|
||||
- Cloud Speech API
|
||||
- Cloud Translation API
|
||||
- Cloud Video Intelligence API
|
||||
- Cloud Vision API
|
||||
- Compute Engine API
|
||||
- Compute Engine Instance Group Manager API
|
||||
- Container Registry API
|
||||
- Firebase Rules API
|
||||
- Google Cloud APIs
|
||||
- Google Cloud Deployment Manager V2 API
|
||||
- Google Cloud SQL
|
||||
- Google Cloud Storage
|
||||
- Google Cloud Storage JSON API
|
||||
- Google Compute Engine Instance Group Updater API
|
||||
- Google Compute Engine Instance Groups API
|
||||
- Kubernetes Engine API
|
||||
- Cloud Error Reporting API
|
||||
- Pub/Sub Lite API
|
||||
|
||||
Next, create a Datastore database in the general project, and a Firestore
|
||||
database in the Firestore project.
|
||||
|
||||
Finally, in the general project, create an API key for the translate API:
|
||||
|
||||
- Go to GCP Developer Console.
|
||||
- Navigate to APIs & Services > Credentials.
|
||||
- Click Create Credentials > API Key.
|
||||
- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
|
||||
|
||||
#### Local Setup
|
||||
|
||||
Once the two projects are created and configured, set the following environment
|
||||
variables:
|
||||
|
||||
- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
|
||||
bamboo-shift-455) for the general project.
|
||||
- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
|
||||
project's service account.
|
||||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
|
||||
(e.g. doorway-cliff-677) for the Firestore project.
|
||||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
|
||||
Firestore project's service account.
|
||||
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
|
||||
|
||||
As part of the setup that follows, the following variables will be configured:
|
||||
|
||||
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
|
||||
in the form
|
||||
"projects/P/locations/L/keyRings/R". The creation of this is described below.
|
||||
- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests,
|
||||
in the form
|
||||
"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region.
|
||||
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
|
||||
|
||||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
|
||||
create some resources used in integration tests.
|
||||
|
||||
From the project's root directory:
|
||||
|
||||
``` sh
|
||||
# Sets the default project in your env.
|
||||
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
|
||||
# Authenticates the gcloud tool with your account.
|
||||
$ gcloud auth login
|
||||
|
||||
# Create the indexes used in the datastore integration tests.
|
||||
$ gcloud datastore indexes create datastore/testdata/index.yaml
|
||||
|
||||
# Creates a Google Cloud storage bucket with the same name as your test project,
|
||||
# and with the Cloud Logging service account as owner, for the sink
|
||||
# integration tests in logging.
|
||||
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
|
||||
# Creates a PubSub topic for integration tests of storage notifications.
|
||||
$ gcloud beta pubsub topics create go-storage-notification-test
|
||||
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
|
||||
# "service-<numeric project id>@gs-project-accounts.iam.gserviceaccount.com"
|
||||
# as a publisher to that topic.
|
||||
|
||||
# Creates a Spanner instance for the spanner integration tests.
|
||||
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
|
||||
# NOTE: Spanner instances are priced by the node-hour, so you may want to
|
||||
# delete the instance after testing with 'gcloud beta spanner instances delete'.
|
||||
|
||||
$ export MY_KEYRING=some-keyring-name
|
||||
$ export MY_LOCATION=global
|
||||
$ export MY_SINGLE_LOCATION=us-central1
|
||||
# Creates a KMS keyring, in the same location as the default location for your
|
||||
# project's buckets.
|
||||
$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
|
||||
# Creates two keys in the keyring, named key1 and key2.
|
||||
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
|
||||
$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
|
||||
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
|
||||
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
|
||||
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
|
||||
$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
|
||||
|
||||
# Create KMS Key in one region for Bigtable
|
||||
$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION
|
||||
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption
|
||||
# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable.
|
||||
$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING
|
||||
# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud:
|
||||
$ gcloud beta services identity create \
|
||||
--service=bigtableadmin.googleapis.com \
|
||||
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
# Note the service agent email for the agent created.
|
||||
$ export SERVICE_AGENT_EMAIL=<service agent email, from last step>
|
||||
|
||||
# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1
|
||||
$ gcloud kms keys add-iam-policy-binding key1 \
|
||||
--keyring $MY_KEYRING \
|
||||
--location $MY_SINGLE_LOCATION \
|
||||
--role roles/cloudkms.cryptoKeyEncrypterDecrypter \
|
||||
--member "serviceAccount:$SERVICE_AGENT_EMAIL" \
|
||||
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
```
|
||||
|
||||
It may be useful to add exports to your shell initialization for future use.
|
||||
For instance, in `.zshrc`:
|
||||
|
||||
```sh
|
||||
#### START GO SDK Test Variables
|
||||
# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
|
||||
export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project
|
||||
|
||||
# The path to the JSON key file of the general project's service account.
|
||||
export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json
|
||||
|
||||
# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
|
||||
export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project
|
||||
|
||||
# The path to the JSON key file of the Firestore project's service account.
|
||||
export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json
|
||||
|
||||
# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
|
||||
# The creation of this is described below.
|
||||
export MY_KEYRING=my-golang-sdk-test
|
||||
export MY_LOCATION=global
|
||||
export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
|
||||
|
||||
# API key for using the Translate API.
|
||||
export GCLOUD_TESTS_API_KEY=abcdefghijk123456789
|
||||
|
||||
# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
|
||||
export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
|
||||
#### END GO SDK Test Variables
|
||||
```
|
||||
|
||||
#### Running
|
||||
|
||||
Once you've done the necessary setup, you can run the integration tests by
|
||||
running:
|
||||
|
||||
``` sh
|
||||
$ go test -v ./...
|
||||
```
|
||||
|
||||
Note that the above command will not run the tests in other modules. To run
|
||||
tests on other modules, first navigate to the appropriate
|
||||
subdirectory. For instance, to run only the tests for datastore:
|
||||
``` sh
|
||||
$ cd datastore
|
||||
$ go test -v ./...
|
||||
```
|
||||
|
||||
#### Replay
|
||||
|
||||
Some packages can record the RPCs during integration tests to a file for
|
||||
subsequent replay. To record, pass the `-record` flag to `go test`. The
|
||||
recording will be saved to the _package_`.replay` file. To replay integration
|
||||
tests from a saved recording, the replay file must be present, the `-short`
|
||||
flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
|
||||
environment variable must have a non-empty value.
|
||||
|
||||
## Contributor License Agreements
|
||||
|
||||
Before we can accept your pull requests you'll need to sign a Contributor
|
||||
License Agreement (CLA):
|
||||
|
||||
- **If you are an individual writing original source code** and **you own the
|
||||
intellectual property**, then you'll need to sign an [individual CLA][indvcla].
|
||||
- **If you work for a company that wants to allow you to contribute your
|
||||
work**, then you'll need to sign a [corporate CLA][corpcla].
|
||||
|
||||
You can sign these electronically (just scroll to the bottom). After that,
|
||||
we'll be able to accept your pull requests.
|
||||
|
||||
## Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project,
|
||||
and in the interest of fostering an open and welcoming community,
|
||||
we pledge to respect all people who contribute through reporting issues,
|
||||
posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project
|
||||
a harassment-free experience for everyone,
|
||||
regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information,
|
||||
such as physical or electronic
|
||||
addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct.
|
||||
By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently
|
||||
applying these principles to every aspect of managing this project.
|
||||
Project maintainers who do not follow or enforce the Code of Conduct
|
||||
may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported by opening an issue
|
||||
or contacting one or more of the project maintainers.
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0,
|
||||
available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/)
|
||||
|
||||
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
|
||||
[indvcla]: https://developers.google.com/open-source/cla/individual
|
||||
[corpcla]: https://developers.google.com/open-source/cla/corporate
|
||||
138
vendor/cloud.google.com/go/README.md
generated
vendored
138
vendor/cloud.google.com/go/README.md
generated
vendored
@@ -1,138 +0,0 @@
|
||||
# Google Cloud Client Libraries for Go
|
||||
|
||||
[](https://pkg.go.dev/cloud.google.com/go)
|
||||
|
||||
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
|
||||
|
||||
``` go
|
||||
import "cloud.google.com/go"
|
||||
```
|
||||
|
||||
To install the packages on your system, *do not clone the repo*. Instead:
|
||||
|
||||
1. Change to your project directory:
|
||||
|
||||
```bash
|
||||
cd /my/cloud/project
|
||||
```
|
||||
1. Get the package you want to use. Some products have their own module, so it's
|
||||
best to `go get` the package(s) you want to use:
|
||||
|
||||
```
|
||||
$ go get cloud.google.com/go/firestore # Replace with the package you want to use.
|
||||
```
|
||||
|
||||
**NOTE:** Some of these packages are under development, and may occasionally
|
||||
make backwards-incompatible changes.
|
||||
|
||||
## Supported APIs
|
||||
|
||||
For an updated list of all of our released APIs please see our
|
||||
[reference docs](https://cloud.google.com/go/docs/reference).
|
||||
|
||||
## [Go Versions Supported](#supported-versions)
|
||||
|
||||
Our libraries are compatible with at least the three most recent, major Go
|
||||
releases. They are currently compatible with:
|
||||
|
||||
- Go 1.18
|
||||
- Go 1.17
|
||||
- Go 1.16
|
||||
- Go 1.15
|
||||
|
||||
## Authorization
|
||||
|
||||
By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
|
||||
for authorization credentials used in calling the API endpoints. This will allow your
|
||||
application to run in many environments without requiring explicit configuration.
|
||||
|
||||
[snip]:# (auth)
|
||||
```go
|
||||
client, err := storage.NewClient(ctx)
|
||||
```
|
||||
|
||||
To authorize using a
|
||||
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
|
||||
pass
|
||||
[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile)
|
||||
to the `NewClient` function of the desired package. For example:
|
||||
|
||||
[snip]:# (auth-JSON)
|
||||
```go
|
||||
client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
|
||||
```
|
||||
|
||||
You can exert more control over authorization by using the
|
||||
[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to
|
||||
create an `oauth2.TokenSource`. Then pass
|
||||
[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource)
|
||||
to the `NewClient` function:
|
||||
[snip]:# (auth-ts)
|
||||
```go
|
||||
tokenSource := ...
|
||||
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome. Please, see the
|
||||
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
|
||||
document for details.
|
||||
|
||||
Please note that this project is released with a Contributor Code of Conduct.
|
||||
By participating in this project you agree to abide by its terms.
|
||||
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
|
||||
for more information.
|
||||
|
||||
[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
|
||||
[cloud-automl]: https://cloud.google.com/automl
|
||||
[cloud-build]: https://cloud.google.com/cloud-build/
|
||||
[cloud-bigquery]: https://cloud.google.com/bigquery/
|
||||
[cloud-bigtable]: https://cloud.google.com/bigtable/
|
||||
[cloud-compute]: https://cloud.google.com/compute
|
||||
[cloud-container]: https://cloud.google.com/containers/
|
||||
[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
|
||||
[cloud-dataproc]: https://cloud.google.com/dataproc/
|
||||
[cloud-datastore]: https://cloud.google.com/datastore/
|
||||
[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
|
||||
[cloud-debugger]: https://cloud.google.com/debugger/
|
||||
[cloud-dlp]: https://cloud.google.com/dlp/
|
||||
[cloud-errors]: https://cloud.google.com/error-reporting/
|
||||
[cloud-firestore]: https://cloud.google.com/firestore/
|
||||
[cloud-iam]: https://cloud.google.com/iam/
|
||||
[cloud-iot]: https://cloud.google.com/iot-core/
|
||||
[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts
|
||||
[cloud-kms]: https://cloud.google.com/kms/
|
||||
[cloud-pubsub]: https://cloud.google.com/pubsub/
|
||||
[cloud-pubsublite]: https://cloud.google.com/pubsub/lite
|
||||
[cloud-storage]: https://cloud.google.com/storage/
|
||||
[cloud-language]: https://cloud.google.com/natural-language
|
||||
[cloud-logging]: https://cloud.google.com/logging/
|
||||
[cloud-natural-language]: https://cloud.google.com/natural-language/
|
||||
[cloud-memorystore]: https://cloud.google.com/memorystore/
|
||||
[cloud-monitoring]: https://cloud.google.com/monitoring/
|
||||
[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
|
||||
[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
|
||||
[cloud-securitycenter]: https://cloud.google.com/security-command-center/
|
||||
[cloud-scheduler]: https://cloud.google.com/scheduler
|
||||
[cloud-spanner]: https://cloud.google.com/spanner/
|
||||
[cloud-speech]: https://cloud.google.com/speech
|
||||
[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
|
||||
[cloud-tasks]: https://cloud.google.com/tasks/
|
||||
[cloud-texttospeech]: https://cloud.google.com/texttospeech/
|
||||
[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
|
||||
[cloud-trace]: https://cloud.google.com/trace/
|
||||
[cloud-translate]: https://cloud.google.com/translate
|
||||
[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/
|
||||
[cloud-recommender]: https://cloud.google.com/recommendations/
|
||||
[cloud-video]: https://cloud.google.com/video-intelligence/
|
||||
[cloud-vision]: https://cloud.google.com/vision
|
||||
[cloud-webrisk]: https://cloud.google.com/web-risk/
|
||||
|
||||
## Links
|
||||
|
||||
- [Go on Google Cloud](https://cloud.google.com/go/home)
|
||||
- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started)
|
||||
- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart)
|
||||
- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go)
|
||||
- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go)
|
||||
141
vendor/cloud.google.com/go/RELEASING.md
generated
vendored
141
vendor/cloud.google.com/go/RELEASING.md
generated
vendored
@@ -1,141 +0,0 @@
|
||||
# Releasing
|
||||
|
||||
## Determine which module to release
|
||||
|
||||
The Go client libraries have several modules. Each module does not strictly
|
||||
correspond to a single library - they correspond to trees of directories. If a
|
||||
file needs to be released, you must release the closest ancestor module.
|
||||
|
||||
To see all modules:
|
||||
|
||||
```bash
|
||||
$ cat `find . -name go.mod` | grep module
|
||||
module cloud.google.com/go/pubsub
|
||||
module cloud.google.com/go/spanner
|
||||
module cloud.google.com/go
|
||||
module cloud.google.com/go/bigtable
|
||||
module cloud.google.com/go/bigquery
|
||||
module cloud.google.com/go/storage
|
||||
module cloud.google.com/go/pubsublite
|
||||
module cloud.google.com/go/firestore
|
||||
module cloud.google.com/go/logging
|
||||
module cloud.google.com/go/internal/gapicgen
|
||||
module cloud.google.com/go/internal/godocfx
|
||||
module cloud.google.com/go/internal/examples/fake
|
||||
module cloud.google.com/go/internal/examples/mock
|
||||
module cloud.google.com/go/datastore
|
||||
```
|
||||
|
||||
The `cloud.google.com/go` is the repository root module. Each other module is
|
||||
a submodule.
|
||||
|
||||
So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest
|
||||
ancestor module is `cloud.google.com/go/bigtable` - so you should release a new
|
||||
version of the `cloud.google.com/go/bigtable` submodule.
|
||||
|
||||
If you need to release a change in `asset/apiv1/asset_client.go`, the closest
|
||||
ancestor module is `cloud.google.com/go` - so you should release a new version
|
||||
of the `cloud.google.com/go` repository root module. Note: releasing
|
||||
`cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
|
||||
They are released entirely independently.
|
||||
|
||||
## Test failures
|
||||
|
||||
If there are any test failures in the Kokoro build, releases are blocked until
|
||||
the failures have been resolved.
|
||||
|
||||
## How to release
|
||||
|
||||
### Automated Releases (`cloud.google.com/go` and submodules)
|
||||
|
||||
We now use [release-please](https://github.com/googleapis/release-please) to
|
||||
perform automated releases for `cloud.google.com/go` and all submodules.
|
||||
|
||||
1. If there are changes that have not yet been released, a
|
||||
[pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will
|
||||
be automatically opened by release-please
|
||||
with a title like "chore: release X.Y.Z" (for the root module) or
|
||||
"chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z
|
||||
is the next version to be released. Find the desired pull request
|
||||
[here](https://github.com/googleapis/google-cloud-go/pulls)
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release. (This applies even if the failures are in a different submodule
|
||||
from the one being released.)
|
||||
1. Review the release notes. These are automatically generated from the titles
|
||||
of any merged commits since the previous release. If you would like to edit
|
||||
them, this can be done by updating the changes in the release PR.
|
||||
1. To cut a release, approve and merge the pull request. Doing so will
|
||||
update the `CHANGES.md`, tag the merged commit with the appropriate version,
|
||||
and draft a GitHub release which will copy the notes from `CHANGES.md`.
|
||||
|
||||
### Manual Release (`cloud.google.com/go`)
|
||||
|
||||
If for whatever reason the automated release process is not working as expected,
|
||||
here is how to manually cut a release of `cloud.google.com/go`.
|
||||
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release.
|
||||
1. Navigate to `google-cloud-go/` and switch to main.
|
||||
1. `git pull`
|
||||
1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
|
||||
The current latest tag `$CV` is the largest tag. It should look something
|
||||
like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a
|
||||
specific library, not the module root). We'll call the current version `$CV`
|
||||
and the new version `$NV`.
|
||||
1. On main, run `git log $CV...` to list all the changes since the last
|
||||
release. NOTE: You must manually visually parse out changes to submodules [1]
|
||||
(the `git log` is going to show you things in submodules, which are not going
|
||||
to be part of your release).
|
||||
1. Edit `CHANGES.md` to include a summary of the changes.
|
||||
1. In `internal/version/version.go`, update `const Repo` to today's date with
|
||||
the format `YYYYMMDD`.
|
||||
1. In `internal/version` run `go generate`.
|
||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
||||
and create a PR titled `chore: release $NV`.
|
||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||
merging any other PRs in the meantime:
|
||||
a. Switch to main.
|
||||
b. `git pull`
|
||||
c. Tag the repo with the next version: `git tag $NV`.
|
||||
d. Push the tag to origin:
|
||||
`git push origin $NV`
|
||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||
with the new release, copying the contents of `CHANGES.md`.
|
||||
|
||||
### Manual Releases (submodules)
|
||||
|
||||
If for whatever reason the automated release process is not working as expected,
|
||||
here is how to manually cut a release of a submodule.
|
||||
|
||||
(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
|
||||
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release. (This applies even if the failures are in a different submodule
|
||||
from the one being released.)
|
||||
1. Navigate to `google-cloud-go/` and switch to main.
|
||||
1. `git pull`
|
||||
1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
|
||||
existing releases. The current latest tag `$CV` is the largest tag. It
|
||||
should look something like `datastore/vX.Y.Z`. We'll call the current version
|
||||
`$CV` and the new version `$NV`.
|
||||
1. On main, run `git log $CV.. -- datastore/` to list all the changes to the
|
||||
submodule directory since the last release.
|
||||
1. Edit `datastore/CHANGES.md` to include a summary of the changes.
|
||||
1. In `internal/version` run `go generate`.
|
||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
||||
and create a PR titled `chore(datastore): release $NV`.
|
||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||
merging any other PRs in the meantime:
|
||||
a. Switch to main.
|
||||
b. `git pull`
|
||||
c. Tag the repo with the next version: `git tag $NV`.
|
||||
d. Push the tag to origin:
|
||||
`git push origin $NV`
|
||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||
with the new release, copying the contents of `datastore/CHANGES.md`.
|
||||
7
vendor/cloud.google.com/go/SECURITY.md
generated
vendored
7
vendor/cloud.google.com/go/SECURITY.md
generated
vendored
@@ -1,7 +0,0 @@
|
||||
# Security Policy
|
||||
|
||||
To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
|
||||
|
||||
The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
|
||||
|
||||
We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
|
||||
@@ -1,10 +1,10 @@
|
||||
// Copyright 2021 Google LLC
|
||||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// package adapters provides helper functions for the google.type protobuf
|
||||
// messages (Decimal, Fraction, etc.).
|
||||
package adapters
|
||||
package internal
|
||||
|
||||
// Version is the current tagged release of the library.
|
||||
const Version = "1.12.1"
|
||||
5
vendor/cloud.google.com/go/compute/metadata/CHANGES.md
generated
vendored
Normal file
5
vendor/cloud.google.com/go/compute/metadata/CHANGES.md
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
# Changes
|
||||
|
||||
## [0.1.0] (2022-10-26)
|
||||
|
||||
Initial release of metadata being it's own module.
|
||||
@@ -193,7 +193,7 @@
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
27
vendor/cloud.google.com/go/compute/metadata/README.md
generated
vendored
Normal file
27
vendor/cloud.google.com/go/compute/metadata/README.md
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
# Compute API
|
||||
|
||||
[](https://pkg.go.dev/cloud.google.com/go/compute/metadata)
|
||||
|
||||
This is a utility library for communicating with Google Cloud metadata service
|
||||
on Google Cloud.
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get cloud.google.com/go/compute/metadata
|
||||
```
|
||||
|
||||
## Go Version Support
|
||||
|
||||
See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
|
||||
section in the root directory's README.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
|
||||
document for details.
|
||||
|
||||
Please note that this project is released with a Contributor Code of Conduct.
|
||||
By participating in this project you agree to abide by its terms. See
|
||||
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
|
||||
for more information.
|
||||
1
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
1
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@@ -71,6 +71,7 @@ func newDefaultHTTPClient() *http.Client {
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,12 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file, and the cloud.google.com/go import, won't actually become part of
|
||||
// This file, and the {{.RootMod}} import, won't actually become part of
|
||||
// the resultant binary.
|
||||
//go:build modhack
|
||||
// +build modhack
|
||||
|
||||
package iam
|
||||
package metadata
|
||||
|
||||
// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
|
||||
import _ "cloud.google.com/go"
|
||||
import _ "cloud.google.com/go/compute/internal"
|
||||
227
vendor/cloud.google.com/go/doc.go
generated
vendored
227
vendor/cloud.google.com/go/doc.go
generated
vendored
@@ -1,227 +0,0 @@
|
||||
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package cloud is the root of the packages used to access Google Cloud
|
||||
Services. See https://godoc.org/cloud.google.com/go for a full list
|
||||
of sub-packages.
|
||||
|
||||
|
||||
Client Options
|
||||
|
||||
All clients in sub-packages are configurable via client options. These options are
|
||||
described here: https://godoc.org/google.golang.org/api/option.
|
||||
|
||||
|
||||
Authentication and Authorization
|
||||
|
||||
All the clients in sub-packages support authentication via Google Application Default
|
||||
Credentials (see https://cloud.google.com/docs/authentication/production), or
|
||||
by providing a JSON key file for a Service Account. See examples below.
|
||||
|
||||
Google Application Default Credentials (ADC) is the recommended way to authorize
|
||||
and authenticate clients. For information on how to create and obtain
|
||||
Application Default Credentials, see
|
||||
https://cloud.google.com/docs/authentication/production. Here is an example
|
||||
of a client using ADC to authenticate:
|
||||
client, err := secretmanager.NewClient(context.Background())
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
You can use a file with credentials to authenticate and authorize, such as a JSON
|
||||
key file associated with a Google service account. Service Account keys can be
|
||||
created and downloaded from
|
||||
https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses
|
||||
the Secret Manger client, but the same steps apply to the other client libraries
|
||||
underneath this package. Example:
|
||||
client, err := secretmanager.NewClient(context.Background(),
|
||||
option.WithCredentialsFile("/path/to/service-account-key.json"))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
In some cases (for instance, you don't want to store secrets on disk), you can
|
||||
create credentials from in-memory JSON and use the WithCredentials option.
|
||||
The google package in this example is at golang.org/x/oauth2/google.
|
||||
This example uses the Secret Manager client, but the same steps apply to
|
||||
the other client libraries underneath this package. Note that scopes can be
|
||||
found at https://developers.google.com/identity/protocols/oauth2/scopes, and
|
||||
are also provided in all auto-generated libraries: for example,
|
||||
cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
|
||||
ctx := context.Background()
|
||||
creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
|
||||
Timeouts and Cancellation
|
||||
|
||||
By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
|
||||
context provided at call time, unless a context deadline is already set. Streaming
|
||||
methods have no default deadline and will run indefinitely. To set timeouts or
|
||||
arrange for cancellation, use contexts. Transient
|
||||
errors will be retried when correctness allows.
|
||||
|
||||
Here is an example of how to set a timeout for an RPC, use context.WithTimeout:
|
||||
ctx := context.Background()
|
||||
// Do not set a timeout on the context passed to NewClient: dialing happens
|
||||
// asynchronously, and the context is used to refresh credentials in the
|
||||
// background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Time out if it takes more than 10 seconds to create a dataset.
|
||||
tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
|
||||
if err := client.DeleteSecret(tctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
|
||||
Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel:
|
||||
ctx := context.Background()
|
||||
// Do not cancel the context passed to NewClient: dialing happens asynchronously,
|
||||
// and the context is used to refresh credentials in the background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
// TODO: Make the cancel function available to whatever might want to cancel the
|
||||
// call--perhaps a GUI button.
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
|
||||
if err := client.DeleteSecret(cctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
|
||||
To opt out of default deadlines, set the temporary environment variable
|
||||
GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
|
||||
creation. This affects all Google Cloud Go client libraries. This opt-out
|
||||
mechanism will be removed in a future release. File an issue at
|
||||
https://github.com/googleapis/google-cloud-go if the default deadlines
|
||||
cannot work for you.
|
||||
|
||||
Do not attempt to control the initial connection (dialing) of a service by setting a
|
||||
timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
|
||||
would be ineffective and would only interfere with credential refreshing, which uses
|
||||
the same context.
|
||||
|
||||
|
||||
Connection Pooling
|
||||
|
||||
Connection pooling differs in clients based on their transport. Cloud
|
||||
clients either rely on HTTP or gRPC transports to communicate
|
||||
with Google Cloud.
|
||||
|
||||
Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the
|
||||
underlying HTTP transport to cache connections for later re-use. These are cached to
|
||||
the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
|
||||
http.DefaultTransport.
|
||||
|
||||
For gRPC clients (all others in this repo), connection pooling is configurable. Users
|
||||
of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
|
||||
option to NewClient calls. This configures the underlying gRPC connections to be
|
||||
pooled and addressed in a round robin fashion.
|
||||
|
||||
|
||||
Using the Libraries with Docker
|
||||
|
||||
Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
|
||||
hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
|
||||
for more information.
|
||||
|
||||
|
||||
Debugging
|
||||
|
||||
To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
|
||||
https://godoc.org/google.golang.org/grpc/grpclog for more information.
|
||||
|
||||
For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
|
||||
|
||||
|
||||
Inspecting errors
|
||||
|
||||
Most of the errors returned by the generated clients are wrapped in an
|
||||
`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror)
|
||||
and can be further unwrapped into a `grpc.Status` or `googleapi.Error` depending
|
||||
on the transport used to make the call (gRPC or REST). Converting your errors to
|
||||
these types can be a useful way to get more information about what went wrong
|
||||
while debugging.
|
||||
|
||||
`apierror.APIError` gives access to specific details in the
|
||||
error. The transport-specific errors can still be unwrapped using the
|
||||
`apierror.APIError`.
|
||||
if err != nil {
|
||||
var ae *apierror.APIError
|
||||
if errors.As(err, &ae) {
|
||||
log.Println(ae.Reason())
|
||||
log.Println(ae.Details().Help.GetLinks())
|
||||
}
|
||||
}
|
||||
|
||||
If the gRPC transport was used, the `grpc.Status` can still be parsed using the
|
||||
`status.FromError` function.
|
||||
if err != nil {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
log.Println(s.Message())
|
||||
for _, d := range s.Proto().Details {
|
||||
log.Println(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
If the REST transport was used, the `googleapi.Error` can be parsed in a similar
|
||||
way.
|
||||
if err != nil {
|
||||
var gerr *googleapi.Error
|
||||
if errors.As(err, &gerr) {
|
||||
log.Println(gerr.Message)
|
||||
}
|
||||
}
|
||||
|
||||
Client Stability
|
||||
|
||||
Clients in this repository are considered alpha or beta unless otherwise
|
||||
marked as stable in the README.md. Semver is not used to communicate stability
|
||||
of clients.
|
||||
|
||||
Alpha and beta clients may change or go away without notice.
|
||||
|
||||
Clients marked stable will maintain compatibility with future versions for as
|
||||
long as we can reasonably sustain. Incompatible changes might be made in some
|
||||
situations, including:
|
||||
|
||||
- Security bugs may prompt backwards-incompatible changes.
|
||||
|
||||
- Situations in which components are no longer feasible to maintain without
|
||||
making breaking changes, including removal.
|
||||
|
||||
- Parts of the client surface may be outright unstable and subject to change.
|
||||
These parts of the surface will be labeled with the note, "It is EXPERIMENTAL
|
||||
and subject to change or removal without notice."
|
||||
*/
|
||||
package cloud // import "cloud.google.com/go"
|
||||
28
vendor/cloud.google.com/go/iam/CHANGES.md
generated
vendored
28
vendor/cloud.google.com/go/iam/CHANGES.md
generated
vendored
@@ -1,5 +1,33 @@
|
||||
# Changes
|
||||
|
||||
## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))
|
||||
|
||||
## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc))
|
||||
|
||||
## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7))
|
||||
|
||||
## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50))
|
||||
|
||||
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23)
|
||||
|
||||
|
||||
|
||||
237
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
237
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
@@ -62,6 +62,24 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/apigeeregistry/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/apigeeregistry/apiv1",
|
||||
"description": "Apigee Registry API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/apikeys/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/apikeys/apiv2",
|
||||
"description": "API Keys API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/appengine/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/appengine/apiv1",
|
||||
"description": "App Engine Admin API",
|
||||
@@ -80,6 +98,15 @@
|
||||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/artifactregistry/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/artifactregistry/apiv1",
|
||||
"description": "Artifact Registry API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/artifactregistry/apiv1beta2": {
|
||||
"distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2",
|
||||
"description": "Artifact Registry API",
|
||||
@@ -170,6 +197,51 @@
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appconnections/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appconnectors/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appgateways/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/clientgateways/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery",
|
||||
"description": "BigQuery",
|
||||
@@ -179,6 +251,15 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_MANUAL"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/analyticshub/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1",
|
||||
"description": "Analytics Hub API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/connection/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/connection/apiv1",
|
||||
"description": "BigQuery Connection API",
|
||||
@@ -206,6 +287,15 @@
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/datapolicies/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1",
|
||||
"description": "BigQuery Data Policy API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/datatransfer/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1",
|
||||
"description": "BigQuery Data Transfer API",
|
||||
@@ -242,15 +332,6 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/reservation/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1",
|
||||
"description": "BigQuery Reservation API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/storage/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/storage/apiv1",
|
||||
"description": "BigQuery Storage API",
|
||||
@@ -338,7 +419,7 @@
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/channel/apiv1": {
|
||||
@@ -467,6 +548,24 @@
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dataform/apiv1alpha2": {
|
||||
"distribution_name": "cloud.google.com/go/dataform/apiv1alpha2",
|
||||
"description": "Dataform API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2",
|
||||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dataform/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/dataform/apiv1beta1",
|
||||
"description": "Dataform API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/datafusion/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/datafusion/apiv1",
|
||||
"description": "Cloud Data Fusion API",
|
||||
@@ -575,6 +674,15 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dialogflow/apiv2beta1": {
|
||||
"distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1",
|
||||
"description": "Dialogflow API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dialogflow/cx/apiv3": {
|
||||
"distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3",
|
||||
"description": "Dialogflow API",
|
||||
@@ -629,6 +737,15 @@
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/edgecontainer/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/edgecontainer/apiv1",
|
||||
"description": "Distributed Cloud Edge Container API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/errorreporting": {
|
||||
"distribution_name": "cloud.google.com/go/errorreporting",
|
||||
"description": "Cloud Error Reporting API",
|
||||
@@ -719,6 +836,24 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/functions/apiv2",
|
||||
"description": "Cloud Functions API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/apiv2beta": {
|
||||
"distribution_name": "cloud.google.com/go/functions/apiv2beta",
|
||||
"description": "Cloud Functions API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/metadata": {
|
||||
"distribution_name": "cloud.google.com/go/functions/metadata",
|
||||
"description": "Cloud Functions",
|
||||
@@ -800,6 +935,15 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "CORE"
|
||||
},
|
||||
"cloud.google.com/go/iam/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/iam/apiv2",
|
||||
"description": "Identity and Access Management (IAM) API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/iam/credentials/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/iam/credentials/apiv1",
|
||||
"description": "IAM Service Account Credentials API",
|
||||
@@ -856,7 +1000,7 @@
|
||||
},
|
||||
"cloud.google.com/go/language/apiv1beta2": {
|
||||
"distribution_name": "cloud.google.com/go/language/apiv1beta2",
|
||||
"description": "Google Cloud Natural Language API",
|
||||
"description": "Cloud Natural Language API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2",
|
||||
@@ -895,7 +1039,7 @@
|
||||
"description": "Long Running Operations API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/longrunning/autogen",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen",
|
||||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
@@ -1193,7 +1337,7 @@
|
||||
"language": "Go",
|
||||
"client_library_type": "manual",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest",
|
||||
"release_level": "beta",
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_MANUAL"
|
||||
},
|
||||
"cloud.google.com/go/pubsublite/apiv1": {
|
||||
@@ -1205,15 +1349,6 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recaptchaenterprise/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1",
|
||||
"description": "reCAPTCHA Enterprise API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recaptchaenterprise/v2/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1",
|
||||
"description": "reCAPTCHA Enterprise API",
|
||||
@@ -1223,6 +1358,15 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1",
|
||||
"description": "reCAPTCHA Enterprise API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recommendationengine/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1",
|
||||
"description": "Recommendations AI",
|
||||
@@ -1367,15 +1511,6 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/secretmanager/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1",
|
||||
"description": "Secret Manager API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/security/privateca/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/security/privateca/apiv1",
|
||||
"description": "Certificate Authority API",
|
||||
@@ -1394,6 +1529,15 @@
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/security/publicca/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1",
|
||||
"description": "Public Certificate Authority API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/securitycenter/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/securitycenter/apiv1",
|
||||
"description": "Security Command Center API",
|
||||
@@ -1495,7 +1639,7 @@
|
||||
},
|
||||
"cloud.google.com/go/spanner/admin/database/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1",
|
||||
"description": "Cloud Spanner Database Admin API",
|
||||
"description": "Cloud Spanner API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1",
|
||||
@@ -1538,6 +1682,15 @@
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/speech/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/speech/apiv2",
|
||||
"description": "Cloud Speech-to-Text API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/storage": {
|
||||
"distribution_name": "cloud.google.com/go/storage",
|
||||
"description": "Cloud Storage (GCS)",
|
||||
@@ -1571,7 +1724,7 @@
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4",
|
||||
"release_level": "beta",
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/talent/apiv4beta1": {
|
||||
@@ -1682,15 +1835,6 @@
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vision/apiv1p1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/vision/apiv1p1beta1",
|
||||
"description": "Cloud Vision API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1p1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vision/v2/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/vision/v2/apiv1",
|
||||
"description": "Cloud Vision API",
|
||||
@@ -1700,6 +1844,15 @@
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vision/v2/apiv1p1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1",
|
||||
"description": "Cloud Vision API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vmmigration/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/vmmigration/apiv1",
|
||||
"description": "VM Migration API",
|
||||
|
||||
3
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
3
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
@@ -31,7 +31,8 @@ import (
|
||||
// - "google.golang.org/api/googleapi".Error
|
||||
// If the error is not one of these types, Annotate behaves
|
||||
// like
|
||||
// fmt.Errorf("%s: %v", msg, err)
|
||||
//
|
||||
// fmt.Errorf("%s: %v", msg, err)
|
||||
func Annotate(err error, msg string) error {
|
||||
if err == nil {
|
||||
panic("Annotate called with nil")
|
||||
|
||||
322
vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
generated
vendored
322
vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
generated
vendored
@@ -1,322 +0,0 @@
|
||||
{
|
||||
"release-type": "go-yoshi",
|
||||
"include-component-in-tag": true,
|
||||
"tag-separator": "/",
|
||||
"packages": {
|
||||
"accessapproval": {
|
||||
"component": "accessapproval"
|
||||
},
|
||||
"accesscontextmanager": {
|
||||
"component": "accesscontextmanager"
|
||||
},
|
||||
"aiplatform": {
|
||||
"component": "aiplatform"
|
||||
},
|
||||
"analytics": {
|
||||
"component": "analytics"
|
||||
},
|
||||
"apigateway": {
|
||||
"component": "apigateway"
|
||||
},
|
||||
"apigeeconnect": {
|
||||
"component": "apigeeconnect"
|
||||
},
|
||||
"appengine": {
|
||||
"component": "appengine"
|
||||
},
|
||||
"area120": {
|
||||
"component": "area120"
|
||||
},
|
||||
"artifactregistry": {
|
||||
"component": "artifactregistry"
|
||||
},
|
||||
"asset": {
|
||||
"component": "asset"
|
||||
},
|
||||
"assuredworkloads": {
|
||||
"component": "assuredworkloads"
|
||||
},
|
||||
"automl": {
|
||||
"component": "automl"
|
||||
},
|
||||
"baremetalsolution": {
|
||||
"component": "baremetalsolution"
|
||||
},
|
||||
"batch": {
|
||||
"component": "batch"
|
||||
},
|
||||
"billing": {
|
||||
"component": "billing"
|
||||
},
|
||||
"binaryauthorization": {
|
||||
"component": "binaryauthorization"
|
||||
},
|
||||
"certificatemanager": {
|
||||
"component": "certificatemanager"
|
||||
},
|
||||
"channel": {
|
||||
"component": "channel"
|
||||
},
|
||||
"cloudbuild": {
|
||||
"component": "cloudbuild"
|
||||
},
|
||||
"clouddms": {
|
||||
"component": "clouddms"
|
||||
},
|
||||
"cloudtasks": {
|
||||
"component": "cloudtasks"
|
||||
},
|
||||
"compute": {
|
||||
"component": "compute"
|
||||
},
|
||||
"contactcenterinsights": {
|
||||
"component": "contactcenterinsights"
|
||||
},
|
||||
"container": {
|
||||
"component": "container"
|
||||
},
|
||||
"containeranalysis": {
|
||||
"component": "containeranalysis"
|
||||
},
|
||||
"datacatalog": {
|
||||
"component": "datacatalog"
|
||||
},
|
||||
"dataflow": {
|
||||
"component": "dataflow"
|
||||
},
|
||||
"datafusion": {
|
||||
"component": "datafusion"
|
||||
},
|
||||
"datalabeling": {
|
||||
"component": "datalabeling"
|
||||
},
|
||||
"dataplex": {
|
||||
"component": "dataplex"
|
||||
},
|
||||
"dataproc": {
|
||||
"component": "dataproc"
|
||||
},
|
||||
"dataqna": {
|
||||
"component": "dataqna"
|
||||
},
|
||||
"datastream": {
|
||||
"component": "datastream"
|
||||
},
|
||||
"deploy": {
|
||||
"component": "deploy"
|
||||
},
|
||||
"dialogflow": {
|
||||
"component": "dialogflow"
|
||||
},
|
||||
"dlp": {
|
||||
"component": "dlp"
|
||||
},
|
||||
"documentai": {
|
||||
"component": "documentai"
|
||||
},
|
||||
"domains": {
|
||||
"component": "domains"
|
||||
},
|
||||
"essentialcontacts": {
|
||||
"component": "essentialcontacts"
|
||||
},
|
||||
"eventarc": {
|
||||
"component": "eventarc"
|
||||
},
|
||||
"filestore": {
|
||||
"component": "filestore"
|
||||
},
|
||||
"functions": {
|
||||
"component": "functions"
|
||||
},
|
||||
"gaming": {
|
||||
"component": "gaming"
|
||||
},
|
||||
"gkebackup": {
|
||||
"component": "gkebackup"
|
||||
},
|
||||
"gkeconnect": {
|
||||
"component": "gkeconnect"
|
||||
},
|
||||
"gkehub": {
|
||||
"component": "gkehub"
|
||||
},
|
||||
"gkemulticloud": {
|
||||
"component": "gkemulticloud"
|
||||
},
|
||||
"grafeas": {
|
||||
"component": "grafeas"
|
||||
},
|
||||
"gsuiteaddons": {
|
||||
"component": "gsuiteaddons"
|
||||
},
|
||||
"iam": {
|
||||
"component": "iam"
|
||||
},
|
||||
"iap": {
|
||||
"component": "iap"
|
||||
},
|
||||
"ids": {
|
||||
"component": "ids"
|
||||
},
|
||||
"iot": {
|
||||
"component": "iot"
|
||||
},
|
||||
"kms": {
|
||||
"component": "kms"
|
||||
},
|
||||
"language": {
|
||||
"component": "language"
|
||||
},
|
||||
"lifesciences": {
|
||||
"component": "lifesciences"
|
||||
},
|
||||
"managedidentities": {
|
||||
"component": "managedidentities"
|
||||
},
|
||||
"mediatranslation": {
|
||||
"component": "mediatranslation"
|
||||
},
|
||||
"memcache": {
|
||||
"component": "memcache"
|
||||
},
|
||||
"metastore": {
|
||||
"component": "metastore"
|
||||
},
|
||||
"monitoring": {
|
||||
"component": "monitoring"
|
||||
},
|
||||
"networkconnectivity": {
|
||||
"component": "networkconnectivity"
|
||||
},
|
||||
"networkmanagement": {
|
||||
"component": "networkmanagement"
|
||||
},
|
||||
"networksecurity": {
|
||||
"component": "networksecurity"
|
||||
},
|
||||
"notebooks": {
|
||||
"component": "notebooks"
|
||||
},
|
||||
"optimization": {
|
||||
"component": "optimization"
|
||||
},
|
||||
"orchestration": {
|
||||
"component": "orchestration"
|
||||
},
|
||||
"orgpolicy": {
|
||||
"component": "orgpolicy"
|
||||
},
|
||||
"osconfig": {
|
||||
"component": "osconfig"
|
||||
},
|
||||
"oslogin": {
|
||||
"component": "oslogin"
|
||||
},
|
||||
"phishingprotection": {
|
||||
"component": "phishingprotection"
|
||||
},
|
||||
"policytroubleshooter": {
|
||||
"component": "policytroubleshooter"
|
||||
},
|
||||
"privatecatalog": {
|
||||
"component": "privatecatalog"
|
||||
},
|
||||
"recaptchaenterprise/v2": {
|
||||
"component": "recaptchaenterprise"
|
||||
},
|
||||
"recommendationengine": {
|
||||
"component": "recommendationengine"
|
||||
},
|
||||
"recommender": {
|
||||
"component": "recommender"
|
||||
},
|
||||
"redis": {
|
||||
"component": "redis"
|
||||
},
|
||||
"resourcemanager": {
|
||||
"component": "resourcemanager"
|
||||
},
|
||||
"resourcesettings": {
|
||||
"component": "resourcesettings"
|
||||
},
|
||||
"retail": {
|
||||
"component": "retail"
|
||||
},
|
||||
"run": {
|
||||
"component": "run"
|
||||
},
|
||||
"scheduler": {
|
||||
"component": "scheduler"
|
||||
},
|
||||
"secretmanager": {
|
||||
"component": "secretmanager"
|
||||
},
|
||||
"security": {
|
||||
"component": "security"
|
||||
},
|
||||
"securitycenter": {
|
||||
"component": "securitycenter"
|
||||
},
|
||||
"servicecontrol": {
|
||||
"component": "servicecontrol"
|
||||
},
|
||||
"servicedirectory": {
|
||||
"component": "servicedirectory"
|
||||
},
|
||||
"servicemanagement": {
|
||||
"component": "servicemanagement"
|
||||
},
|
||||
"serviceusage": {
|
||||
"component": "serviceusage"
|
||||
},
|
||||
"shell": {
|
||||
"component": "shell"
|
||||
},
|
||||
"speech": {
|
||||
"component": "speech"
|
||||
},
|
||||
"storagetransfer": {
|
||||
"component": "storagetransfer"
|
||||
},
|
||||
"talent": {
|
||||
"component": "talent"
|
||||
},
|
||||
"texttospeech": {
|
||||
"component": "texttospeech"
|
||||
},
|
||||
"tpu": {
|
||||
"component": "tpu"
|
||||
},
|
||||
"trace": {
|
||||
"component": "trace"
|
||||
},
|
||||
"translate": {
|
||||
"component": "translate"
|
||||
},
|
||||
"video": {
|
||||
"component": "video"
|
||||
},
|
||||
"videointelligence": {
|
||||
"component": "videointelligence"
|
||||
},
|
||||
"vision/v2": {
|
||||
"component": "vision"
|
||||
},
|
||||
"vmmigration": {
|
||||
"component": "vmmigration"
|
||||
},
|
||||
"vpcaccess": {
|
||||
"component": "vpcaccess"
|
||||
},
|
||||
"webrisk": {
|
||||
"component": "webrisk"
|
||||
},
|
||||
"websecurityscanner": {
|
||||
"component": "websecurityscanner"
|
||||
},
|
||||
"workflows": {
|
||||
"component": "workflows"
|
||||
}
|
||||
}
|
||||
}
|
||||
10
vendor/cloud.google.com/go/release-please-config.json
generated
vendored
10
vendor/cloud.google.com/go/release-please-config.json
generated
vendored
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"release-type": "go-yoshi",
|
||||
"separate-pull-requests": true,
|
||||
"include-component-in-tag": false,
|
||||
"packages": {
|
||||
".": {
|
||||
"component": "main"
|
||||
}
|
||||
}
|
||||
}
|
||||
3
vendor/cloud.google.com/go/storage/.release-please-manifest.json
generated
vendored
3
vendor/cloud.google.com/go/storage/.release-please-manifest.json
generated
vendored
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"storage": "1.23.0"
|
||||
}
|
||||
53
vendor/cloud.google.com/go/storage/CHANGES.md
generated
vendored
53
vendor/cloud.google.com/go/storage/CHANGES.md
generated
vendored
@@ -1,6 +1,59 @@
|
||||
# Changes
|
||||
|
||||
|
||||
## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1))
|
||||
* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb))
|
||||
* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857)
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3))
|
||||
|
||||
## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8))
|
||||
|
||||
## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e))
|
||||
|
||||
## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f))
|
||||
* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e))
|
||||
|
||||
## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d))
|
||||
|
||||
## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23)
|
||||
|
||||
|
||||
|
||||
4
vendor/cloud.google.com/go/storage/README.md
generated
vendored
4
vendor/cloud.google.com/go/storage/README.md
generated
vendored
@@ -2,7 +2,7 @@
|
||||
|
||||
- [About Cloud Storage](https://cloud.google.com/storage/)
|
||||
- [API documentation](https://cloud.google.com/storage/docs)
|
||||
- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage)
|
||||
- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest)
|
||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage)
|
||||
|
||||
### Example Usage
|
||||
@@ -25,7 +25,7 @@ if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer rc.Close()
|
||||
body, err := ioutil.ReadAll(rc)
|
||||
body, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
102
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
102
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
@@ -20,9 +20,8 @@ import (
|
||||
"reflect"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"google.golang.org/api/googleapi"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
|
||||
@@ -121,111 +120,46 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.BucketAccessControls
|
||||
var err error
|
||||
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toBucketACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
acl := &raw.BucketAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
if isBucketDefault {
|
||||
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
||||
} else {
|
||||
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
||||
return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
|
||||
|
||||
663
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
663
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
@@ -20,21 +20,20 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"github.com/googleapis/go-type-adapters/adapters"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iamcredentials/v1"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
"google.golang.org/genproto/googleapis/storage/v2"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
dpb "google.golang.org/genproto/googleapis/type/date"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -56,7 +55,8 @@ type BucketHandle struct {
|
||||
// The supplied name must contain only lowercase letters, numbers, dashes,
|
||||
// underscores, and dots. The full specification for valid bucket names can be
|
||||
// found at:
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
//
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
func (c *Client) Bucket(name string) *BucketHandle {
|
||||
retry := c.retry.clone()
|
||||
return &BucketHandle{
|
||||
@@ -83,27 +83,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
var bkt *raw.Bucket
|
||||
if attrs != nil {
|
||||
bkt = attrs.toRawBucket()
|
||||
} else {
|
||||
bkt = &raw.Bucket{}
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil {
|
||||
return err
|
||||
}
|
||||
bkt.Name = b.name
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
||||
bkt.Location = "US"
|
||||
}
|
||||
req := b.c.raw.Buckets.Insert(projectID, bkt)
|
||||
setClientHeader(req.Header())
|
||||
if attrs != nil && attrs.PredefinedACL != "" {
|
||||
req.PredefinedAcl(attrs.PredefinedACL)
|
||||
}
|
||||
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
|
||||
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
|
||||
}
|
||||
return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true, setRetryHeaderHTTP(req))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes the Bucket.
|
||||
@@ -111,24 +95,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newDeleteCall()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
|
||||
req := b.c.raw.Buckets.Delete(b.name)
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
|
||||
@@ -151,7 +119,8 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
|
||||
//
|
||||
// name must consist entirely of valid UTF-8-encoded runes. The full specification
|
||||
// for valid object names can be found at:
|
||||
// https://cloud.google.com/storage/docs/naming-objects
|
||||
//
|
||||
// https://cloud.google.com/storage/docs/naming-objects
|
||||
func (b *BucketHandle) Object(name string) *ObjectHandle {
|
||||
retry := b.retry.clone()
|
||||
return &ObjectHandle{
|
||||
@@ -176,35 +145,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newGetCall()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var resp *raw.Bucket
|
||||
err = run(ctx, func() error {
|
||||
resp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, true, setRetryHeaderHTTP(req))
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrBucketNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(resp)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
|
||||
req := b.c.raw.Buckets.Get(b.name).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// Update updates a bucket's attributes.
|
||||
@@ -212,62 +154,23 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newPatchCall(&uattrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if uattrs.PredefinedACL != "" {
|
||||
req.PredefinedAcl(uattrs.PredefinedACL)
|
||||
}
|
||||
if uattrs.PredefinedDefaultObjectACL != "" {
|
||||
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
|
||||
}
|
||||
|
||||
isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
|
||||
|
||||
var rawBucket *raw.Bucket
|
||||
call := func() error {
|
||||
rb, err := req.Context(ctx).Do()
|
||||
rawBucket = rb
|
||||
return err
|
||||
}
|
||||
|
||||
if err := run(ctx, call, b.retry, isIdempotent, setRetryHeaderHTTP(req)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(rawBucket)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
|
||||
rb := uattrs.toRawBucket()
|
||||
req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
|
||||
return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...)
|
||||
}
|
||||
|
||||
// SignedURL returns a URL for the specified object. Signed URLs allow anyone
|
||||
// access to a restricted resource for a limited time without needing a
|
||||
// Google account or signing in. For more information about signed URLs, see
|
||||
// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
|
||||
// access to a restricted resource for a limited time without needing a Google
|
||||
// account or signing in.
|
||||
// For more information about signed URLs, see "[Overview of access control]."
|
||||
//
|
||||
// This method only requires the Method and Expires fields in the specified
|
||||
// SignedURLOptions opts to be non-nil. If not provided, it attempts to fill the
|
||||
// GoogleAccessID and PrivateKey from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
|
||||
// If you are authenticating with a custom HTTP client, Service Account based
|
||||
// auto-detection will be hindered.
|
||||
// This method requires the Method and Expires fields in the specified
|
||||
// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and
|
||||
// PrivateKey fields in some cases. Read more on the [automatic detection of credentials]
|
||||
// for this method.
|
||||
//
|
||||
// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
|
||||
// This requires the IAM Service Account Credentials API to be enabled
|
||||
// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
|
||||
// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
|
||||
// If you do not want these fields set for you, you may pass them in through opts or use
|
||||
// SignedURL(bucket, name string, opts *SignedURLOptions) instead.
|
||||
// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
|
||||
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
|
||||
func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) {
|
||||
if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
|
||||
return SignedURL(b.name, object, opts)
|
||||
@@ -305,18 +208,11 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
|
||||
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
|
||||
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
|
||||
//
|
||||
// This method only requires the Expires field in the specified PostPolicyV4Options
|
||||
// to be non-nil. If not provided, it attempts to fill the GoogleAccessID and PrivateKey
|
||||
// from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
|
||||
// If you are authenticating with a custom HTTP client, Service Account based
|
||||
// auto-detection will be hindered.
|
||||
// This method requires the Expires field in the specified PostPolicyV4Options
|
||||
// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields
|
||||
// in some cases. Read more on the [automatic detection of credentials] for this method.
|
||||
//
|
||||
// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
|
||||
// This requires the IAM Service Account Credentials API to be enabled
|
||||
// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
|
||||
// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
|
||||
// If you do not want these fields set for you, you may pass them in through opts or use
|
||||
// GenerateSignedPostPolicyV4(bucket, name string, opts *PostPolicyV4Options) instead.
|
||||
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
|
||||
func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
|
||||
if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
|
||||
return GenerateSignedPostPolicyV4(b.name, object, opts)
|
||||
@@ -356,17 +252,27 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
|
||||
|
||||
if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
|
||||
var sa struct {
|
||||
ClientEmail string `json:"client_email"`
|
||||
}
|
||||
err := json.Unmarshal(b.c.creds.JSON, &sa)
|
||||
if err == nil && sa.ClientEmail != "" {
|
||||
return sa.ClientEmail, nil
|
||||
} else if err != nil {
|
||||
returnErr = err
|
||||
} else {
|
||||
returnErr = errors.New("storage: empty client email in credentials")
|
||||
ClientEmail string `json:"client_email"`
|
||||
SAImpersonationURL string `json:"service_account_impersonation_url"`
|
||||
CredType string `json:"type"`
|
||||
}
|
||||
|
||||
err := json.Unmarshal(b.c.creds.JSON, &sa)
|
||||
if err != nil {
|
||||
returnErr = err
|
||||
} else if sa.CredType == "impersonated_service_account" {
|
||||
start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
|
||||
|
||||
if end <= start {
|
||||
returnErr = errors.New("error parsing impersonated service account credentials")
|
||||
} else {
|
||||
return sa.SAImpersonationURL[start+1 : end], nil
|
||||
}
|
||||
} else if sa.CredType == "service_account" && sa.ClientEmail != "" {
|
||||
return sa.ClientEmail, nil
|
||||
} else {
|
||||
returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported")
|
||||
}
|
||||
}
|
||||
|
||||
// Don't error out if we can't unmarshal, fallback to GCE check.
|
||||
@@ -377,11 +283,11 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
|
||||
} else if err != nil {
|
||||
returnErr = err
|
||||
} else {
|
||||
returnErr = errors.New("got empty email from GCE metadata service")
|
||||
returnErr = errors.New("empty email from GCE metadata service")
|
||||
}
|
||||
|
||||
}
|
||||
return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %v", returnErr)
|
||||
return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) {
|
||||
@@ -392,18 +298,18 @@ func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte,
|
||||
// circumventing the cost of recreating the auth/transport layer
|
||||
svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create iamcredentials client: %v", err)
|
||||
return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
|
||||
}
|
||||
|
||||
resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
|
||||
Payload: base64.StdEncoding.EncodeToString(in),
|
||||
}).Do()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to sign bytes: %v", err)
|
||||
return nil, fmt.Errorf("unable to sign bytes: %w", err)
|
||||
}
|
||||
out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to base64 decode response: %v", err)
|
||||
return nil, fmt.Errorf("unable to base64 decode response: %w", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
@@ -461,8 +367,13 @@ type BucketAttrs struct {
|
||||
PredefinedDefaultObjectACL string
|
||||
|
||||
// Location is the location of the bucket. It defaults to "US".
|
||||
// If specifying a dual-region, CustomPlacementConfig should be set in conjunction.
|
||||
Location string
|
||||
|
||||
// The bucket's custom placement configuration that holds a list of
|
||||
// regional locations for custom dual regions.
|
||||
CustomPlacementConfig *CustomPlacementConfig
|
||||
|
||||
// MetaGeneration is the metadata generation of the bucket.
|
||||
// This field is read-only.
|
||||
MetaGeneration int64
|
||||
@@ -533,6 +444,11 @@ type BucketAttrs struct {
|
||||
// See https://cloud.google.com/storage/docs/managing-turbo-replication for
|
||||
// more information.
|
||||
RPO RPO
|
||||
|
||||
// Autoclass holds the bucket's autoclass configuration. If enabled,
|
||||
// allows for the automatic selection of the best storage class
|
||||
// based on object access patterns.
|
||||
Autoclass *Autoclass
|
||||
}
|
||||
|
||||
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
|
||||
@@ -698,7 +614,12 @@ const (
|
||||
//
|
||||
// All configured conditions must be met for the associated action to be taken.
|
||||
type LifecycleCondition struct {
|
||||
// AllObjects is used to select all objects in a bucket by
|
||||
// setting AgeInDays to 0.
|
||||
AllObjects bool
|
||||
|
||||
// AgeInDays is the age of the object in days.
|
||||
// If you want to set AgeInDays to `0` use AllObjects set to `true`.
|
||||
AgeInDays int64
|
||||
|
||||
// CreatedBefore is the time the object was created.
|
||||
@@ -716,10 +637,12 @@ type LifecycleCondition struct {
|
||||
|
||||
// DaysSinceCustomTime is the days elapsed since the CustomTime date of the
|
||||
// object. This condition can only be satisfied if CustomTime has been set.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceCustomTime int64
|
||||
|
||||
// DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp
|
||||
// of the object. This condition is relevant only for versioned objects.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceNoncurrentTime int64
|
||||
|
||||
// Liveness specifies the object's liveness. Relevant only for versioned objects
|
||||
@@ -751,6 +674,7 @@ type LifecycleCondition struct {
|
||||
// If the value is N, this condition is satisfied when there are at least N
|
||||
// versions (including the live version) newer than this version of the
|
||||
// object.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
NumNewerVersions int64
|
||||
}
|
||||
|
||||
@@ -782,6 +706,29 @@ type BucketWebsite struct {
|
||||
NotFoundPage string
|
||||
}
|
||||
|
||||
// CustomPlacementConfig holds the bucket's custom placement
|
||||
// configuration for Custom Dual Regions. See
|
||||
// https://cloud.google.com/storage/docs/locations#location-dr for more information.
|
||||
type CustomPlacementConfig struct {
|
||||
// The list of regional locations in which data is placed.
|
||||
// Custom Dual Regions require exactly 2 regional locations.
|
||||
DataLocations []string
|
||||
}
|
||||
|
||||
// Autoclass holds the bucket's autoclass configuration. If enabled,
|
||||
// allows for the automatic selection of the best storage class
|
||||
// based on object access patterns. See
|
||||
// https://cloud.google.com/storage/docs/using-autoclass for more information.
|
||||
type Autoclass struct {
|
||||
// Enabled specifies whether the autoclass feature is enabled
|
||||
// on the bucket.
|
||||
Enabled bool
|
||||
// ToggleTime is the time from which Autoclass was last toggled.
|
||||
// If Autoclass is enabled when the bucket is created, the ToggleTime
|
||||
// is set to the bucket creation time. This field is read-only.
|
||||
ToggleTime time.Time
|
||||
}
|
||||
|
||||
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
||||
if b == nil {
|
||||
return nil, nil
|
||||
@@ -815,6 +762,8 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
||||
LocationType: b.LocationType,
|
||||
ProjectNumber: b.ProjectNumber,
|
||||
RPO: toRPO(b),
|
||||
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
|
||||
Autoclass: toAutoclassFromRaw(b.Autoclass),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -845,6 +794,9 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
|
||||
PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()),
|
||||
LocationType: b.GetLocationType(),
|
||||
RPO: toRPOFromProto(b),
|
||||
CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()),
|
||||
ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based
|
||||
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -882,22 +834,24 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
|
||||
}
|
||||
}
|
||||
return &raw.Bucket{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toRawBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
|
||||
Cors: toRawCORS(b.CORS),
|
||||
Encryption: b.Encryption.toRawBucketEncryption(),
|
||||
Logging: b.Logging.toRawBucketLogging(),
|
||||
Website: b.Website.toRawBucketWebsite(),
|
||||
IamConfiguration: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toRawBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
|
||||
Cors: toRawCORS(b.CORS),
|
||||
Encryption: b.Encryption.toRawBucketEncryption(),
|
||||
Logging: b.Logging.toRawBucketLogging(),
|
||||
Website: b.Website.toRawBucketWebsite(),
|
||||
IamConfiguration: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
|
||||
Autoclass: b.Autoclass.toRawAutoclass(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -924,7 +878,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
|
||||
}
|
||||
var bb *storagepb.Bucket_Billing
|
||||
if b.RequesterPays {
|
||||
bb = &storage.Bucket_Billing{RequesterPays: true}
|
||||
bb = &storagepb.Bucket_Billing{RequesterPays: true}
|
||||
}
|
||||
var bktIAM *storagepb.Bucket_IamConfig
|
||||
if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
@@ -940,22 +894,24 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
|
||||
}
|
||||
|
||||
return &storagepb.Bucket{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toProtoBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toProtoLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(),
|
||||
Cors: toProtoCORS(b.CORS),
|
||||
Encryption: b.Encryption.toProtoBucketEncryption(),
|
||||
Logging: b.Logging.toProtoBucketLogging(),
|
||||
Website: b.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toProtoBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toProtoLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(),
|
||||
Cors: toProtoCORS(b.CORS),
|
||||
Encryption: b.Encryption.toProtoBucketEncryption(),
|
||||
Logging: b.Logging.toProtoBucketLogging(),
|
||||
Website: b.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
|
||||
Autoclass: b.Autoclass.toProtoAutoclass(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -972,25 +928,32 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
|
||||
}
|
||||
var bb *storagepb.Bucket_Billing
|
||||
if ua.RequesterPays != nil {
|
||||
bb = &storage.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
|
||||
bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
|
||||
}
|
||||
|
||||
var bktIAM *storagepb.Bucket_IamConfig
|
||||
var ublaEnabled bool
|
||||
var bktPolicyOnlyEnabled bool
|
||||
if ua.UniformBucketLevelAccess != nil {
|
||||
ublaEnabled = optional.ToBool(ua.UniformBucketLevelAccess.Enabled)
|
||||
}
|
||||
if ua.BucketPolicyOnly != nil {
|
||||
bktPolicyOnlyEnabled = optional.ToBool(ua.BucketPolicyOnly.Enabled)
|
||||
}
|
||||
if ublaEnabled || bktPolicyOnlyEnabled {
|
||||
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
|
||||
Enabled: true,
|
||||
if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
bktIAM = &storagepb.Bucket_IamConfig{}
|
||||
|
||||
if ua.BucketPolicyOnly != nil {
|
||||
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
|
||||
Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled),
|
||||
}
|
||||
}
|
||||
|
||||
if ua.UniformBucketLevelAccess != nil {
|
||||
// UniformBucketLevelAccess takes precedence over BucketPolicyOnly,
|
||||
// so Enabled will be overriden here if both are set
|
||||
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
|
||||
Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled),
|
||||
}
|
||||
}
|
||||
|
||||
if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
|
||||
}
|
||||
}
|
||||
if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
|
||||
}
|
||||
|
||||
var defaultHold bool
|
||||
if ua.DefaultEventBasedHold != nil {
|
||||
defaultHold = optional.ToBool(ua.DefaultEventBasedHold)
|
||||
@@ -1031,6 +994,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
|
||||
Website: ua.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: ua.RPO.String(),
|
||||
Autoclass: ua.Autoclass.toProtoAutoclass(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1146,6 +1110,10 @@ type BucketAttrsToUpdate struct {
|
||||
// more information.
|
||||
RPO RPO
|
||||
|
||||
// If set, updates the autoclass configuration of the bucket.
|
||||
// See https://cloud.google.com/storage/docs/using-autoclass for more information.
|
||||
Autoclass *Autoclass
|
||||
|
||||
// acl is the list of access control rules on the bucket.
|
||||
// It is unexported and only used internally by the gRPC client.
|
||||
// Library users should use ACLHandle methods directly.
|
||||
@@ -1259,6 +1227,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
|
||||
rb.Website = ua.Website.toRawBucketWebsite()
|
||||
}
|
||||
}
|
||||
if ua.Autoclass != nil {
|
||||
rb.Autoclass = &raw.BucketAutoclass{
|
||||
Enabled: ua.Autoclass.Enabled,
|
||||
ForceSendFields: []string{"Enabled"},
|
||||
}
|
||||
}
|
||||
if ua.PredefinedACL != "" {
|
||||
// Clear ACL or the call will fail.
|
||||
rb.Acl = nil
|
||||
@@ -1347,15 +1321,8 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
|
||||
// most customers. It might be changed in backwards-incompatible ways and is not
|
||||
// subject to any SLA or deprecation policy.
|
||||
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
|
||||
var metageneration int64
|
||||
if b.conds != nil {
|
||||
metageneration = b.conds.MetagenerationMatch
|
||||
}
|
||||
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, true, setRetryHeaderHTTP(req))
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// applyBucketConds modifies the provided call using the conditions in conds.
|
||||
@@ -1420,8 +1387,14 @@ func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionP
|
||||
if rp == nil {
|
||||
return nil
|
||||
}
|
||||
// RetentionPeriod must be greater than 0, so if it is 0, the user left it
|
||||
// unset, and so we should not send it in the request i.e. nil is sent.
|
||||
var period *int64
|
||||
if rp.RetentionPeriod != 0 {
|
||||
period = proto.Int64(int64(rp.RetentionPeriod / time.Second))
|
||||
}
|
||||
return &storagepb.Bucket_RetentionPolicy{
|
||||
RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
|
||||
RetentionPeriod: period,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1441,7 +1414,7 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error)
|
||||
}
|
||||
|
||||
func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy {
|
||||
if rp == nil {
|
||||
if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 {
|
||||
return nil
|
||||
}
|
||||
return &RetentionPolicy{
|
||||
@@ -1503,19 +1476,6 @@ func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS {
|
||||
return out
|
||||
}
|
||||
|
||||
// Used to handle breaking change in Autogen Storage client OLM Age field
|
||||
// from int64 to *int64 gracefully in the manual client
|
||||
// TODO(#6240): Method should be removed once breaking change is made and introduced to this client
|
||||
func setAgeCondition(age int64, ageField interface{}) {
|
||||
c := reflect.ValueOf(ageField).Elem()
|
||||
switch c.Kind() {
|
||||
case reflect.Int64:
|
||||
c.SetInt(age)
|
||||
case reflect.Ptr:
|
||||
c.Set(reflect.ValueOf(&age))
|
||||
}
|
||||
}
|
||||
|
||||
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
||||
var rl raw.BucketLifecycle
|
||||
if len(l.Rules) == 0 {
|
||||
@@ -1537,7 +1497,15 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
||||
},
|
||||
}
|
||||
|
||||
setAgeCondition(r.Condition.AgeInDays, &rr.Condition.Age)
|
||||
// AllObjects takes precedent when both AllObjects and AgeInDays are set
|
||||
// Rationale: If you've opted into using AllObjects, it makes sense that you
|
||||
// understand the implications of how this option works with AgeInDays.
|
||||
if r.Condition.AllObjects {
|
||||
rr.Condition.Age = googleapi.Int64(0)
|
||||
rr.Condition.ForceSendFields = []string{"Age"}
|
||||
} else if r.Condition.AgeInDays > 0 {
|
||||
rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays)
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
@@ -1586,6 +1554,11 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
||||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if r.Condition.AllObjects {
|
||||
rr.Condition.AgeDays = proto.Int32(0)
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
rr.Condition.IsLive = nil
|
||||
@@ -1596,34 +1569,19 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
||||
}
|
||||
|
||||
if !r.Condition.CreatedBefore.IsZero() {
|
||||
rr.Condition.CreatedBefore = adapters.TimeToProtoDate(r.Condition.CreatedBefore)
|
||||
rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore)
|
||||
}
|
||||
if !r.Condition.CustomTimeBefore.IsZero() {
|
||||
rr.Condition.CustomTimeBefore = adapters.TimeToProtoDate(r.Condition.CustomTimeBefore)
|
||||
rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore)
|
||||
}
|
||||
if !r.Condition.NoncurrentTimeBefore.IsZero() {
|
||||
rr.Condition.NoncurrentTimeBefore = adapters.TimeToProtoDate(r.Condition.NoncurrentTimeBefore)
|
||||
rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore)
|
||||
}
|
||||
rl.Rule = append(rl.Rule, rr)
|
||||
}
|
||||
return &rl
|
||||
}
|
||||
|
||||
// Used to handle breaking change in Autogen Storage client OLM Age field
|
||||
// from int64 to *int64 gracefully in the manual client
|
||||
// TODO(#6240): Method should be removed once breaking change is made and introduced to this client
|
||||
func getAgeCondition(ageField interface{}) int64 {
|
||||
v := reflect.ValueOf(ageField)
|
||||
if v.Kind() == reflect.Int64 {
|
||||
return v.Interface().(int64)
|
||||
} else if v.Kind() == reflect.Ptr {
|
||||
if val, ok := v.Interface().(*int64); ok {
|
||||
return *val
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
||||
var l Lifecycle
|
||||
if rl == nil {
|
||||
@@ -1644,7 +1602,12 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
||||
NumNewerVersions: rr.Condition.NumNewerVersions,
|
||||
},
|
||||
}
|
||||
r.Condition.AgeInDays = getAgeCondition(rr.Condition.Age)
|
||||
if rr.Condition.Age != nil {
|
||||
r.Condition.AgeInDays = *rr.Condition.Age
|
||||
if *rr.Condition.Age == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
}
|
||||
|
||||
if rr.Condition.IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
@@ -1690,6 +1653,11 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
|
||||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if rr.GetCondition().GetAgeDays() == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
|
||||
if rr.GetCondition().IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
} else if rr.GetCondition().GetIsLive() {
|
||||
@@ -1699,13 +1667,13 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
|
||||
}
|
||||
|
||||
if rr.GetCondition().GetCreatedBefore() != nil {
|
||||
r.Condition.CreatedBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
|
||||
r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
|
||||
}
|
||||
if rr.GetCondition().GetCustomTimeBefore() != nil {
|
||||
r.Condition.CustomTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
|
||||
r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
|
||||
}
|
||||
if rr.GetCondition().GetNoncurrentTimeBefore() != nil {
|
||||
r.Condition.NoncurrentTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
|
||||
r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
|
||||
}
|
||||
l.Rules = append(l.Rules, r)
|
||||
}
|
||||
@@ -1933,24 +1901,93 @@ func toRPOFromProto(b *storagepb.Bucket) RPO {
|
||||
}
|
||||
}
|
||||
|
||||
func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &CustomPlacementConfig{DataLocations: c.DataLocations}
|
||||
}
|
||||
|
||||
func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &raw.BucketCustomPlacementConfig{
|
||||
DataLocations: c.DataLocations,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &storagepb.Bucket_CustomPlacementConfig{
|
||||
DataLocations: c.DataLocations,
|
||||
}
|
||||
}
|
||||
|
||||
func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &CustomPlacementConfig{DataLocations: c.GetDataLocations()}
|
||||
}
|
||||
|
||||
func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
// Excluding read only field ToggleTime.
|
||||
return &raw.BucketAutoclass{
|
||||
Enabled: a.Enabled,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
// Excluding read only field ToggleTime.
|
||||
return &storagepb.Bucket_Autoclass{
|
||||
Enabled: a.Enabled,
|
||||
}
|
||||
}
|
||||
|
||||
func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass {
|
||||
if a == nil || a.ToggleTime == "" {
|
||||
return nil
|
||||
}
|
||||
// Return Autoclass.ToggleTime only if parsed with a valid value.
|
||||
t, err := time.Parse(time.RFC3339, a.ToggleTime)
|
||||
if err != nil {
|
||||
return &Autoclass{
|
||||
Enabled: a.Enabled,
|
||||
}
|
||||
}
|
||||
return &Autoclass{
|
||||
Enabled: a.Enabled,
|
||||
ToggleTime: t,
|
||||
}
|
||||
}
|
||||
|
||||
func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass {
|
||||
if a == nil || a.GetToggleTime().AsTime().Unix() == 0 {
|
||||
return nil
|
||||
}
|
||||
return &Autoclass{
|
||||
Enabled: a.GetEnabled(),
|
||||
ToggleTime: a.GetToggleTime().AsTime(),
|
||||
}
|
||||
}
|
||||
|
||||
// Objects returns an iterator over the objects in the bucket that match the
|
||||
// Query q. If q is nil, no filtering is done. Objects will be iterated over
|
||||
// lexicographically by name.
|
||||
//
|
||||
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
|
||||
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
|
||||
it := &ObjectIterator{
|
||||
ctx: ctx,
|
||||
bucket: b,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.items) },
|
||||
func() interface{} { b := it.items; it.items = nil; return b })
|
||||
if q != nil {
|
||||
it.query = *q
|
||||
}
|
||||
return it
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.ListObjects(ctx, b.name, q, o...)
|
||||
}
|
||||
|
||||
// Retryer returns a bucket handle that is configured with custom retry
|
||||
@@ -1985,7 +2022,6 @@ func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
|
||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
type ObjectIterator struct {
|
||||
ctx context.Context
|
||||
bucket *BucketHandle
|
||||
query Query
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
@@ -2022,52 +2058,6 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
req := it.bucket.c.raw.Objects.List(it.bucket.name)
|
||||
setClientHeader(req.Header())
|
||||
projection := it.query.Projection
|
||||
if projection == ProjectionDefault {
|
||||
projection = ProjectionFull
|
||||
}
|
||||
req.Projection(projection.String())
|
||||
req.Delimiter(it.query.Delimiter)
|
||||
req.Prefix(it.query.Prefix)
|
||||
req.StartOffset(it.query.StartOffset)
|
||||
req.EndOffset(it.query.EndOffset)
|
||||
req.Versions(it.query.Versions)
|
||||
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
|
||||
if len(it.query.fieldSelection) > 0 {
|
||||
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
|
||||
}
|
||||
req.PageToken(pageToken)
|
||||
if it.bucket.userProject != "" {
|
||||
req.UserProject(it.bucket.userProject)
|
||||
}
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Objects
|
||||
var err error
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
}, it.bucket.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
err = ErrBucketNotExist
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
it.items = append(it.items, newObject(item))
|
||||
}
|
||||
for _, prefix := range resp.Prefixes {
|
||||
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// Buckets returns an iterator over the buckets in the project. You may
|
||||
// optionally set the iterator's Prefix field to restrict the list to buckets
|
||||
// whose names begin with the prefix. By default, all buckets in the project
|
||||
@@ -2075,17 +2065,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
|
||||
//
|
||||
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
|
||||
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
|
||||
it := &BucketIterator{
|
||||
ctx: ctx,
|
||||
client: c,
|
||||
projectID: projectID,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.buckets) },
|
||||
func() interface{} { b := it.buckets; it.buckets = nil; return b })
|
||||
|
||||
return it
|
||||
o := makeStorageOpts(true, c.retry, "")
|
||||
return c.tc.ListBuckets(ctx, projectID, o...)
|
||||
}
|
||||
|
||||
// A BucketIterator is an iterator over BucketAttrs.
|
||||
@@ -2096,7 +2077,6 @@ type BucketIterator struct {
|
||||
Prefix string
|
||||
|
||||
ctx context.Context
|
||||
client *Client
|
||||
projectID string
|
||||
buckets []*BucketAttrs
|
||||
pageInfo *iterator.PageInfo
|
||||
@@ -2122,36 +2102,6 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
|
||||
// Note: This method is not safe for concurrent operations without explicit synchronization.
|
||||
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
// TODO: When the transport-agnostic client interface is integrated into the Veneer,
|
||||
// this method should be removed, and the iterator should be initialized by the
|
||||
// transport-specific client implementations.
|
||||
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||
req := it.client.raw.Buckets.List(it.projectID)
|
||||
setClientHeader(req.Header())
|
||||
req.Projection("full")
|
||||
req.Prefix(it.Prefix)
|
||||
req.PageToken(pageToken)
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Buckets
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
}, it.client.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
b, err := newBucket(item)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.buckets = append(it.buckets, b)
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// RPO (Recovery Point Objective) configures the turbo replication feature. See
|
||||
// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
|
||||
type RPO int
|
||||
@@ -2187,3 +2137,28 @@ func (rpo RPO) String() string {
|
||||
return rpoUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC.
|
||||
//
|
||||
// Hours, minutes, seconds, and nanoseconds are set to 0.
|
||||
func protoDateToUTCTime(d *dpb.Date) time.Time {
|
||||
return protoDateToTime(d, time.UTC)
|
||||
}
|
||||
|
||||
// protoDateToTime returns a new Time based on the google.type.Date and provided
|
||||
// *time.Location.
|
||||
//
|
||||
// Hours, minutes, seconds, and nanoseconds are set to 0.
|
||||
func protoDateToTime(d *dpb.Date, l *time.Location) time.Time {
|
||||
return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l)
|
||||
}
|
||||
|
||||
// timeToProtoDate returns a new google.type.Date based on the provided time.Time.
|
||||
// The location is ignored, as is anything more precise than the day.
|
||||
func timeToProtoDate(t time.Time) *dpb.Date {
|
||||
return &dpb.Date{
|
||||
Year: int32(t.Year()),
|
||||
Month: int32(t.Month()),
|
||||
Day: int32(t.Day()),
|
||||
}
|
||||
}
|
||||
|
||||
85
vendor/cloud.google.com/go/storage/client.go
generated
vendored
85
vendor/cloud.google.com/go/storage/client.go
generated
vendored
@@ -44,7 +44,7 @@ type storageClient interface {
|
||||
// Top-level methods.
|
||||
|
||||
GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
|
||||
CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
|
||||
CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
|
||||
ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
|
||||
Close() error
|
||||
|
||||
@@ -66,19 +66,19 @@ type storageClient interface {
|
||||
|
||||
DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
||||
ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Bucket ACL methods.
|
||||
|
||||
DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
||||
ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Object ACL methods.
|
||||
|
||||
DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error
|
||||
ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Media operations.
|
||||
|
||||
@@ -96,11 +96,11 @@ type storageClient interface {
|
||||
|
||||
// HMAC Key methods.
|
||||
|
||||
GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
|
||||
ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator
|
||||
UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
||||
CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
|
||||
DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error
|
||||
GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error)
|
||||
ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator
|
||||
UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
||||
CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error)
|
||||
DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error
|
||||
|
||||
// Notification methods.
|
||||
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
|
||||
@@ -162,6 +162,20 @@ func callSettings(defaults *settings, opts ...storageOption) *settings {
|
||||
return &cs
|
||||
}
|
||||
|
||||
// makeStorageOpts is a helper for generating a set of storageOption based on
|
||||
// idempotency, retryConfig, and userProject. All top-level client operations
|
||||
// will generally have to pass these options through the interface.
|
||||
func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption {
|
||||
opts := []storageOption{idempotent(isIdempotent)}
|
||||
if retry != nil {
|
||||
opts = append(opts, withRetryConfig(retry))
|
||||
}
|
||||
if userProject != "" {
|
||||
opts = append(opts, withUserProject(userProject))
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// storageOption is the transport-agnostic call option for the storageClient
|
||||
// interface.
|
||||
type storageOption interface {
|
||||
@@ -267,40 +281,53 @@ type openWriterParams struct {
|
||||
}
|
||||
|
||||
type newRangeReaderParams struct {
|
||||
bucket string
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
gen int64
|
||||
length int64
|
||||
object string
|
||||
offset int64
|
||||
bucket string
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
gen int64
|
||||
length int64
|
||||
object string
|
||||
offset int64
|
||||
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
|
||||
}
|
||||
|
||||
type composeObjectRequest struct {
|
||||
dstBucket string
|
||||
dstObject string
|
||||
srcs []string
|
||||
dstObject destinationObject
|
||||
srcs []sourceObject
|
||||
predefinedACL string
|
||||
sendCRC32C bool
|
||||
}
|
||||
|
||||
type sourceObject struct {
|
||||
name string
|
||||
bucket string
|
||||
gen int64
|
||||
conds *Conditions
|
||||
predefinedACL string
|
||||
encryptionKey []byte
|
||||
}
|
||||
|
||||
type destinationObject struct {
|
||||
name string
|
||||
bucket string
|
||||
conds *Conditions
|
||||
attrs *ObjectAttrs // attrs to set on the destination object.
|
||||
encryptionKey []byte
|
||||
keyName string
|
||||
}
|
||||
|
||||
type rewriteObjectRequest struct {
|
||||
srcBucket string
|
||||
srcObject string
|
||||
dstBucket string
|
||||
dstObject string
|
||||
dstKeyName string
|
||||
attrs *ObjectAttrs
|
||||
gen int64
|
||||
conds *Conditions
|
||||
predefinedACL string
|
||||
token string
|
||||
srcObject sourceObject
|
||||
dstObject destinationObject
|
||||
predefinedACL string
|
||||
token string
|
||||
maxBytesRewrittenPerCall int64
|
||||
}
|
||||
|
||||
type rewriteObjectResponse struct {
|
||||
resource *ObjectAttrs
|
||||
done bool
|
||||
written int64
|
||||
size int64
|
||||
token string
|
||||
}
|
||||
|
||||
153
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
153
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
@@ -20,7 +20,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// CopierFrom creates a Copier that can copy src to dst.
|
||||
@@ -70,6 +69,15 @@ type Copier struct {
|
||||
DestinationKMSKeyName string
|
||||
|
||||
dst, src *ObjectHandle
|
||||
|
||||
// The maximum number of bytes that will be rewritten per rewrite request.
|
||||
// Most callers shouldn't need to specify this parameter - it is primarily
|
||||
// in place to support testing. If specified the value must be an integral
|
||||
// multiple of 1 MiB (1048576). Also, this only applies to requests where
|
||||
// the source and destination span locations and/or storage classes. Finally,
|
||||
// this value must not change across rewrite calls else you'll get an error
|
||||
// that the `rewriteToken` is invalid.
|
||||
maxBytesRewrittenPerCall int64
|
||||
}
|
||||
|
||||
// Run performs the copy.
|
||||
@@ -86,69 +94,59 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
|
||||
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen)
|
||||
}
|
||||
// Convert destination attributes to raw form, omitting the bucket.
|
||||
// If the bucket is included but name or content-type aren't, the service
|
||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
||||
// does not cause any problems.
|
||||
rawObject := c.ObjectAttrs.toRawObject("")
|
||||
req := &rewriteObjectRequest{
|
||||
srcObject: sourceObject{
|
||||
name: c.src.object,
|
||||
bucket: c.src.bucket,
|
||||
gen: c.src.gen,
|
||||
conds: c.src.conds,
|
||||
encryptionKey: c.src.encryptionKey,
|
||||
},
|
||||
dstObject: destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
keyName: c.DestinationKMSKeyName,
|
||||
},
|
||||
predefinedACL: c.PredefinedACL,
|
||||
token: c.RewriteToken,
|
||||
maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall,
|
||||
}
|
||||
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
var userProject string
|
||||
if c.dst.userProject != "" {
|
||||
userProject = c.dst.userProject
|
||||
} else if c.src.userProject != "" {
|
||||
userProject = c.src.userProject
|
||||
}
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject)
|
||||
|
||||
for {
|
||||
res, err := c.callRewrite(ctx, rawObject)
|
||||
res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.token
|
||||
req.token = res.token
|
||||
if c.ProgressFunc != nil {
|
||||
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
|
||||
c.ProgressFunc(uint64(res.written), uint64(res.size))
|
||||
}
|
||||
if res.Done { // Finished successfully.
|
||||
return newObject(res.Resource), nil
|
||||
if res.done { // Finished successfully.
|
||||
return res.resource, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
|
||||
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if c.RewriteToken != "" {
|
||||
call.RewriteToken(c.RewriteToken)
|
||||
}
|
||||
if c.DestinationKMSKeyName != "" {
|
||||
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
}
|
||||
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
} else if c.src.userProject != "" {
|
||||
call.UserProject(c.src.userProject)
|
||||
}
|
||||
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { res, err = call.Do(); return err }
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
|
||||
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.RewriteToken
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||
// You can immediately call Run on the returned Composer, or you can
|
||||
// configure it first.
|
||||
@@ -188,17 +186,13 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen)
|
||||
}
|
||||
if len(c.srcs) == 0 {
|
||||
return nil, errors.New("storage: at least one source object must be specified")
|
||||
}
|
||||
|
||||
req := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
||||
if c.SendCRC32C {
|
||||
req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C)
|
||||
}
|
||||
for _, src := range c.srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return nil, err
|
||||
@@ -209,36 +203,31 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
if src.encryptionKey != nil {
|
||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||
}
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.object,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SourceObjects = append(req.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
req := &composeObjectRequest{
|
||||
dstBucket: c.dst.bucket,
|
||||
predefinedACL: c.PredefinedACL,
|
||||
sendCRC32C: c.SendCRC32C,
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
req.dstObject = destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
for _, src := range c.srcs {
|
||||
s := sourceObject{
|
||||
name: src.object,
|
||||
bucket: src.bucket,
|
||||
gen: src.gen,
|
||||
conds: src.conds,
|
||||
}
|
||||
req.srcs = append(req.srcs, s)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { obj, err = call.Do(); return err }
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
|
||||
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject)
|
||||
return c.dst.c.tc.ComposeObject(ctx, req, opts...)
|
||||
}
|
||||
|
||||
321
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
321
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
@@ -22,185 +22,183 @@ https://cloud.google.com/storage/docs.
|
||||
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
# Creating a Client
|
||||
|
||||
Creating a Client
|
||||
To start working with this package, create a [Client]:
|
||||
|
||||
To start working with this package, create a client:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
The client will use your default application credentials. Clients should be
|
||||
reused instead of created as needed. The methods of Client are safe for
|
||||
reused instead of created as needed. The methods of [Client] are safe for
|
||||
concurrent use by multiple goroutines.
|
||||
|
||||
If you only wish to access public data, you can create
|
||||
an unauthenticated client with
|
||||
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
|
||||
To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
|
||||
environment variable to the address at which your emulator is running. This will
|
||||
send requests to that address instead of to Cloud Storage. You can then create
|
||||
and use a client as usual:
|
||||
|
||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Create client as usual.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Create client as usual.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
||||
// instead of https://storage.googleapis.com/storage/v1/b
|
||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
||||
// instead of https://storage.googleapis.com/storage/v1/b
|
||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Please note that there is no official emulator for Cloud Storage.
|
||||
|
||||
Buckets
|
||||
# Buckets
|
||||
|
||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||
bucket, make a bucket handle:
|
||||
|
||||
bkt := client.Bucket(bucketName)
|
||||
bkt := client.Bucket(bucketName)
|
||||
|
||||
A handle is a reference to a bucket. You can have a handle even if the
|
||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||
call Create on the handle:
|
||||
call [BucketHandle.Create]:
|
||||
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that although buckets are associated with projects, bucket names are
|
||||
global across all projects.
|
||||
|
||||
Each bucket has associated metadata, represented in this package by
|
||||
BucketAttrs. The third argument to BucketHandle.Create allows you to set
|
||||
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
|
||||
Attrs:
|
||||
[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set
|
||||
the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use
|
||||
[BucketHandle.Attrs]:
|
||||
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
|
||||
Objects
|
||||
# Objects
|
||||
|
||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||
refer to objects using a handle, just as with buckets, but unlike buckets
|
||||
you don't explicitly create an object. Instead, the first time you write
|
||||
to an object it will be created. You can use the standard Go io.Reader
|
||||
and io.Writer interfaces to read and write object data:
|
||||
to an object it will be created. You can use the standard Go [io.Reader]
|
||||
and [io.Writer] interfaces to read and write object data:
|
||||
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
|
||||
Objects also have attributes, which you can fetch with Attrs:
|
||||
Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]:
|
||||
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
|
||||
Listing objects
|
||||
# Listing objects
|
||||
|
||||
Listing objects in a bucket is done with the Bucket.Objects method:
|
||||
Listing objects in a bucket is done with the [BucketHandle.Objects] method:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query := &storage.Query{Prefix: ""}
|
||||
|
||||
var names []string
|
||||
it := bkt.Objects(ctx, query)
|
||||
for {
|
||||
attrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
names = append(names, attrs.Name)
|
||||
}
|
||||
var names []string
|
||||
it := bkt.Objects(ctx, query)
|
||||
for {
|
||||
attrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
names = append(names, attrs.Name)
|
||||
}
|
||||
|
||||
Objects are listed lexicographically by name. To filter objects
|
||||
lexicographically, Query.StartOffset and/or Query.EndOffset can be used:
|
||||
lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used:
|
||||
|
||||
query := &storage.Query{
|
||||
Prefix: "",
|
||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
||||
}
|
||||
query := &storage.Query{
|
||||
Prefix: "",
|
||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
||||
}
|
||||
|
||||
// ... as before
|
||||
// ... as before
|
||||
|
||||
If only a subset of object attributes is needed when listing, specifying this
|
||||
subset using Query.SetAttrSelection may speed up the listing process:
|
||||
subset using [Query.SetAttrSelection] may speed up the listing process:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query.SetAttrSelection([]string{"Name"})
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query.SetAttrSelection([]string{"Name"})
|
||||
|
||||
// ... as before
|
||||
// ... as before
|
||||
|
||||
ACLs
|
||||
# ACLs
|
||||
|
||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
||||
access at the project level (see
|
||||
https://cloud.google.com/storage/docs/access-control/iam).
|
||||
access at the project level (see [Cloud Storage IAM docs].
|
||||
|
||||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
||||
To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]:
|
||||
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
|
||||
You can also set and delete ACLs.
|
||||
|
||||
Conditions
|
||||
# Conditions
|
||||
|
||||
Every object has a generation and a metageneration. The generation changes
|
||||
whenever the content changes, and the metageneration changes whenever the
|
||||
metadata changes. Conditions let you check these values before an operation;
|
||||
metadata changes. [Conditions] let you check these values before an operation;
|
||||
the operation only executes if the conditions match. You can use conditions to
|
||||
prevent race conditions in read-modify-write operations.
|
||||
|
||||
@@ -208,60 +206,81 @@ For example, say you've read an object's metadata into objAttrs. Now
|
||||
you want to write to that object, but only if its contents haven't changed
|
||||
since you read it. Here is how to express that:
|
||||
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
|
||||
Signed URLs
|
||||
# Signed URLs
|
||||
|
||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||
Signing a URL requires credentials authorized to sign a URL. To use the same
|
||||
authentication that was used when instantiating the Storage client, use the
|
||||
BucketHandle.SignedURL method.
|
||||
authentication that was used when instantiating the Storage client, use
|
||||
[BucketHandle.SignedURL].
|
||||
|
||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
You can also sign a URL wihout creating a client. See the documentation of
|
||||
SignedURL for details.
|
||||
You can also sign a URL without creating a client. See the documentation of
|
||||
[SignedURL] for details.
|
||||
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
Post Policy V4 Signed Request
|
||||
# Post Policy V4 Signed Request
|
||||
|
||||
A type of signed request that allows uploads through HTML forms directly to Cloud Storage with
|
||||
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
|
||||
by a user.
|
||||
|
||||
For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well
|
||||
as the documentation of BucketHandle.GenerateSignedPostPolicyV4.
|
||||
For more information, please see the [XML POST Object docs] as well
|
||||
as the documentation of [BucketHandle.GenerateSignedPostPolicyV4].
|
||||
|
||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
||||
|
||||
Errors
|
||||
# Credential requirements for signing
|
||||
|
||||
Errors returned by this client are often of the type googleapi.Error.
|
||||
These errors can be introspected for more information by using errors.As
|
||||
with the richer googleapi.Error type. For example:
|
||||
If the GoogleAccessID and PrivateKey option fields are not provided, they will
|
||||
be automatically detected by [BucketHandle.SignedURL] and
|
||||
[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true:
|
||||
- you are authenticated to the Storage Client with a service account's
|
||||
downloaded private key, either directly in code or by setting the
|
||||
GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]),
|
||||
- your application is running on Google Compute Engine (GCE), or
|
||||
- you are logged into [gcloud using application default credentials]
|
||||
with [impersonation enabled].
|
||||
|
||||
Detecting GoogleAccessID may not be possible if you are authenticated using a
|
||||
token source or using [option.WithHTTPClient]. In this case, you can provide a
|
||||
service account email for GoogleAccessID and the client will attempt to sign
|
||||
the URL or Post Policy using that service account.
|
||||
|
||||
To generate the signature, you must have:
|
||||
- iam.serviceAccounts.signBlob permissions on the GoogleAccessID service
|
||||
account, and
|
||||
- the [IAM Service Account Credentials API] enabled (unless authenticating
|
||||
with a downloaded private key).
|
||||
|
||||
# Errors
|
||||
|
||||
Errors returned by this client are often of the type [googleapi.Error].
|
||||
These errors can be introspected for more information by using [errors.As]
|
||||
with the richer [googleapi.Error] type. For example:
|
||||
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok {
|
||||
if e.Code == 409 { ... }
|
||||
}
|
||||
|
||||
See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information.
|
||||
|
||||
Retrying failed requests
|
||||
# Retrying failed requests
|
||||
|
||||
Methods in this package may retry calls that fail with transient errors.
|
||||
Retrying continues indefinitely unless the controlling context is canceled, the
|
||||
@@ -271,12 +290,12 @@ continuing, use context timeouts or cancellation.
|
||||
The retry strategy in this library follows best practices for Cloud Storage. By
|
||||
default, operations are retried only if they are idempotent, and exponential
|
||||
backoff with jitter is employed. In addition, errors are only retried if they
|
||||
are defined as transient by the service. See
|
||||
https://cloud.google.com/storage/docs/retry-strategy for more information.
|
||||
are defined as transient by the service. See the [Cloud Storage retry docs]
|
||||
for more information.
|
||||
|
||||
Users can configure non-default retry behavior for a single library call (using
|
||||
BucketHandle.Retryer and ObjectHandle.Retryer) or for all calls made by a
|
||||
client (using Client.SetRetry). For example:
|
||||
[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a
|
||||
client (using [Client.SetRetry]). For example:
|
||||
|
||||
o := client.Bucket(bucket).Object(object).Retryer(
|
||||
// Use WithBackoff to change the timing of the exponential backoff.
|
||||
@@ -297,5 +316,13 @@ client (using Client.SetRetry). For example:
|
||||
if err := o.Delete(ctx); err != nil {
|
||||
// Handle err.
|
||||
}
|
||||
|
||||
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
|
||||
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
|
||||
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
|
||||
[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth
|
||||
[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
|
||||
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
|
||||
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
|
||||
*/
|
||||
package storage // import "cloud.google.com/go/storage"
|
||||
|
||||
509
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
509
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
@@ -16,16 +16,20 @@ package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
gapic "cloud.google.com/go/storage/internal/apiv2"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
@@ -43,10 +47,21 @@ const (
|
||||
// This is only used for the gRPC client.
|
||||
defaultConnPoolSize = 4
|
||||
|
||||
// maxPerMessageWriteSize is the maximum amount of content that can be sent
|
||||
// per WriteObjectRequest message. A buffer reaching this amount will
|
||||
// precipitate a flush of the buffer. It is only used by the gRPC Writer
|
||||
// implementation.
|
||||
maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES)
|
||||
|
||||
// globalProjectAlias is the project ID alias used for global buckets.
|
||||
//
|
||||
// This is only used for the gRPC API.
|
||||
globalProjectAlias = "_"
|
||||
|
||||
// msgEntityNotSupported indicates ACL entites using project ID are not currently supported.
|
||||
//
|
||||
// This is only used for the gRPC API.
|
||||
msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead"
|
||||
)
|
||||
|
||||
// defaultGRPCOptions returns a set of the default client options
|
||||
@@ -75,6 +90,9 @@ func defaultGRPCOptions() []option.ClientOption {
|
||||
option.WithGRPCDialOption(grpc.WithInsecure()),
|
||||
option.WithoutAuthentication(),
|
||||
)
|
||||
} else {
|
||||
// Only enable DirectPath when the emulator is not being targeted.
|
||||
defaults = append(defaults, internaloption.EnableDirectPath(true))
|
||||
}
|
||||
|
||||
return defaults
|
||||
@@ -127,10 +145,10 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin
|
||||
return resp.EmailAddress, err
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
b := attrs.toProtoBucket()
|
||||
|
||||
b.Name = bucket
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if b.GetLocation() == "" && b.GetLifecycle() != nil {
|
||||
@@ -138,11 +156,13 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, at
|
||||
}
|
||||
|
||||
req := &storagepb.CreateBucketRequest{
|
||||
Parent: toProjectResource(project),
|
||||
Bucket: b,
|
||||
BucketId: b.GetName(),
|
||||
PredefinedAcl: attrs.PredefinedACL,
|
||||
PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL,
|
||||
Parent: toProjectResource(project),
|
||||
Bucket: b,
|
||||
BucketId: b.GetName(),
|
||||
}
|
||||
if attrs != nil {
|
||||
req.PredefinedAcl = attrs.PredefinedACL
|
||||
req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL
|
||||
}
|
||||
|
||||
var battrs *BucketAttrs
|
||||
@@ -227,7 +247,8 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con
|
||||
func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.GetBucketRequest{
|
||||
Name: bucketResourceName(globalProjectAlias, bucket),
|
||||
Name: bucketResourceName(globalProjectAlias, bucket),
|
||||
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
|
||||
}
|
||||
if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil {
|
||||
return nil, err
|
||||
@@ -309,6 +330,8 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
|
||||
// In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared.
|
||||
fieldMask.Paths = append(fieldMask.Paths, "default_object_acl")
|
||||
}
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
if uattrs.acl != nil {
|
||||
// In cases where acl is set by UpdateBucketACL method.
|
||||
fieldMask.Paths = append(fieldMask.Paths, "acl")
|
||||
@@ -323,6 +346,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
|
||||
if uattrs.RPO != RPOUnknown {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "rpo")
|
||||
}
|
||||
if uattrs.Autoclass != nil {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "autoclass")
|
||||
}
|
||||
// TODO(cathyo): Handle labels. Pending b/230510191.
|
||||
req.UpdateMask = fieldMask
|
||||
|
||||
@@ -359,14 +385,14 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
||||
it.query = *q
|
||||
}
|
||||
req := &storagepb.ListObjectsRequest{
|
||||
Parent: bucketResourceName(globalProjectAlias, bucket),
|
||||
Prefix: it.query.Prefix,
|
||||
Delimiter: it.query.Delimiter,
|
||||
Versions: it.query.Versions,
|
||||
LexicographicStart: it.query.StartOffset,
|
||||
LexicographicEnd: it.query.EndOffset,
|
||||
// TODO(noahietz): Convert a projection to a FieldMask.
|
||||
// ReadMask: q.Projection,
|
||||
Parent: bucketResourceName(globalProjectAlias, bucket),
|
||||
Prefix: it.query.Prefix,
|
||||
Delimiter: it.query.Delimiter,
|
||||
Versions: it.query.Versions,
|
||||
LexicographicStart: it.query.StartOffset,
|
||||
LexicographicEnd: it.query.EndOffset,
|
||||
IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter,
|
||||
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
@@ -390,6 +416,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
||||
it.items = append(it.items, b)
|
||||
}
|
||||
|
||||
// Response is always non-nil after a successful request.
|
||||
res := gitr.Response.(*storagepb.ListObjectsResponse)
|
||||
for _, prefix := range res.GetPrefixes() {
|
||||
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
|
||||
}
|
||||
|
||||
return token, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
@@ -428,6 +460,8 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string
|
||||
req := &storagepb.GetObjectRequest{
|
||||
Bucket: bucketResourceName(globalProjectAlias, bucket),
|
||||
Object: object,
|
||||
// ProjectionFull by default.
|
||||
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
|
||||
}
|
||||
if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil {
|
||||
return nil, err
|
||||
@@ -458,7 +492,8 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
|
||||
s := callSettings(c.settings, opts...)
|
||||
o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object)
|
||||
req := &storagepb.UpdateObjectRequest{
|
||||
Object: o,
|
||||
Object: o,
|
||||
PredefinedAcl: uattrs.PredefinedACL,
|
||||
}
|
||||
if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil {
|
||||
return nil, err
|
||||
@@ -470,10 +505,7 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
|
||||
req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
|
||||
}
|
||||
|
||||
var paths []string
|
||||
fieldMask := &fieldmaskpb.FieldMask{
|
||||
Paths: paths,
|
||||
}
|
||||
fieldMask := &fieldmaskpb.FieldMask{Paths: nil}
|
||||
if uattrs.EventBasedHold != nil {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "event_based_hold")
|
||||
}
|
||||
@@ -498,8 +530,11 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
|
||||
if !uattrs.CustomTime.IsZero() {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "custom_time")
|
||||
}
|
||||
|
||||
// TODO(cathyo): Handle ACL and PredefinedACL. Pending b/233617896.
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "acl")
|
||||
}
|
||||
// TODO(cathyo): Handle metadata. Pending b/230510191.
|
||||
|
||||
req.UpdateMask = fieldMask
|
||||
@@ -527,11 +562,21 @@ func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s
|
||||
return err
|
||||
}
|
||||
// Delete the entity and copy other remaining ACL entities.
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
// Return error if entity is not found or a project ID is used.
|
||||
invalidEntity := true
|
||||
var acl []ACLRule
|
||||
for _, a := range attrs.DefaultObjectACL {
|
||||
if a.Entity != entity {
|
||||
acl = append(acl, a)
|
||||
}
|
||||
if a.Entity == entity {
|
||||
invalidEntity = false
|
||||
}
|
||||
}
|
||||
if invalidEntity {
|
||||
return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported)
|
||||
}
|
||||
uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
@@ -540,6 +585,7 @@ func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
@@ -547,8 +593,25 @@ func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st
|
||||
}
|
||||
return attrs.DefaultObjectACL, nil
|
||||
}
|
||||
func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve BucketAttrs.
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
var acl []ACLRule
|
||||
aclRule := ACLRule{Entity: entity, Role: role}
|
||||
acl = append(attrs.DefaultObjectACL, aclRule)
|
||||
uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bucket ACL methods.
|
||||
@@ -561,11 +624,21 @@ func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string,
|
||||
return err
|
||||
}
|
||||
// Delete the entity and copy other remaining ACL entities.
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
// Return error if entity is not found or a project ID is used.
|
||||
invalidEntity := true
|
||||
var acl []ACLRule
|
||||
for _, a := range attrs.ACL {
|
||||
if a.Entity != entity {
|
||||
acl = append(acl, a)
|
||||
}
|
||||
if a.Entity == entity {
|
||||
invalidEntity = false
|
||||
}
|
||||
}
|
||||
if invalidEntity {
|
||||
return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported)
|
||||
}
|
||||
uattrs := &BucketAttrsToUpdate{acl: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
@@ -574,6 +647,7 @@ func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
@@ -582,29 +656,58 @@ func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, o
|
||||
return attrs.ACL, nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve BucketAttrs.
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
var acl []ACLRule
|
||||
aclRule := ACLRule{Entity: entity, Role: role}
|
||||
acl = append(attrs.ACL, aclRule)
|
||||
uattrs := &BucketAttrsToUpdate{acl: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
_, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return &aclRule, err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object ACL methods.
|
||||
|
||||
func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve ObjectAttrs.
|
||||
attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Delete the entity and copy other remaining ACL entities.
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
// Return error if entity is not found or a project ID is used.
|
||||
invalidEntity := true
|
||||
var acl []ACLRule
|
||||
for _, a := range attrs.ACL {
|
||||
if a.Entity != entity {
|
||||
acl = append(acl, a)
|
||||
}
|
||||
if a.Entity == entity {
|
||||
invalidEntity = false
|
||||
}
|
||||
}
|
||||
if invalidEntity {
|
||||
return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported)
|
||||
}
|
||||
uattrs := &ObjectAttrsToUpdate{ACL: acl}
|
||||
// Call UpdateObject with the specified metageneration.
|
||||
if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object.
|
||||
@@ -617,31 +720,141 @@ func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object s
|
||||
return o.ACL, nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve ObjectAttrs.
|
||||
attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note: This API currently does not support entites using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
var acl []ACLRule
|
||||
aclRule := ACLRule{Entity: entity, Role: role}
|
||||
acl = append(attrs.ACL, aclRule)
|
||||
uattrs := &ObjectAttrsToUpdate{ACL: acl}
|
||||
// Call UpdateObject with the specified metageneration.
|
||||
if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Media operations.
|
||||
|
||||
func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
|
||||
dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket)
|
||||
dstObjPb.Name = req.dstObject.name
|
||||
if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if req.sendCRC32C {
|
||||
dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C
|
||||
}
|
||||
|
||||
srcs := []*storagepb.ComposeObjectRequest_SourceObject{}
|
||||
for _, src := range req.srcs {
|
||||
srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name}
|
||||
if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcs = append(srcs, srcObjPb)
|
||||
}
|
||||
|
||||
rawReq := &storagepb.ComposeObjectRequest{
|
||||
Destination: dstObjPb,
|
||||
SourceObjects: srcs,
|
||||
}
|
||||
if req.predefinedACL != "" {
|
||||
rawReq.DestinationPredefinedAcl = req.predefinedACL
|
||||
}
|
||||
if req.dstObject.encryptionKey != nil {
|
||||
rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey)
|
||||
}
|
||||
|
||||
var obj *storagepb.Object
|
||||
var err error
|
||||
if err := run(ctx, func() error {
|
||||
obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newObjectFromProto(obj), nil
|
||||
}
|
||||
func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
obj := req.dstObject.attrs.toProtoObject("")
|
||||
call := &storagepb.RewriteObjectRequest{
|
||||
SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket),
|
||||
SourceObject: req.srcObject.name,
|
||||
RewriteToken: req.token,
|
||||
DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket),
|
||||
DestinationName: req.dstObject.name,
|
||||
Destination: obj,
|
||||
DestinationKmsKey: req.dstObject.keyName,
|
||||
DestinationPredefinedAcl: req.predefinedACL,
|
||||
}
|
||||
|
||||
// The userProject, whether source or destination project, is decided by the code calling the interface.
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(req.dstObject.encryptionKey) > 0 {
|
||||
call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey)
|
||||
}
|
||||
if len(req.srcObject.encryptionKey) > 0 {
|
||||
srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey)
|
||||
call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm()
|
||||
call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes()
|
||||
call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes()
|
||||
}
|
||||
|
||||
call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall
|
||||
|
||||
var res *storagepb.RewriteResponse
|
||||
var err error
|
||||
|
||||
retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := &rewriteObjectResponse{
|
||||
done: res.GetDone(),
|
||||
written: res.GetTotalBytesRewritten(),
|
||||
size: res.GetObjectSize(),
|
||||
token: res.GetRewriteToken(),
|
||||
resource: newObjectFromProto(res.GetResource()),
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if params.conds != nil {
|
||||
if err := params.conds.validate("grpcStorageClient.NewRangeReader"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
|
||||
// A negative length means "read to the end of the object", but the
|
||||
// read_limit field it corresponds to uses zero to mean the same thing. Thus
|
||||
// we coerce the length to 0 to read to the end of the object.
|
||||
@@ -695,6 +908,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
||||
}
|
||||
|
||||
msg, err = stream.Recv()
|
||||
// These types of errors show up on the Recv call, rather than the
|
||||
// initialization of the stream via ReadObject above.
|
||||
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
@@ -738,6 +956,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
||||
// Store the content from the first Recv in the
|
||||
// client buffer for reading later.
|
||||
leftovers: msg.GetChecksummedData().GetContent(),
|
||||
settings: s,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -759,6 +978,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
var offset int64
|
||||
errorf := params.setError
|
||||
progress := params.progress
|
||||
@@ -766,6 +987,10 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
gw := newGRPCWriter(c, params, pr)
|
||||
gw.settings = s
|
||||
if s.userProject != "" {
|
||||
gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject)
|
||||
}
|
||||
|
||||
// This function reads the data sent to the pipe and sends sets of messages
|
||||
// on the gRPC client-stream as the buffer is filled.
|
||||
@@ -883,20 +1108,142 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str
|
||||
|
||||
// HMAC Key methods.
|
||||
|
||||
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.GetHmacKeyRequest{
|
||||
AccessId: accessID,
|
||||
Project: toProjectResource(project),
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
var metadata *storagepb.HmacKeyMetadata
|
||||
err := run(ctx, func() error {
|
||||
var err error
|
||||
metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toHMACKeyFromProto(metadata), nil
|
||||
}
|
||||
func (c *grpcStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator {
|
||||
return &HMACKeysIterator{}
|
||||
|
||||
func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.ListHmacKeysRequest{
|
||||
Project: toProjectResource(project),
|
||||
ServiceAccountEmail: serviceAccountEmail,
|
||||
ShowDeletedKeys: showDeletedKeys,
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
it := &HMACKeysIterator{
|
||||
ctx: ctx,
|
||||
projectID: project,
|
||||
retry: s.retry,
|
||||
}
|
||||
gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...)
|
||||
fetch := func(pageSize int, pageToken string) (token string, err error) {
|
||||
var hmacKeys []*storagepb.HmacKeyMetadata
|
||||
err = run(it.ctx, func() error {
|
||||
hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, hkmd := range hmacKeys {
|
||||
hk := toHMACKeyFromProto(hkmd)
|
||||
it.hmacKeys = append(it.hmacKeys, hk)
|
||||
}
|
||||
|
||||
return token, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
fetch,
|
||||
func() int { return len(it.hmacKeys) - it.index },
|
||||
func() interface{} {
|
||||
prev := it.hmacKeys
|
||||
it.hmacKeys = it.hmacKeys[:0]
|
||||
it.index = 0
|
||||
return prev
|
||||
})
|
||||
return it
|
||||
}
|
||||
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
hk := &storagepb.HmacKeyMetadata{
|
||||
AccessId: accessID,
|
||||
Project: toProjectResource(project),
|
||||
ServiceAccountEmail: serviceAccountEmail,
|
||||
State: string(attrs.State),
|
||||
Etag: attrs.Etag,
|
||||
}
|
||||
var paths []string
|
||||
fieldMask := &fieldmaskpb.FieldMask{
|
||||
Paths: paths,
|
||||
}
|
||||
if attrs.State != "" {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "state")
|
||||
}
|
||||
req := &storagepb.UpdateHmacKeyRequest{
|
||||
HmacKey: hk,
|
||||
UpdateMask: fieldMask,
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
var metadata *storagepb.HmacKeyMetadata
|
||||
err := run(ctx, func() error {
|
||||
var err error
|
||||
metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toHMACKeyFromProto(metadata), nil
|
||||
}
|
||||
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.CreateHmacKeyRequest{
|
||||
Project: toProjectResource(project),
|
||||
ServiceAccountEmail: serviceAccountEmail,
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
var res *storagepb.CreateHmacKeyResponse
|
||||
err := run(ctx, func() error {
|
||||
var err error
|
||||
res, err = c.raw.CreateHmacKey(ctx, req, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := toHMACKeyFromProto(res.Metadata)
|
||||
key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes)
|
||||
|
||||
return key, nil
|
||||
}
|
||||
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.DeleteHmacKeyRequest{
|
||||
AccessId: accessID,
|
||||
Project: toProjectResource(project),
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
return run(ctx, func() error {
|
||||
return c.raw.DeleteHmacKey(ctx, req, s.gax...)
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
}
|
||||
|
||||
// Notification methods.
|
||||
@@ -988,6 +1335,7 @@ type gRPCReader struct {
|
||||
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
|
||||
leftovers []byte
|
||||
cancel context.CancelFunc
|
||||
settings *settings
|
||||
}
|
||||
|
||||
// Read reads bytes into the user's buffer from an open gRPC stream.
|
||||
@@ -1063,6 +1411,10 @@ func (r *gRPCReader) Close() error {
|
||||
// an attempt to reopen the stream.
|
||||
func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
|
||||
msg, err := r.stream.Recv()
|
||||
var shouldRetry = ShouldRetry
|
||||
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
|
||||
shouldRetry = r.settings.retry.shouldRetry
|
||||
}
|
||||
if err != nil && shouldRetry(err) {
|
||||
// This will "close" the existing stream and immediately attempt to
|
||||
// reopen the stream, but will backoff if further attempts are necessary.
|
||||
@@ -1127,6 +1479,7 @@ type gRPCWriter struct {
|
||||
attrs *ObjectAttrs
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
settings *settings
|
||||
|
||||
sendCRC32C bool
|
||||
|
||||
@@ -1144,21 +1497,27 @@ func (w *gRPCWriter) startResumableUpload() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
|
||||
WriteObjectSpec: spec,
|
||||
})
|
||||
|
||||
w.upid = upres.GetUploadId()
|
||||
return err
|
||||
return run(w.ctx, func() error {
|
||||
upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
|
||||
WriteObjectSpec: spec,
|
||||
})
|
||||
w.upid = upres.GetUploadId()
|
||||
return err
|
||||
}, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx))
|
||||
}
|
||||
|
||||
// queryProgress is a helper that queries the status of the resumable upload
|
||||
// associated with the given upload ID.
|
||||
func (w *gRPCWriter) queryProgress() (int64, error) {
|
||||
q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})
|
||||
var persistedSize int64
|
||||
err := run(w.ctx, func() error {
|
||||
q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})
|
||||
persistedSize = q.GetPersistedSize()
|
||||
return err
|
||||
}, w.settings.retry, true, setRetryHeaderGRPC(w.ctx))
|
||||
|
||||
// q.GetCommittedSize() will return 0 if q is nil.
|
||||
return q.GetPersistedSize(), err
|
||||
return persistedSize, err
|
||||
}
|
||||
|
||||
// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if
|
||||
@@ -1173,6 +1532,10 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
||||
var err error
|
||||
var finishWrite bool
|
||||
var sent, limit int = 0, maxPerMessageWriteSize
|
||||
var shouldRetry = ShouldRetry
|
||||
if w.settings.retry != nil && w.settings.retry.shouldRetry != nil {
|
||||
shouldRetry = w.settings.retry.shouldRetry
|
||||
}
|
||||
offset := start
|
||||
toWrite := w.buf[:recvd]
|
||||
for {
|
||||
@@ -1203,7 +1566,8 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
||||
// The first message on the WriteObject stream must either be the
|
||||
// Object or the Resumable Upload ID.
|
||||
if first {
|
||||
w.stream, err = w.c.raw.WriteObject(w.ctx)
|
||||
ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket)))
|
||||
w.stream, err = w.c.raw.WriteObject(ctx)
|
||||
if err != nil {
|
||||
return nil, 0, false, err
|
||||
}
|
||||
@@ -1225,8 +1589,16 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
||||
// on the *last* message of the stream (instead of the first).
|
||||
if w.sendCRC32C {
|
||||
req.ObjectChecksums = &storagepb.ObjectChecksums{
|
||||
Crc32C: proto.Uint32(w.attrs.CRC32C),
|
||||
Md5Hash: w.attrs.MD5,
|
||||
Crc32C: proto.Uint32(w.attrs.CRC32C),
|
||||
}
|
||||
}
|
||||
if len(w.attrs.MD5) != 0 {
|
||||
if cs := req.GetObjectChecksums(); cs == nil {
|
||||
req.ObjectChecksums = &storagepb.ObjectChecksums{
|
||||
Md5Hash: w.attrs.MD5,
|
||||
}
|
||||
} else {
|
||||
cs.Md5Hash = w.attrs.MD5
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1336,8 +1708,8 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
|
||||
spec := &storagepb.WriteObjectSpec{
|
||||
Resource: attrs.toProtoObject(w.bucket),
|
||||
}
|
||||
// WriteObject doesn't support the generation condition, so use -1.
|
||||
if err := applyCondsProto("WriteObject", -1, w.conds, spec); err != nil {
|
||||
// WriteObject doesn't support the generation condition, so use default.
|
||||
if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return spec, nil
|
||||
@@ -1345,7 +1717,12 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
|
||||
|
||||
// read copies the data in the reader to the given buffer and reports how much
|
||||
// data was read into the buffer and if there is no more data to read (EOF).
|
||||
// Furthermore, if the attrs.ContentType is unset, the first bytes of content
|
||||
// will be sniffed for a matching content type.
|
||||
func (w *gRPCWriter) read() (int, bool, error) {
|
||||
if w.attrs.ContentType == "" {
|
||||
w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader)
|
||||
}
|
||||
// Set n to -1 to start the Read loop.
|
||||
var n, recvd int = -1, 0
|
||||
var err error
|
||||
|
||||
161
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
161
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
@@ -20,6 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/iterator"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
@@ -90,7 +91,7 @@ type HMACKeyHandle struct {
|
||||
projectID string
|
||||
accessID string
|
||||
retry *retryConfig
|
||||
raw *raw.ProjectsHmacKeysService
|
||||
tc storageClient
|
||||
}
|
||||
|
||||
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
|
||||
@@ -101,7 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
||||
projectID: projectID,
|
||||
accessID: accessID,
|
||||
retry: c.retry,
|
||||
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||
tc: c.tc,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,32 +114,15 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
call := hkh.raw.Get(hkh.projectID, hkh.accessID)
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
|
||||
setClientHeader(call.Header())
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
err = run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, hkh.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hkPb := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return pbHmacKeyToHMACKey(hkPb, false)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
||||
@@ -147,49 +131,59 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
|
||||
delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID)
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
delCall = delCall.UserProject(desc.userProjectID)
|
||||
}
|
||||
setClientHeader(delCall.Header())
|
||||
|
||||
return run(ctx, func() error {
|
||||
return delCall.Context(ctx).Do()
|
||||
}, hkh.retry, true, setRetryHeaderHTTP(delCall))
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
}
|
||||
|
||||
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||
pbmd := pb.Metadata
|
||||
if pbmd == nil {
|
||||
func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||
hkmd := hk.Metadata
|
||||
if hkmd == nil {
|
||||
return nil, errors.New("field Metadata cannot be nil")
|
||||
}
|
||||
createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated)
|
||||
createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("field CreatedTime: %v", err)
|
||||
return nil, fmt.Errorf("field CreatedTime: %w", err)
|
||||
}
|
||||
updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated)
|
||||
updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated)
|
||||
if err != nil && !updatedTimeCanBeNil {
|
||||
return nil, fmt.Errorf("field UpdatedTime: %v", err)
|
||||
return nil, fmt.Errorf("field UpdatedTime: %w", err)
|
||||
}
|
||||
|
||||
hmk := &HMACKey{
|
||||
AccessID: pbmd.AccessId,
|
||||
Secret: pb.Secret,
|
||||
Etag: pbmd.Etag,
|
||||
ID: pbmd.Id,
|
||||
State: HMACState(pbmd.State),
|
||||
ProjectID: pbmd.ProjectId,
|
||||
hmKey := &HMACKey{
|
||||
AccessID: hkmd.AccessId,
|
||||
Secret: hk.Secret,
|
||||
Etag: hkmd.Etag,
|
||||
ID: hkmd.Id,
|
||||
State: HMACState(hkmd.State),
|
||||
ProjectID: hkmd.ProjectId,
|
||||
CreatedTime: createdTime,
|
||||
UpdatedTime: updatedTime,
|
||||
|
||||
ServiceAccountEmail: pbmd.ServiceAccountEmail,
|
||||
ServiceAccountEmail: hkmd.ServiceAccountEmail,
|
||||
}
|
||||
|
||||
return hmk, nil
|
||||
return hmKey, nil
|
||||
}
|
||||
|
||||
func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
|
||||
if pbmd == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &HMACKey{
|
||||
AccessID: pbmd.GetAccessId(),
|
||||
ID: pbmd.GetId(),
|
||||
State: HMACState(pbmd.GetState()),
|
||||
ProjectID: pbmd.GetProject(),
|
||||
CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
|
||||
UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
|
||||
ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
|
||||
}
|
||||
}
|
||||
|
||||
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
||||
@@ -203,29 +197,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma
|
||||
return nil, errors.New("storage: expecting a non-blank service account email")
|
||||
}
|
||||
|
||||
svc := raw.NewProjectsHmacKeysService(c.raw)
|
||||
call := svc.Create(projectID, serviceAccountEmail)
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var hkPb *raw.HmacKey
|
||||
|
||||
if err := run(ctx, func() error {
|
||||
h, err := call.Context(ctx).Do()
|
||||
hkPb = h
|
||||
return err
|
||||
}, c.retry, false, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pbHmacKeyToHMACKey(hkPb, true)
|
||||
o := makeStorageOpts(false, c.retry, desc.userProjectID)
|
||||
hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
|
||||
@@ -247,35 +226,15 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt
|
||||
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
|
||||
}
|
||||
|
||||
call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{
|
||||
Etag: au.Etag,
|
||||
State: string(au.State),
|
||||
})
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
isIdempotent := len(au.Etag) > 0
|
||||
err = run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, h.retry, isIdempotent, setRetryHeaderHTTP(call))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hkPb := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return pbHmacKeyToHMACKey(hkPb, false)
|
||||
o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID)
|
||||
hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// An HMACKeysIterator is an iterator over HMACKeys.
|
||||
@@ -301,27 +260,13 @@ type HMACKeysIterator struct {
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
|
||||
it := &HMACKeysIterator{
|
||||
ctx: ctx,
|
||||
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||
projectID: projectID,
|
||||
retry: c.retry,
|
||||
}
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(&it.desc)
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.hmacKeys) - it.index },
|
||||
func() interface{} {
|
||||
prev := it.hmacKeys
|
||||
it.hmacKeys = it.hmacKeys[:0]
|
||||
it.index = 0
|
||||
return prev
|
||||
})
|
||||
return it
|
||||
o := makeStorageOpts(true, c.retry, desc.userProjectID)
|
||||
return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...)
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
@@ -350,6 +295,8 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) {
|
||||
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||
// TODO: Remove fetch method upon integration. This method is internalized into
|
||||
// httpStorageClient.ListHMACKeys() as it is the only caller.
|
||||
call := it.raw.List(it.projectID)
|
||||
setClientHeader(call.Header())
|
||||
if pageToken != "" {
|
||||
@@ -379,10 +326,10 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
|
||||
}
|
||||
|
||||
for _, metadata := range resp.Items {
|
||||
hkPb := &raw.HmacKey{
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
hkey, err := pbHmacKeyToHMACKey(hkPb, true)
|
||||
hkey, err := toHMACKeyFromRaw(hk, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user