mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 00:26:36 +03:00
vendor: run make vendor-update
This commit is contained in:
63
go.mod
63
go.mod
@@ -9,7 +9,7 @@ replace cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.51.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4
|
||||
@@ -17,8 +17,8 @@ require (
|
||||
github.com/VictoriaMetrics/metrics v1.35.2
|
||||
github.com/VictoriaMetrics/metricsql v0.84.3
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.9
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.11
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.68
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2
|
||||
github.com/bmatcuk/doublestar/v4 v4.8.1
|
||||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
@@ -42,23 +42,23 @@ require (
|
||||
golang.org/x/net v0.37.0
|
||||
golang.org/x/oauth2 v0.28.0
|
||||
golang.org/x/sys v0.31.0
|
||||
google.golang.org/api v0.225.0
|
||||
google.golang.org/api v0.228.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.119.0 // indirect
|
||||
cloud.google.com/go v0.120.0 // indirect
|
||||
cloud.google.com/go/auth v0.15.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
cloud.google.com/go/iam v1.4.1 // indirect
|
||||
cloud.google.com/go/iam v1.4.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.64 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
|
||||
@@ -68,8 +68,8 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
|
||||
github.com/aws/smithy-go v1.22.3 // indirect
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
|
||||
@@ -82,11 +82,12 @@ require (
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.5 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
@@ -103,30 +104,32 @@ require (
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.121.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.121.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.121.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.122.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.122.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.122.0 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.21.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.0 // indirect
|
||||
github.com/prometheus/sigv4 v0.1.2 // indirect
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/component v1.27.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap v1.27.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.121.0 // indirect
|
||||
go.opentelemetry.io/collector/consumer v1.27.0 // indirect
|
||||
go.opentelemetry.io/collector/pdata v1.27.0 // indirect
|
||||
go.opentelemetry.io/collector/pipeline v0.121.0 // indirect
|
||||
go.opentelemetry.io/collector/processor v0.121.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.121.0 // indirect
|
||||
go.opentelemetry.io/collector/component v1.28.1 // indirect
|
||||
go.opentelemetry.io/collector/confmap v1.28.1 // indirect
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.122.1 // indirect
|
||||
go.opentelemetry.io/collector/consumer v1.28.1 // indirect
|
||||
go.opentelemetry.io/collector/featuregate v1.28.1 // indirect
|
||||
go.opentelemetry.io/collector/pdata v1.28.1 // indirect
|
||||
go.opentelemetry.io/collector/pipeline v0.122.1 // indirect
|
||||
go.opentelemetry.io/collector/processor v0.122.1 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.122.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
|
||||
@@ -142,14 +145,14 @@ require (
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250311190419-81fb87f6b8bf // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250311190419-81fb87f6b8bf // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250311190419-81fb87f6b8bf // indirect
|
||||
google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
|
||||
google.golang.org/grpc v1.71.0 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apimachinery v0.32.3 // indirect
|
||||
k8s.io/client-go v0.32.3 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
|
||||
)
|
||||
|
||||
166
go.sum
166
go.sum
@@ -1,19 +1,19 @@
|
||||
cloud.google.com/go v0.119.0 h1:tw7OjErMzJKbbjaEHkrt60KQrK5Wus/boCZ7tm5/RNE=
|
||||
cloud.google.com/go v0.119.0/go.mod h1:fwB8QLzTcNevxqi8dcpR+hoMIs3jBherGS9VUBDAW08=
|
||||
cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA=
|
||||
cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q=
|
||||
cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
|
||||
cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
|
||||
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
|
||||
cloud.google.com/go/iam v1.4.1 h1:cFC25Nv+u5BkTR/BT1tXdoF2daiVbZ1RLx2eqfQ9RMM=
|
||||
cloud.google.com/go/iam v1.4.1/go.mod h1:2vUEJpUG3Q9p2UdsyksaKpDzlwOrnMzS30isdReIcLM=
|
||||
cloud.google.com/go/longrunning v0.6.4 h1:3tyw9rO3E2XVXzSApn1gyEEnH2K9SynNQjMlBi3uHLg=
|
||||
cloud.google.com/go/longrunning v0.6.4/go.mod h1:ttZpLCe6e7EXvn9OxpBRx7kZEB0efv8yBO6YnVMfhJs=
|
||||
cloud.google.com/go/iam v1.4.2 h1:4AckGYAYsowXeHzsn/LCKWIwSWLkdb0eGjH8wWkd27Q=
|
||||
cloud.google.com/go/iam v1.4.2/go.mod h1:REGlrt8vSlh4dfCJfSEcNjLGq75wW75c5aU3FLOYq34=
|
||||
cloud.google.com/go/longrunning v0.6.5 h1:sD+t8DO8j4HKW4QfouCklg7ZC1qC4uzVZt8iz3uTW+Q=
|
||||
cloud.google.com/go/longrunning v0.6.5/go.mod h1:Et04XK+0TTLKa5IPYryKf5DkpwImy6TluQ1QTLwlKmY=
|
||||
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
|
||||
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 h1:DSDNVxqkoXJiko6x8a90zidoYqnYYa6c1MTzDKzKkTo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1/go.mod h1:zGqV2R4Cr/k8Uye5w+dgQ06WJtEcbQG/8J7BB6hnCr4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
@@ -30,8 +30,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kg
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 h1:8BKxhZZLX/WosEeoCvWysmKUscfa9v8LIPEEU0JjE2o=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
@@ -59,14 +59,14 @@ github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38y
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.11 h1:/hkJIxaQzFQy0ebFjG5NHmAcLCrvNSuXeHnxLfeCz1Y=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.11/go.mod h1:OFPRZVQxC4mKqy2Go6Cse/m9NOStAo6YaMvAcTMUROg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.64 h1:NH4RAQJEXBDQDUudTqMNHdyyEVa5CvMn0tQicqv48jo=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.64/go.mod h1:tUoJfj79lzEcalHDbyNkpnZZTRg/2ayYOK/iYnRfPbo=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66 h1:MTLivtC3s89de7Fe3P8rzML/8XPNRfuyJhlRTsCEt0k=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66/go.mod h1:NAuQ2s6gaFEsuTIb2+P5t6amB1w5MhvJFxppoezGWH0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.68 h1:2hZuCv5lB+N2gESbJgp16JRvsD1HX95kLx7CntOJKY4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.68/go.mod h1:90G5L53I4a/ugFl89l5vU9rMHnc7axbvhak5yz2wpTQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
|
||||
@@ -85,10 +85,10 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91Liq
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.2 h1:pdgODsAhGo4dvzC3JAG5Ce0PX8kWXrTZGx+jxADD+5E=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.2/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.2 h1:wK8O+j2dOolmpNVY1EWIbLgxrGCHJKVPm08Hv/u80M8=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.2/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
|
||||
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
|
||||
@@ -166,8 +166,8 @@ github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I
|
||||
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
@@ -188,8 +188,8 @@ github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
|
||||
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.5 h1:VgzTY2jogw3xt39CusEnFJWm7rlsq5yL5q9XdLOuP5g=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.5/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
|
||||
github.com/gophercloud/gophercloud/v2 v2.4.0 h1:XhP5tVEH3ni66NSNK1+0iSO6kaGPH/6srtx6Cr+8eCg=
|
||||
@@ -216,6 +216,8 @@ github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISH
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
|
||||
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
|
||||
@@ -291,14 +293,14 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.121.0 h1:I+F6xdXQsiXXdce7yjHN+y4LX5MrZI1kNmhBunJffdA=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.121.0/go.mod h1:cRh3l2emFBwW96dHnlPLr1psbEYjYJmn5qFujOkbfRo=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.121.0 h1:efEcUMbyFWBx56TQDz2IMsuI0kQ5g8Im0DjQc9w9HBU=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.121.0/go.mod h1:9ghLP9djsDo5xzmzkADqeJjZb3l92XIRhpAz/ToX2QM=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.121.0 h1:D7mQQKd4rncv3PSsbDGayNENqmVwN1dFvPo3wHFzhI4=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.121.0/go.mod h1:swPiDfFHEiy9x2TwNO3uexCkwppLWfPRVoJdpJvKIQE=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.121.0 h1:+wj+Sw08WDdL/9lD4OUy1PFgQMsiyLuSmlmb3HbKPv4=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.121.0/go.mod h1:YczZl2MmjOUdg5eXg+fAW0my/EG+77b27ue6vj7xPHU=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.122.0 h1:zHlrYHCN/uGsdfWnAqFb6iksIQv1Aq9lsSTMe/kDsZ0=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.122.0/go.mod h1:lG9v3A48Y/jox3y8TdhCuakVTZfslTs+u2lkdhc6LIk=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.122.0 h1:P6lq+OWqsSdO+o+uTrqu/lko96/MnS+Zc4SqMo3bdvs=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.122.0/go.mod h1:45Di232vetvGjROIPxlBlyBMBAgA95szYP8du09shDE=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.122.0 h1:Jsn9I74nG85Iw7wWET6g0eQ9tbwVndgNHbzHqdlZVqI=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.122.0/go.mod h1:BpcyQo7MedcfxlBmIgRB5DxdLlEa0wHRJ/Nhe8jjnW4=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.122.0 h1:VoVWWmasrx6boiis/OV+HmkEXtVm73LXeZMYHJwEgwE=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.122.0/go.mod h1:DEk8LYKrIZS01fhJXohi4tRR89iEcF3zt0oHDTB2TT0=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||
@@ -320,12 +322,14 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
|
||||
github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
|
||||
github.com/prometheus/prometheus v0.302.1 h1:xqVdrwrB4WNpdgJqxsz5loqFWNUZitsK8myqLuSZ6Ag=
|
||||
github.com/prometheus/prometheus v0.302.1/go.mod h1:YcyCoTbUR/TM8rY3Aoeqr0AWTu/pu1Ehh+trpX3eRzg=
|
||||
github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8=
|
||||
github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
|
||||
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
@@ -377,38 +381,40 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/component v1.27.0 h1:6wk0K23YT9lSprX8BH9x5w8ssAORE109ekH/ix2S614=
|
||||
go.opentelemetry.io/collector/component v1.27.0/go.mod h1:fIyBHoa7vDyZL3Pcidgy45cx24tBe7iHWne097blGgo=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.121.0 h1:G4KqBUuAqnQ1kB3fUxXPwspjwnhGZzdArlO7vc343og=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.121.0/go.mod h1:ufRv8q15XNdbr9nNzdepMHlLl2aC3NHQgecCzp5VRns=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.121.0 h1:4q1/7WnP9LPKaY4HAd8/OkzhllZpRACKAOlWsqbrzqc=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.121.0/go.mod h1:H7bEXDPMYNeWcHal0xyKlVfRPByVxale7hCJ+Myjq3Q=
|
||||
go.opentelemetry.io/collector/confmap v1.27.0 h1:OIjPcjij1NxkVQsQVmHro4+t1eYNFiUGib9+J9YBZhM=
|
||||
go.opentelemetry.io/collector/confmap v1.27.0/go.mod h1:tmOa6iw3FJsEgfBHKALqvcdfRtf71JZGor0wSM5MoH8=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.121.0 h1:pZ7SOl/i3kUIPdUwIeHHsYqzOHNLCwiyXZnwQ7rLO3E=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.121.0/go.mod h1:YI1Sp8mbYro/H3rqH4csTq68VUuie5WVb7LI1o5+tVc=
|
||||
go.opentelemetry.io/collector/consumer v1.27.0 h1:JoXdoCeFDJG3d9TYrKHvTT4eBhzKXDVTkWW5mDfnLiY=
|
||||
go.opentelemetry.io/collector/consumer v1.27.0/go.mod h1:1B/+kTDUI6u3mCIOAkm5ityIpv5uC0Ll78IA50SNZ24=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.121.0 h1:EIJPAXQY0w9j1k/e5OzJqOYVEr6WljKpJBjgkkp/hWw=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.121.0/go.mod h1:Hmj+TizzsLU0EmS2n/rJYScOybNmm3mrAjis6ed7qTw=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.121.0 h1:/FJ7L6+G++FvktXc/aBnnYDIKLoYsWLh0pKbvzFFwF8=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.121.0/go.mod h1:KKy8Qg/vOnyseoi7A9/x1a1oEqSmf0WBHkJFlnQH0Ow=
|
||||
go.opentelemetry.io/collector/pdata v1.27.0 h1:66yI7FYkUDia74h48Fd2/KG2Vk8DxZnGw54wRXykCEU=
|
||||
go.opentelemetry.io/collector/pdata v1.27.0/go.mod h1:18e8/xDZsqyj00h/5HM5GLdJgBzzG9Ei8g9SpNoiMtI=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.121.0 h1:DFBelDRsZYxEaSoxSRtseAazsHJfqfC/Yl64uPicl2g=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.121.0/go.mod h1:j/fjrd7ybJp/PXkba92QLzx7hykUVmU8x/WJvI2JWSg=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.121.0 h1:FFz+rdb7o6JRZ82Zmp6WKEdKnEMaoF3jLb7F1F21ijg=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.121.0/go.mod h1:UhiSwmVpBbuKlPdmhBytiVTHipSz/JO6c4mbD4kWOPg=
|
||||
go.opentelemetry.io/collector/pipeline v0.121.0 h1:SOiocdyWCJCjWAb96HIxsy9enp2qyQ1NRFo26qyHlCE=
|
||||
go.opentelemetry.io/collector/pipeline v0.121.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
|
||||
go.opentelemetry.io/collector/processor v0.121.0 h1:OcLrJ2F17cU0oDtXEYbGvL8vbku/kRQgAafSZ3+8jLY=
|
||||
go.opentelemetry.io/collector/processor v0.121.0/go.mod h1:BoFEMvPn5/p53eWz+R9cibIxCXzaRZ/RtcBPtvqXNaQ=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.121.0 h1:1c3mEABELrxdC1obSQjIlfh5jZljJlzUravmzy1Mofo=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.121.0/go.mod h1:oL4S/eguZ6XTK6IxAQXhXD9yWuRrG5/Maiskbf9HL0o=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.121.0 h1:AiqDKzpEYZpiP9y3RRp4G9ym6fG2f9HByu3yWkSdd2E=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.121.0/go.mod h1:Puk+6YYKyqLVKqpftUXg0blMrd3BlH/Av+oiajp1sHQ=
|
||||
go.opentelemetry.io/collector/semconv v0.121.0 h1:dtdgh5TsKWGZXIBMsyCMVrY1VgmyWlXHgWx/VH9tL1U=
|
||||
go.opentelemetry.io/collector/semconv v0.121.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
|
||||
go.opentelemetry.io/collector/component v1.28.1 h1:JjwfvLR0UdadRDAANAdM4mOSwGmfGO3va2X+fdk4YdA=
|
||||
go.opentelemetry.io/collector/component v1.28.1/go.mod h1:jwZRDML3tXo1whueZdRf+y6z3DeEYTLPBmb/O1ujB40=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.122.1 h1:zMQC0y8ZBITa87GOwEANdOoAox5I4UgaIHxY79nwCbk=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.122.1/go.mod h1:ZYwOgoXyPu4gGqfQ5DeaEpStpUCD/Clctz4rMd9qQYw=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.122.1 h1:HE4oeLub2FWVTUzCQG6SWwfnJfcK1FMknXhGQ2gOxnY=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.122.1/go.mod h1:o3Xq6z3C0aVhrd/fD56aKxShrILVnHnbgQVP5NoFuic=
|
||||
go.opentelemetry.io/collector/confmap v1.28.1 h1:/zUmvpnERhFXrxVCVgubjJRgeOwdPbhTfUILZPUBfyw=
|
||||
go.opentelemetry.io/collector/confmap v1.28.1/go.mod h1:2aJggo/KQl7uynFyMNNMbl7jvKkSD7CniOVEpCbjRng=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.122.1 h1:E8sdJens/sq+evv/VHzbDP3B28uZIAPkKjtB4mVVTso=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.122.1/go.mod h1:33HDN5uVKRihgLiShZZDzxN0qiTA1+t8hK41rrf1jls=
|
||||
go.opentelemetry.io/collector/consumer v1.28.1 h1:3lHW2e0i7kEkbDqK1vErA8illqPpwDxMzgc5OUDsJ0Y=
|
||||
go.opentelemetry.io/collector/consumer v1.28.1/go.mod h1:g0T16JPMYFN6T2noh+1YBxJSt5i5Zp+Y0Y6pvkMqsDQ=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.122.1 h1:LKkLMdWwJCuOYyCMVzwc0OG9vncIqpl8Tp9+H8RikNg=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.122.1/go.mod h1:pYqWgx62ou3uUn8nlt2ohRyKod+7xLTf/uA3YfRwVkA=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.122.1 h1:iK1hGbho/XICdBfGb4MnKwF9lnhLmv09yQ4YlVm+LGo=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.122.1/go.mod h1:xYbRPP1oWcYUUDQJTlv78M/rlYb+qE4weiv++ObZRSU=
|
||||
go.opentelemetry.io/collector/featuregate v1.28.1 h1:ZpvRAAFxxi4RLr1G0Fju28wA7NhTA20MNT60Ftv+ToY=
|
||||
go.opentelemetry.io/collector/featuregate v1.28.1/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
|
||||
go.opentelemetry.io/collector/pdata v1.28.1 h1:ORl5WLpQJvjzBVpHu12lqKMdcf/qDBwRXMcUubhybiQ=
|
||||
go.opentelemetry.io/collector/pdata v1.28.1/go.mod h1:asKE8MD/4SOKz1mCrGdAz4VO2U2HUNg8A6094uK7pq0=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.122.1 h1:25Fs0eL/J/M2ZEaVplesbI1H7pYx462zUUVxVOszpOg=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.122.1/go.mod h1:+jSjgb4zRnNmr1R/zgVLVyTVSm9irfGrvGTrk3lDxSE=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.122.1 h1:9DO8nUUnPAGYMKmrep6wLAfOHprvKY4w/7LpE4jldPQ=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.122.1/go.mod h1:hYdNrn8KxFwq1nf44YYRgNhDjJTBzoyEr/Qa26pN0t4=
|
||||
go.opentelemetry.io/collector/pipeline v0.122.1 h1:f0uuiDmanVyKwfYo6cWveJsGbLXidV7i+Z7u8QJwWxI=
|
||||
go.opentelemetry.io/collector/pipeline v0.122.1/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
|
||||
go.opentelemetry.io/collector/processor v0.122.1 h1:AvZvEujq8+FYdJsm9lmAMwuuae5Y2/vKIkOJwsoxsxQ=
|
||||
go.opentelemetry.io/collector/processor v0.122.1/go.mod h1:nYKctftba7SbdLml6LxgIrnYRXCShDe2bnNWjTIpF7g=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.122.1 h1:n4UOx1mq+kLaRiHGsu7vBLq+EGXfzWhSxyFweMjMl54=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.122.1/go.mod h1:8/NRWx18tNJMBwCQ8/YPWr4qsFUrwk27qE7/dXoJb1M=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.122.1 h1:Wfv4/7n4YK1HunAVTMS6yf0xmDjCkftJ6EECNcSwzfs=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.122.1/go.mod h1:9zMW3NQ9+DzcJ1cUq5BhZg3ajoUEMGhNY0ZdYjpX+VI=
|
||||
go.opentelemetry.io/collector/semconv v0.122.1 h1:WLzDi3QC4/+LpNMLY90zn5aMDJKyqg/ujW2O4T4sxHg=
|
||||
go.opentelemetry.io/collector/semconv v0.122.1/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM=
|
||||
@@ -485,18 +491,18 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.225.0 h1:+4/IVqBQm0MV5S+JW3kdEGC1WtOmM2mXN1LKH1LdNlw=
|
||||
google.golang.org/api v0.225.0/go.mod h1:WP/0Xm4LVvMOCldfvOISnWquSRWbG2kArDZcg+W2DbY=
|
||||
google.golang.org/genproto v0.0.0-20250311190419-81fb87f6b8bf h1:114fkUG+I9ba4UmaoNZt0UtiRmBng3KJIB/E0avfYII=
|
||||
google.golang.org/genproto v0.0.0-20250311190419-81fb87f6b8bf/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250311190419-81fb87f6b8bf h1:BdIVRm+fyDUn8lrZLPSlBCfM/YKDwUBYgDoLv9+DYo0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250311190419-81fb87f6b8bf/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250311190419-81fb87f6b8bf h1:dHDlF3CWxQkefK9IJx+O8ldY0gLygvrlYRBNbPqDWuY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250311190419-81fb87f6b8bf/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
|
||||
google.golang.org/api v0.228.0 h1:X2DJ/uoWGnY5obVjewbp8icSL5U4FzuCfy9OjbLSnLs=
|
||||
google.golang.org/api v0.228.0/go.mod h1:wNvRS1Pbe8r4+IfBIniV8fwCpGwTrYa+kMUDiC5z5a4=
|
||||
google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 h1:qEFnJI6AnfZk0NNe8YTyXQh5i//Zxi4gBHwRgp76qpw=
|
||||
google.golang.org/genproto v0.0.0-20250324211829-b45e905df463/go.mod h1:SqIx1NV9hcvqdLHo7uNZDS5lrUJybQ3evo3+z/WBfA0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
|
||||
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
@@ -522,8 +528,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
|
||||
7
vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
generated
vendored
7
vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
generated
vendored
@@ -1,5 +1,12 @@
|
||||
# Changelog
|
||||
|
||||
## [0.2.8](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.7...auth/oauth2adapt/v0.2.8) (2025-03-17)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **auth/oauth2adapt:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
|
||||
|
||||
## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09)
|
||||
|
||||
|
||||
|
||||
7
vendor/cloud.google.com/go/iam/CHANGES.md
generated
vendored
7
vendor/cloud.google.com/go/iam/CHANGES.md
generated
vendored
@@ -1,6 +1,13 @@
|
||||
# Changes
|
||||
|
||||
|
||||
## [1.4.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.1...iam/v1.4.2) (2025-03-13)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **iam:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
|
||||
|
||||
## [1.4.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.0...iam/v1.4.1) (2025-03-06)
|
||||
|
||||
|
||||
|
||||
10
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
10
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
@@ -1789,6 +1789,16 @@
|
||||
"release_level": "stable",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/modelarmor/apiv1": {
|
||||
"api_shortname": "modelarmor",
|
||||
"distribution_name": "cloud.google.com/go/modelarmor/apiv1",
|
||||
"description": "Model Armor API",
|
||||
"language": "go",
|
||||
"client_library_type": "generated",
|
||||
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/modelarmor/latest/apiv1",
|
||||
"release_level": "preview",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/monitoring/apiv3/v2": {
|
||||
"api_shortname": "monitoring",
|
||||
"distribution_name": "cloud.google.com/go/monitoring/apiv3/v2",
|
||||
|
||||
7
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
generated
vendored
7
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
generated
vendored
@@ -1,5 +1,12 @@
|
||||
# Release History
|
||||
|
||||
## 1.17.1 (2025-03-20)
|
||||
|
||||
### Other Changes
|
||||
|
||||
* Upgraded to Go 1.23
|
||||
* Upgraded dependencies
|
||||
|
||||
## 1.17.0 (2025-01-07)
|
||||
|
||||
### Features Added
|
||||
|
||||
2
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
generated
vendored
2
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
generated
vendored
@@ -40,5 +40,5 @@ const (
|
||||
Module = "azcore"
|
||||
|
||||
// Version is the semantic version (see http://semver.org) of this module.
|
||||
Version = "v1.17.0"
|
||||
Version = "v1.17.1"
|
||||
)
|
||||
|
||||
@@ -65,6 +65,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
|
||||
|
||||
type Account = shared.Account
|
||||
|
||||
type TokenSource = base.TokenSource
|
||||
|
||||
const (
|
||||
TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
|
||||
TokenSourceCache = base.TokenSourceCache
|
||||
)
|
||||
|
||||
// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
|
||||
// must contain the public certificate and the private key. If a PEM block is encrypted and
|
||||
// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
|
||||
@@ -639,7 +646,7 @@ func (cca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
|
||||
if err != nil {
|
||||
return AuthResult{}, err
|
||||
}
|
||||
return cca.base.AuthResultFromToken(ctx, authParams, token, true)
|
||||
return cca.base.AuthResultFromToken(ctx, authParams, token)
|
||||
}
|
||||
|
||||
// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
|
||||
@@ -733,7 +740,7 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string,
|
||||
if err != nil {
|
||||
return AuthResult{}, err
|
||||
}
|
||||
return cca.base.AuthResultFromToken(ctx, authParams, token, true)
|
||||
return cca.base.AuthResultFromToken(ctx, authParams, token)
|
||||
}
|
||||
|
||||
// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf
|
||||
|
||||
@@ -5,15 +5,16 @@ package base
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
|
||||
@@ -94,6 +95,7 @@ type AuthResult struct {
|
||||
|
||||
// AuthResultMetadata which contains meta data for the AuthResult
|
||||
type AuthResultMetadata struct {
|
||||
RefreshOn time.Time
|
||||
TokenSource TokenSource
|
||||
}
|
||||
|
||||
@@ -101,9 +103,8 @@ type TokenSource int
|
||||
|
||||
// These are all the types of token flows.
|
||||
const (
|
||||
SourceUnknown TokenSource = 0
|
||||
IdentityProvider TokenSource = 1
|
||||
Cache TokenSource = 2
|
||||
TokenSourceIdentityProvider TokenSource = 0
|
||||
TokenSourceCache TokenSource = 1
|
||||
)
|
||||
|
||||
// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
|
||||
@@ -131,7 +132,8 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
|
||||
GrantedScopes: grantedScopes,
|
||||
DeclinedScopes: nil,
|
||||
Metadata: AuthResultMetadata{
|
||||
TokenSource: Cache,
|
||||
TokenSource: TokenSourceCache,
|
||||
RefreshOn: storageTokenResponse.AccessToken.RefreshOn.T,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@@ -148,7 +150,8 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco
|
||||
ExpiresOn: tokenResponse.ExpiresOn,
|
||||
GrantedScopes: tokenResponse.GrantedScopes.Slice,
|
||||
Metadata: AuthResultMetadata{
|
||||
TokenSource: IdentityProvider,
|
||||
TokenSource: TokenSourceIdentityProvider,
|
||||
RefreshOn: tokenResponse.RefreshOn.T,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@@ -164,6 +167,8 @@ type Client struct {
|
||||
AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
|
||||
cacheAccessor cache.ExportReplace
|
||||
cacheAccessorMu *sync.RWMutex
|
||||
canRefresh map[string]*atomic.Value
|
||||
canRefreshMu *sync.Mutex
|
||||
}
|
||||
|
||||
// Option is an optional argument to the New constructor.
|
||||
@@ -240,6 +245,8 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O
|
||||
cacheAccessorMu: &sync.RWMutex{},
|
||||
manager: storage.New(token),
|
||||
pmanager: storage.NewPartitionedManager(token),
|
||||
canRefresh: make(map[string]*atomic.Value),
|
||||
canRefreshMu: &sync.Mutex{},
|
||||
}
|
||||
for _, o := range options {
|
||||
if err = o(&client); err != nil {
|
||||
@@ -344,6 +351,28 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
|
||||
if silent.Claims == "" {
|
||||
ar, err = AuthResultFromStorage(storageTokenResponse)
|
||||
if err == nil {
|
||||
if rt := storageTokenResponse.AccessToken.RefreshOn.T; !rt.IsZero() && Now().After(rt) {
|
||||
b.canRefreshMu.Lock()
|
||||
refreshValue, ok := b.canRefresh[tenant]
|
||||
if !ok {
|
||||
refreshValue = &atomic.Value{}
|
||||
refreshValue.Store(false)
|
||||
b.canRefresh[tenant] = refreshValue
|
||||
}
|
||||
b.canRefreshMu.Unlock()
|
||||
if refreshValue.CompareAndSwap(false, true) {
|
||||
defer refreshValue.Store(false)
|
||||
// Added a check to see if the token is still same because there is a chance
|
||||
// that the token is already refreshed by another thread.
|
||||
// If the token is not same, we don't need to refresh it.
|
||||
// Which means it refreshed.
|
||||
if str, err := m.Read(ctx, authParams); err == nil && str.AccessToken.Secret == ar.AccessToken {
|
||||
if tr, er := b.Token.Credential(ctx, authParams, silent.Credential); er == nil {
|
||||
return b.AuthResultFromToken(ctx, authParams, tr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
|
||||
return ar, err
|
||||
}
|
||||
@@ -361,7 +390,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
|
||||
if err != nil {
|
||||
return ar, err
|
||||
}
|
||||
return b.AuthResultFromToken(ctx, authParams, token, true)
|
||||
return b.AuthResultFromToken(ctx, authParams, token)
|
||||
}
|
||||
|
||||
func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
|
||||
@@ -390,7 +419,7 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui
|
||||
return AuthResult{}, err
|
||||
}
|
||||
|
||||
return b.AuthResultFromToken(ctx, authParams, token, true)
|
||||
return b.AuthResultFromToken(ctx, authParams, token)
|
||||
}
|
||||
|
||||
// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
|
||||
@@ -419,15 +448,12 @@ func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams Acq
|
||||
authParams.UserAssertion = onBehalfOfParams.UserAssertion
|
||||
token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
|
||||
if err == nil {
|
||||
ar, err = b.AuthResultFromToken(ctx, authParams, token, true)
|
||||
ar, err = b.AuthResultFromToken(ctx, authParams, token)
|
||||
}
|
||||
return ar, err
|
||||
}
|
||||
|
||||
func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
|
||||
if !cacheWrite {
|
||||
return NewAuthResult(token, shared.Account{})
|
||||
}
|
||||
func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
|
||||
var m manager = b.manager
|
||||
if authParams.AuthorizationType == authority.ATOnBehalfOf {
|
||||
m = b.pmanager
|
||||
@@ -457,6 +483,10 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au
|
||||
return ar, err
|
||||
}
|
||||
|
||||
// This function wraps time.Now() and is used for refreshing the application
|
||||
// was created to test the function against refreshin
|
||||
var Now = time.Now
|
||||
|
||||
func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) {
|
||||
if b.cacheAccessor != nil {
|
||||
b.cacheAccessorMu.RLock()
|
||||
|
||||
@@ -72,6 +72,7 @@ type AccessToken struct {
|
||||
ClientID string `json:"client_id,omitempty"`
|
||||
Secret string `json:"secret,omitempty"`
|
||||
Scopes string `json:"target,omitempty"`
|
||||
RefreshOn internalTime.Unix `json:"refresh_on,omitempty"`
|
||||
ExpiresOn internalTime.Unix `json:"expires_on,omitempty"`
|
||||
ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
|
||||
CachedAt internalTime.Unix `json:"cached_at,omitempty"`
|
||||
@@ -83,7 +84,7 @@ type AccessToken struct {
|
||||
}
|
||||
|
||||
// NewAccessToken is the constructor for AccessToken.
|
||||
func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
|
||||
func NewAccessToken(homeID, env, realm, clientID string, cachedAt, refreshOn, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
|
||||
return AccessToken{
|
||||
HomeAccountID: homeID,
|
||||
Environment: env,
|
||||
@@ -93,6 +94,7 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
|
||||
Secret: token,
|
||||
Scopes: scopes,
|
||||
CachedAt: internalTime.Unix{T: cachedAt.UTC()},
|
||||
RefreshOn: internalTime.Unix{T: refreshOn.UTC()},
|
||||
ExpiresOn: internalTime.Unix{T: expiresOn.UTC()},
|
||||
ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
|
||||
TokenType: tokenType,
|
||||
|
||||
@@ -114,6 +114,7 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes
|
||||
realm,
|
||||
clientID,
|
||||
cachedAt,
|
||||
tokenResponse.RefreshOn.T,
|
||||
tokenResponse.ExpiresOn,
|
||||
tokenResponse.ExtExpiresOn.T,
|
||||
target,
|
||||
|
||||
@@ -194,6 +194,7 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces
|
||||
realm,
|
||||
clientID,
|
||||
cachedAt,
|
||||
tokenResponse.RefreshOn.T,
|
||||
tokenResponse.ExpiresOn,
|
||||
tokenResponse.ExtExpiresOn.T,
|
||||
target,
|
||||
|
||||
@@ -31,4 +31,6 @@ type TokenProviderResult struct {
|
||||
AccessToken string
|
||||
// ExpiresInSeconds is the lifetime of the token in seconds
|
||||
ExpiresInSeconds int
|
||||
// RefreshInSeconds indicates the suggested time to refresh the token, if any
|
||||
RefreshInSeconds int
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
|
||||
internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
|
||||
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
|
||||
@@ -110,7 +111,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
|
||||
Scopes: scopes,
|
||||
TenantID: authParams.AuthorityInfo.Tenant,
|
||||
}
|
||||
tr, err := cred.TokenProvider(ctx, params)
|
||||
pr, err := cred.TokenProvider(ctx, params)
|
||||
if err != nil {
|
||||
if len(scopes) == 0 {
|
||||
err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
|
||||
@@ -118,12 +119,18 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
|
||||
}
|
||||
return accesstokens.TokenResponse{}, err
|
||||
}
|
||||
return accesstokens.TokenResponse{
|
||||
tr := accesstokens.TokenResponse{
|
||||
TokenType: authParams.AuthnScheme.AccessTokenType(),
|
||||
AccessToken: tr.AccessToken,
|
||||
ExpiresOn: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
|
||||
AccessToken: pr.AccessToken,
|
||||
ExpiresOn: now.Add(time.Duration(pr.ExpiresInSeconds) * time.Second),
|
||||
GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
|
||||
}, nil
|
||||
}
|
||||
if pr.RefreshInSeconds > 0 {
|
||||
tr.RefreshOn = internalTime.DurationTime{
|
||||
T: now.Add(time.Duration(pr.RefreshInSeconds) * time.Second),
|
||||
}
|
||||
}
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
|
||||
/* #nosec */
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
@@ -112,19 +113,31 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
|
||||
}
|
||||
return c.AssertionCallback(ctx, options)
|
||||
}
|
||||
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
|
||||
claims := jwt.MapClaims{
|
||||
"aud": authParams.Endpoints.TokenEndpoint,
|
||||
"exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)),
|
||||
"iss": authParams.ClientID,
|
||||
"jti": uuid.New().String(),
|
||||
"nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),
|
||||
"sub": authParams.ClientID,
|
||||
})
|
||||
}
|
||||
|
||||
isADFSorDSTS := authParams.AuthorityInfo.AuthorityType == authority.ADFS ||
|
||||
authParams.AuthorityInfo.AuthorityType == authority.DSTS
|
||||
|
||||
var signingMethod jwt.SigningMethod = jwt.SigningMethodPS256
|
||||
thumbprintKey := "x5t#S256"
|
||||
|
||||
if isADFSorDSTS {
|
||||
signingMethod = jwt.SigningMethodRS256
|
||||
thumbprintKey = "x5t"
|
||||
}
|
||||
|
||||
token := jwt.NewWithClaims(signingMethod, claims)
|
||||
token.Header = map[string]interface{}{
|
||||
"alg": "RS256",
|
||||
"typ": "JWT",
|
||||
"x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)),
|
||||
"alg": signingMethod.Alg(),
|
||||
"typ": "JWT",
|
||||
thumbprintKey: base64.StdEncoding.EncodeToString(thumbprint(c.Cert, signingMethod.Alg())),
|
||||
}
|
||||
|
||||
if authParams.SendX5C {
|
||||
@@ -133,17 +146,23 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
|
||||
|
||||
assertion, err := token.SignedString(c.Key)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err)
|
||||
return "", fmt.Errorf("unable to sign JWT token: %w", err)
|
||||
}
|
||||
|
||||
return assertion, nil
|
||||
}
|
||||
|
||||
// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT.
|
||||
// https://tools.ietf.org/html/rfc7517#section-4.8
|
||||
func thumbprint(cert *x509.Certificate) []byte {
|
||||
/* #nosec */
|
||||
a := sha1.Sum(cert.Raw)
|
||||
return a[:]
|
||||
func thumbprint(cert *x509.Certificate, alg string) []byte {
|
||||
switch alg {
|
||||
case jwt.SigningMethodRS256.Name: // identity providers like ADFS don't support SHA256 assertions, so need to support this
|
||||
hash := sha1.Sum(cert.Raw) /* #nosec */
|
||||
return hash[:]
|
||||
default:
|
||||
hash := sha256.Sum256(cert.Raw)
|
||||
return hash[:]
|
||||
}
|
||||
}
|
||||
|
||||
// Client represents the REST calls to get tokens from token generator backends.
|
||||
|
||||
@@ -174,6 +174,7 @@ type TokenResponse struct {
|
||||
FamilyID string `json:"foci"`
|
||||
IDToken IDToken `json:"id_token"`
|
||||
ClientInfo ClientInfo `json:"client_info"`
|
||||
RefreshOn internalTime.DurationTime `json:"refresh_in,omitempty"`
|
||||
ExpiresOn time.Time `json:"-"`
|
||||
ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"`
|
||||
GrantedScopes Scopes `json:"scope"`
|
||||
|
||||
@@ -5,4 +5,4 @@
|
||||
package version
|
||||
|
||||
// Version is the version of this client package that is communicated to the server.
|
||||
const Version = "1.2.0"
|
||||
const Version = "1.4.2"
|
||||
|
||||
13
vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
generated
vendored
13
vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
generated
vendored
@@ -51,6 +51,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
|
||||
|
||||
type Account = shared.Account
|
||||
|
||||
type TokenSource = base.TokenSource
|
||||
|
||||
const (
|
||||
TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
|
||||
TokenSourceCache = base.TokenSourceCache
|
||||
)
|
||||
|
||||
var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
|
||||
|
||||
// clientOptions configures the Client's behavior.
|
||||
@@ -387,7 +394,7 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
|
||||
if err != nil {
|
||||
return AuthResult{}, err
|
||||
}
|
||||
return pca.base.AuthResultFromToken(ctx, authParams, token, true)
|
||||
return pca.base.AuthResultFromToken(ctx, authParams, token)
|
||||
}
|
||||
|
||||
type DeviceCodeResult = accesstokens.DeviceCodeResult
|
||||
@@ -412,7 +419,7 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error
|
||||
if err != nil {
|
||||
return AuthResult{}, err
|
||||
}
|
||||
return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
|
||||
return d.client.base.AuthResultFromToken(ctx, d.authParams, token)
|
||||
}
|
||||
|
||||
// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode
|
||||
@@ -687,7 +694,7 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string,
|
||||
return AuthResult{}, err
|
||||
}
|
||||
|
||||
return pca.base.AuthResultFromToken(ctx, authParams, token, true)
|
||||
return pca.base.AuthResultFromToken(ctx, authParams, token)
|
||||
}
|
||||
|
||||
type interactiveAuthResult struct {
|
||||
|
||||
8
vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
generated
vendored
@@ -1,3 +1,11 @@
|
||||
# v1.29.11 (2025-03-25)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.29.10 (2025-03-24)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.29.9 (2025-03-04.2)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
2
vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
generated
vendored
@@ -3,4 +3,4 @@
|
||||
package config
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.29.9"
|
||||
const goModuleVersion = "1.29.11"
|
||||
|
||||
8
vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
generated
vendored
@@ -1,3 +1,11 @@
|
||||
# v1.17.64 (2025-03-25)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.17.63 (2025-03-24)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.17.62 (2025-03-04.2)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
2
vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
generated
vendored
@@ -3,4 +3,4 @@
|
||||
package credentials
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.17.62"
|
||||
const goModuleVersion = "1.17.64"
|
||||
|
||||
8
vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
generated
vendored
@@ -1,3 +1,11 @@
|
||||
# v1.17.68 (2025-03-25)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.17.67 (2025-03-24)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.17.66 (2025-03-11)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
2
vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
generated
vendored
@@ -3,4 +3,4 @@
|
||||
package manager
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.17.66"
|
||||
const goModuleVersion = "1.17.68"
|
||||
|
||||
4
vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
generated
vendored
4
vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
generated
vendored
@@ -1,3 +1,7 @@
|
||||
# v1.25.2 (2025-03-25)
|
||||
|
||||
* No change notes available for this release.
|
||||
|
||||
# v1.25.1 (2025-03-04.2)
|
||||
|
||||
* **Bug Fix**: Add assurance test for operation order.
|
||||
|
||||
2
vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
generated
vendored
@@ -3,4 +3,4 @@
|
||||
package sso
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.25.1"
|
||||
const goModuleVersion = "1.25.2"
|
||||
|
||||
8
vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
generated
vendored
@@ -227,6 +227,14 @@ var defaultPartitions = endpoints.Partitions{
|
||||
Region: "ap-southeast-4",
|
||||
},
|
||||
},
|
||||
endpoints.EndpointKey{
|
||||
Region: "ap-southeast-5",
|
||||
}: endpoints.Endpoint{
|
||||
Hostname: "portal.sso.ap-southeast-5.amazonaws.com",
|
||||
CredentialScope: endpoints.CredentialScope{
|
||||
Region: "ap-southeast-5",
|
||||
},
|
||||
},
|
||||
endpoints.EndpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoints.Endpoint{
|
||||
|
||||
4
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
generated
vendored
4
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
generated
vendored
@@ -1,3 +1,7 @@
|
||||
# v1.29.2 (2025-03-24)
|
||||
|
||||
* No change notes available for this release.
|
||||
|
||||
# v1.29.1 (2025-03-04.2)
|
||||
|
||||
* **Bug Fix**: Add assurance test for operation order.
|
||||
|
||||
2
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
generated
vendored
@@ -3,4 +3,4 @@
|
||||
package ssooidc
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.29.1"
|
||||
const goModuleVersion = "1.29.2"
|
||||
|
||||
8
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
generated
vendored
8
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
generated
vendored
@@ -227,6 +227,14 @@ var defaultPartitions = endpoints.Partitions{
|
||||
Region: "ap-southeast-4",
|
||||
},
|
||||
},
|
||||
endpoints.EndpointKey{
|
||||
Region: "ap-southeast-5",
|
||||
}: endpoints.Endpoint{
|
||||
Hostname: "oidc.ap-southeast-5.amazonaws.com",
|
||||
CredentialScope: endpoints.CredentialScope{
|
||||
Region: "ap-southeast-5",
|
||||
},
|
||||
},
|
||||
endpoints.EndpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoints.Endpoint{
|
||||
|
||||
16
vendor/github.com/golang-jwt/jwt/v5/README.md
generated
vendored
16
vendor/github.com/golang-jwt/jwt/v5/README.md
generated
vendored
@@ -10,11 +10,11 @@ implementation of [JSON Web
|
||||
Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
|
||||
|
||||
Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
|
||||
this project adds Go module support, but maintains backwards compatibility with
|
||||
this project adds Go module support, but maintains backward compatibility with
|
||||
older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
|
||||
[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
|
||||
v5.0.0 introduces major improvements to the validation of tokens, but is not
|
||||
entirely backwards compatible.
|
||||
entirely backward compatible.
|
||||
|
||||
> After the original author of the library suggested migrating the maintenance
|
||||
> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
|
||||
@@ -24,7 +24,7 @@ entirely backwards compatible.
|
||||
|
||||
|
||||
**SECURITY NOTICE:** Some older versions of Go have a security issue in the
|
||||
crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue
|
||||
crypto/elliptic. The recommendation is to upgrade to at least 1.15 See issue
|
||||
[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
|
||||
detail.
|
||||
|
||||
@@ -32,7 +32,7 @@ detail.
|
||||
what you
|
||||
expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
|
||||
This library attempts to make it easy to do the right thing by requiring key
|
||||
types match the expected alg, but you should take the extra step to verify it in
|
||||
types to match the expected alg, but you should take the extra step to verify it in
|
||||
your usage. See the examples provided.
|
||||
|
||||
### Supported Go versions
|
||||
@@ -41,7 +41,7 @@ Our support of Go versions is aligned with Go's [version release
|
||||
policy](https://golang.org/doc/devel/release#policy). So we will support a major
|
||||
version of Go until there are two newer major releases. We no longer support
|
||||
building jwt-go with unsupported Go versions, as these contain security
|
||||
vulnerabilities which will not be fixed.
|
||||
vulnerabilities that will not be fixed.
|
||||
|
||||
## What the heck is a JWT?
|
||||
|
||||
@@ -117,7 +117,7 @@ notable differences:
|
||||
|
||||
This library is considered production ready. Feedback and feature requests are
|
||||
appreciated. The API should be considered stable. There should be very few
|
||||
backwards-incompatible changes outside of major version updates (and only with
|
||||
backward-incompatible changes outside of major version updates (and only with
|
||||
good reason).
|
||||
|
||||
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
|
||||
@@ -125,8 +125,8 @@ requests will land on `main`. Periodically, versions will be tagged from
|
||||
`main`. You can find all the releases on [the project releases
|
||||
page](https://github.com/golang-jwt/jwt/releases).
|
||||
|
||||
**BREAKING CHANGES:*** A full list of breaking changes is available in
|
||||
`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating
|
||||
**BREAKING CHANGES:** A full list of breaking changes is available in
|
||||
`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
|
||||
your code.
|
||||
|
||||
## Extensions
|
||||
|
||||
4
vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
generated
vendored
4
vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
generated
vendored
@@ -2,11 +2,11 @@
|
||||
|
||||
## Supported Versions
|
||||
|
||||
As of February 2022 (and until this document is updated), the latest version `v4` is supported.
|
||||
As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s).
|
||||
If you think you found a vulnerability, and even if you are not sure, please report it a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try be explicit, describe steps to reproduce the security issue with code example(s).
|
||||
|
||||
You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
|
||||
|
||||
|
||||
36
vendor/github.com/golang-jwt/jwt/v5/parser.go
generated
vendored
36
vendor/github.com/golang-jwt/jwt/v5/parser.go
generated
vendored
@@ -8,6 +8,8 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const tokenDelimiter = "."
|
||||
|
||||
type Parser struct {
|
||||
// If populated, only these methods will be considered valid.
|
||||
validMethods []string
|
||||
@@ -136,9 +138,10 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
|
||||
// It's only ever useful in cases where you know the signature is valid (since it has already
|
||||
// been or will be checked elsewhere in the stack) and you want to extract values from it.
|
||||
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
|
||||
parts = strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
|
||||
var ok bool
|
||||
parts, ok = splitToken(tokenString)
|
||||
if !ok {
|
||||
return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
|
||||
}
|
||||
|
||||
token = &Token{Raw: tokenString}
|
||||
@@ -196,6 +199,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
|
||||
return token, parts, nil
|
||||
}
|
||||
|
||||
// splitToken splits a token string into three parts: header, claims, and signature. It will only
|
||||
// return true if the token contains exactly two delimiters and three parts. In all other cases, it
|
||||
// will return nil parts and false.
|
||||
func splitToken(token string) ([]string, bool) {
|
||||
parts := make([]string, 3)
|
||||
header, remain, ok := strings.Cut(token, tokenDelimiter)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
parts[0] = header
|
||||
claims, remain, ok := strings.Cut(remain, tokenDelimiter)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
parts[1] = claims
|
||||
// One more cut to ensure the signature is the last part of the token and there are no more
|
||||
// delimiters. This avoids an issue where malicious input could contain additional delimiters
|
||||
// causing unecessary overhead parsing tokens.
|
||||
signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
|
||||
if unexpected {
|
||||
return nil, false
|
||||
}
|
||||
parts[2] = signature
|
||||
|
||||
return parts, true
|
||||
}
|
||||
|
||||
// DecodeSegment decodes a JWT specific base64url encoding. This function will
|
||||
// take into account whether the [Parser] is configured with additional options,
|
||||
// such as [WithStrictDecoding] or [WithPaddingAllowed].
|
||||
|
||||
2
vendor/github.com/golang-jwt/jwt/v5/token.go
generated
vendored
2
vendor/github.com/golang-jwt/jwt/v5/token.go
generated
vendored
@@ -75,7 +75,7 @@ func (t *Token) SignedString(key interface{}) (string, error) {
|
||||
}
|
||||
|
||||
// SigningString generates the signing string. This is the most expensive part
|
||||
// of the whole deal. Unless you need this for something special, just go
|
||||
// of the whole deal. Unless you need this for something special, just go
|
||||
// straight for the SignedString.
|
||||
func (t *Token) SigningString() (string, error) {
|
||||
h, err := json.Marshal(t.Header)
|
||||
|
||||
64
vendor/github.com/hashicorp/go-version/CHANGELOG.md
generated
vendored
Normal file
64
vendor/github.com/hashicorp/go-version/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
# 1.7.0 (May 24, 2024)
|
||||
|
||||
ENHANCEMENTS:
|
||||
|
||||
- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91))
|
||||
- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133))
|
||||
|
||||
INTERNAL:
|
||||
|
||||
- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115))
|
||||
- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105))
|
||||
- Bump actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116))
|
||||
- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111))
|
||||
- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112))
|
||||
- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103))
|
||||
- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107))
|
||||
- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124))
|
||||
- update readme ([#104](https://github.com/hashicorp/go-version/pull/104))
|
||||
|
||||
# 1.6.0 (June 28, 2022)
|
||||
|
||||
FEATURES:
|
||||
|
||||
- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100))
|
||||
|
||||
# 1.5.0 (May 18, 2022)
|
||||
|
||||
FEATURES:
|
||||
|
||||
- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95))
|
||||
- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93))
|
||||
|
||||
# 1.4.0 (January 5, 2022)
|
||||
|
||||
FEATURES:
|
||||
|
||||
- Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87))
|
||||
- `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88))
|
||||
|
||||
# 1.3.0 (March 31, 2021)
|
||||
|
||||
Please note that CHANGELOG.md does not exist in the source code prior to this release.
|
||||
|
||||
FEATURES:
|
||||
- Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85))
|
||||
|
||||
# 1.2.1 (June 17, 2020)
|
||||
|
||||
BUG FIXES:
|
||||
- Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73))
|
||||
|
||||
# 1.2.0 (April 23, 2019)
|
||||
|
||||
FEATURES:
|
||||
- Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53))
|
||||
|
||||
# 1.1.0 (Jan 07, 2019)
|
||||
|
||||
FEATURES:
|
||||
- Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45))
|
||||
|
||||
# 1.0.0 (August 24, 2018)
|
||||
|
||||
Initial release.
|
||||
356
vendor/github.com/hashicorp/go-version/LICENSE
generated
vendored
Normal file
356
vendor/github.com/hashicorp/go-version/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,356 @@
|
||||
Copyright (c) 2014 HashiCorp, Inc.
|
||||
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
||||
66
vendor/github.com/hashicorp/go-version/README.md
generated
vendored
Normal file
66
vendor/github.com/hashicorp/go-version/README.md
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
# Versioning Library for Go
|
||||

|
||||
[](https://godoc.org/github.com/hashicorp/go-version)
|
||||
|
||||
go-version is a library for parsing versions and version constraints,
|
||||
and verifying versions against a set of constraints. go-version
|
||||
can sort a collection of versions properly, handles prerelease/beta
|
||||
versions, can increment versions, etc.
|
||||
|
||||
Versions used with go-version must follow [SemVer](http://semver.org/).
|
||||
|
||||
## Installation and Usage
|
||||
|
||||
Package documentation can be found on
|
||||
[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
|
||||
|
||||
Installation can be done with a normal `go get`:
|
||||
|
||||
```
|
||||
$ go get github.com/hashicorp/go-version
|
||||
```
|
||||
|
||||
#### Version Parsing and Comparison
|
||||
|
||||
```go
|
||||
v1, err := version.NewVersion("1.2")
|
||||
v2, err := version.NewVersion("1.5+metadata")
|
||||
|
||||
// Comparison example. There is also GreaterThan, Equal, and just
|
||||
// a simple Compare that returns an int allowing easy >=, <=, etc.
|
||||
if v1.LessThan(v2) {
|
||||
fmt.Printf("%s is less than %s", v1, v2)
|
||||
}
|
||||
```
|
||||
|
||||
#### Version Constraints
|
||||
|
||||
```go
|
||||
v1, err := version.NewVersion("1.2")
|
||||
|
||||
// Constraints example.
|
||||
constraints, err := version.NewConstraint(">= 1.0, < 1.4")
|
||||
if constraints.Check(v1) {
|
||||
fmt.Printf("%s satisfies constraints %s", v1, constraints)
|
||||
}
|
||||
```
|
||||
|
||||
#### Version Sorting
|
||||
|
||||
```go
|
||||
versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
|
||||
versions := make([]*version.Version, len(versionsRaw))
|
||||
for i, raw := range versionsRaw {
|
||||
v, _ := version.NewVersion(raw)
|
||||
versions[i] = v
|
||||
}
|
||||
|
||||
// After this, the versions are properly sorted
|
||||
sort.Sort(version.Collection(versions))
|
||||
```
|
||||
|
||||
## Issues and Contributing
|
||||
|
||||
If you find an issue with this library, please report an issue. If you'd
|
||||
like, we welcome any contributions. Fork this library and submit a pull
|
||||
request.
|
||||
298
vendor/github.com/hashicorp/go-version/constraint.go
generated
vendored
Normal file
298
vendor/github.com/hashicorp/go-version/constraint.go
generated
vendored
Normal file
@@ -0,0 +1,298 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package version
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Constraint represents a single constraint for a version, such as
|
||||
// ">= 1.0".
|
||||
type Constraint struct {
|
||||
f constraintFunc
|
||||
op operator
|
||||
check *Version
|
||||
original string
|
||||
}
|
||||
|
||||
func (c *Constraint) Equals(con *Constraint) bool {
|
||||
return c.op == con.op && c.check.Equal(con.check)
|
||||
}
|
||||
|
||||
// Constraints is a slice of constraints. We make a custom type so that
|
||||
// we can add methods to it.
|
||||
type Constraints []*Constraint
|
||||
|
||||
type constraintFunc func(v, c *Version) bool
|
||||
|
||||
var constraintOperators map[string]constraintOperation
|
||||
|
||||
type constraintOperation struct {
|
||||
op operator
|
||||
f constraintFunc
|
||||
}
|
||||
|
||||
var constraintRegexp *regexp.Regexp
|
||||
|
||||
// init wires up the operator table and compiles the constraint regexp
// from the operator tokens plus the shared version pattern.
func init() {
	constraintOperators = map[string]constraintOperation{
		"":   {op: equal, f: constraintEqual}, // a missing operator means "="
		"=":  {op: equal, f: constraintEqual},
		"!=": {op: notEqual, f: constraintNotEqual},
		">":  {op: greaterThan, f: constraintGreaterThan},
		"<":  {op: lessThan, f: constraintLessThan},
		">=": {op: greaterThanEqual, f: constraintGreaterThanEqual},
		"<=": {op: lessThanEqual, f: constraintLessThanEqual},
		"~>": {op: pessimistic, f: constraintPessimistic},
	}

	ops := make([]string, 0, len(constraintOperators))
	for k := range constraintOperators {
		ops = append(ops, regexp.QuoteMeta(k))
	}

	constraintRegexp = regexp.MustCompile(fmt.Sprintf(
		`^\s*(%s)\s*(%s)\s*$`,
		strings.Join(ops, "|"),
		VersionRegexpRaw))
}

// NewConstraint will parse one or more constraints from the given
// constraint string. The string must be a comma-separated list of
// constraints.
func NewConstraint(v string) (Constraints, error) {
	vs := strings.Split(v, ",")
	result := make([]*Constraint, len(vs))
	for i, single := range vs {
		c, err := parseSingle(single)
		if err != nil {
			return nil, err
		}

		result[i] = c
	}

	return Constraints(result), nil
}

// MustConstraints is a helper that wraps a call to a function
// returning (Constraints, error) and panics if error is non-nil.
func MustConstraints(c Constraints, err error) Constraints {
	if err != nil {
		panic(err)
	}

	return c
}
// Check tests if a version satisfies all the constraints.
func (cs Constraints) Check(v *Version) bool {
	for _, c := range cs {
		if !c.Check(v) {
			return false
		}
	}

	return true
}

// Equals compares Constraints with other Constraints
// for equality. This may not represent logical equivalence
// of compared constraints.
// e.g. even though '>0.1,>0.2' is logically equivalent
// to '>0.2' it is *NOT* treated as equal.
//
// Missing operator is treated as equal to '=', whitespaces
// are ignored and constraints are sorted before comparison.
func (cs Constraints) Equals(c Constraints) bool {
	if len(cs) != len(c) {
		return false
	}

	// make copies to retain order of the original slices
	left := make(Constraints, len(cs))
	copy(left, cs)
	sort.Stable(left)
	right := make(Constraints, len(c))
	copy(right, c)
	sort.Stable(right)

	// compare sorted slices
	for i, con := range left {
		if !con.Equals(right[i]) {
			return false
		}
	}

	return true
}

// Len implements sort.Interface.
func (cs Constraints) Len() int {
	return len(cs)
}

// Less implements sort.Interface: constraints order first by operator,
// then by the version they compare against.
func (cs Constraints) Less(i, j int) bool {
	if cs[i].op < cs[j].op {
		return true
	}
	if cs[i].op > cs[j].op {
		return false
	}

	return cs[i].check.LessThan(cs[j].check)
}

// Swap implements sort.Interface.
func (cs Constraints) Swap(i, j int) {
	cs[i], cs[j] = cs[j], cs[i]
}

// String returns the string format of the constraints: the
// comma-joined original text of each constraint.
func (cs Constraints) String() string {
	csStr := make([]string, len(cs))
	for i, c := range cs {
		csStr[i] = c.String()
	}

	return strings.Join(csStr, ",")
}

// Check tests if a constraint is validated by the given version.
func (c *Constraint) Check(v *Version) bool {
	return c.f(v, c.check)
}

// Prerelease returns true if the version underlying this constraint
// contains a prerelease field.
func (c *Constraint) Prerelease() bool {
	return len(c.check.Prerelease()) > 0
}

// String returns the original text this constraint was parsed from.
func (c *Constraint) String() string {
	return c.original
}
// parseSingle parses exactly one constraint (e.g. ">= 1.0") into a
// *Constraint, returning an error for malformed input.
func parseSingle(v string) (*Constraint, error) {
	matches := constraintRegexp.FindStringSubmatch(v)
	if matches == nil {
		return nil, fmt.Errorf("Malformed constraint: %s", v)
	}

	// matches[1] is the operator token, matches[2] the version text.
	check, err := NewVersion(matches[2])
	if err != nil {
		return nil, err
	}

	cop := constraintOperators[matches[1]]

	return &Constraint{
		f:        cop.f,
		op:       cop.op,
		check:    check,
		original: v,
	}, nil
}

// prereleaseCheck implements the pre-release matching rules shared by
// all comparison operators: a pre-release constraint only matches
// pre-release versions with the same base segments, and a plain
// constraint never matches a pre-release version.
func prereleaseCheck(v, c *Version) bool {
	switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
	case cPre && vPre:
		// A constraint with a pre-release can only match a pre-release version
		// with the same base segments.
		return v.equalSegments(c)

	case !cPre && vPre:
		// A constraint without a pre-release can only match a version without a
		// pre-release.
		return false

	case cPre && !vPre:
		// OK, except with the pessimistic operator
	case !cPre && !vPre:
		// OK
	}
	return true
}

//-------------------------------------------------------------------
// Constraint functions
//-------------------------------------------------------------------

// operator identifies a constraint operator. The rune values are used
// only for identity and sort order; they are never printed.
type operator rune

const (
	equal            operator = '='
	notEqual         operator = '≠'
	greaterThan      operator = '>'
	lessThan         operator = '<'
	greaterThanEqual operator = '≥'
	lessThanEqual    operator = '≤'
	pessimistic      operator = '~'
)

// constraintEqual reports v == c.
func constraintEqual(v, c *Version) bool {
	return v.Equal(c)
}

// constraintNotEqual reports v != c.
func constraintNotEqual(v, c *Version) bool {
	return !v.Equal(c)
}

// constraintGreaterThan reports v > c (subject to pre-release rules).
func constraintGreaterThan(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) == 1
}

// constraintLessThan reports v < c (subject to pre-release rules).
func constraintLessThan(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) == -1
}

// constraintGreaterThanEqual reports v >= c (subject to pre-release rules).
func constraintGreaterThanEqual(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) >= 0
}

// constraintLessThanEqual reports v <= c (subject to pre-release rules).
func constraintLessThanEqual(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) <= 0
}

// constraintPessimistic implements the "~>" operator: v must be at
// least c and must match c on every segment except the last one the
// user actually specified.
func constraintPessimistic(v, c *Version) bool {
	// Using a pessimistic constraint with a pre-release, restricts versions to pre-releases
	if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
		return false
	}

	// If the version being checked is naturally less than the constraint, then there
	// is no way for the version to be valid against the constraint
	if v.LessThan(c) {
		return false
	}
	// We'll use this more than once, so grab the length now so it's a little cleaner
	// to write the later checks
	cs := len(c.segments)

	// If the version being checked has less specificity than the constraint, then there
	// is no way for the version to be valid against the constraint
	if cs > len(v.segments) {
		return false
	}

	// Check the segments in the constraint against those in the version. If the version
	// being checked, at any point, does not have the same values in each index of the
	// constraints segments, then it cannot be valid against the constraint.
	// NOTE: c.si is the number of segments the user wrote, which may be
	// smaller than len(c.segments) after zero-padding.
	for i := 0; i < c.si-1; i++ {
		if v.segments[i] != c.segments[i] {
			return false
		}
	}

	// Check the last part of the segment in the constraint. If the version segment at
	// this index is less than the constraints segment at this index, then it cannot
	// be valid against the constraint
	if c.segments[cs-1] > v.segments[cs-1] {
		return false
	}

	// If nothing has rejected the version by now, it's valid
	return true
}
441
vendor/github.com/hashicorp/go-version/version.go
generated
vendored
Normal file
441
vendor/github.com/hashicorp/go-version/version.go
generated
vendored
Normal file
@@ -0,0 +1,441 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package version
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The compiled regular expression used to test the validity of a version.
var (
	versionRegexp *regexp.Regexp
	semverRegexp  *regexp.Regexp
)

// The raw regular expression string used for testing the validity
// of a version.
const (
	// VersionRegexpRaw accepts an optional "v" prefix, dotted numeric
	// segments, an optional pre-release (with or without a "-"
	// separator), and optional "+metadata".
	VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
		`?`

	// SemverRegexpRaw requires a separator between version and prerelease
	SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
		`?`
)

// Version represents a single version.
type Version struct {
	metadata string  // build metadata after "+"; never used in comparisons
	pre      string  // pre-release text after "-"; empty when absent
	segments []int64 // numeric segments, zero-padded to at least 3
	si       int     // number of segments actually present in the input
	original string  // the exact input string, returned by Original()
}

// init compiles the anchored forms of the two raw patterns above.
func init() {
	versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
	semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
}
// NewVersion parses the given version and returns a new
// Version.
func NewVersion(v string) (*Version, error) {
	return newVersion(v, versionRegexp)
}

// NewSemver parses the given version and returns a new
// Version that adheres strictly to SemVer specs
// https://semver.org/
func NewSemver(v string) (*Version, error) {
	return newVersion(v, semverRegexp)
}

// newVersion performs the actual parsing against the supplied pattern
// (permissive or strict-semver).
func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
	matches := pattern.FindStringSubmatch(v)
	if matches == nil {
		return nil, fmt.Errorf("Malformed version: %s", v)
	}
	segmentsStr := strings.Split(matches[1], ".")
	segments := make([]int64, len(segmentsStr))
	for i, str := range segmentsStr {
		val, err := strconv.ParseInt(str, 10, 64)
		if err != nil {
			return nil, fmt.Errorf(
				"Error parsing version: %s", err)
		}

		segments[i] = val
	}

	// Even though we could support more than three segments, if we
	// got less than three, pad it with 0s. This is to cover the basic
	// default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum
	for i := len(segments); i < 3; i++ {
		segments = append(segments, 0)
	}

	// matches[7] holds a prerelease written without the "-" separator,
	// matches[4] one written with it; prefer whichever is present.
	pre := matches[7]
	if pre == "" {
		pre = matches[4]
	}

	return &Version{
		metadata: matches[10],
		pre:      pre,
		segments: segments,
		si:       len(segmentsStr),
		original: v,
	}, nil
}

// Must is a helper that wraps a call to a function returning (*Version, error)
// and panics if error is non-nil.
func Must(v *Version, err error) *Version {
	if err != nil {
		panic(err)
	}

	return v
}
// Compare compares this version to another version. This
// returns -1, 0, or 1 if this version is smaller, equal,
// or larger than the other version, respectively.
//
// If you want boolean results, use the LessThan, Equal,
// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
func (v *Version) Compare(other *Version) int {
	// A quick, efficient equality check
	if v.String() == other.String() {
		return 0
	}

	// If the segments are the same, we must compare on prerelease info.
	// A version without a prerelease sorts above one with a prerelease.
	if v.equalSegments(other) {
		preSelf := v.Prerelease()
		preOther := other.Prerelease()
		if preSelf == "" && preOther == "" {
			return 0
		}
		if preSelf == "" {
			return 1
		}
		if preOther == "" {
			return -1
		}

		return comparePrereleases(preSelf, preOther)
	}

	segmentsSelf := v.Segments64()
	segmentsOther := other.Segments64()
	// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
	lenSelf := len(segmentsSelf)
	lenOther := len(segmentsOther)
	hS := lenSelf
	if lenSelf < lenOther {
		hS = lenOther
	}
	// Compare the segments
	// Because a constraint could have more/less specificity than the version it's
	// checking, we need to account for a lopsided or jagged comparison
	for i := 0; i < hS; i++ {
		if i > lenSelf-1 {
			// This means Self had the lower specificity
			// Check to see if the remaining segments in Other are all zeros
			if !allZero(segmentsOther[i:]) {
				// if not, it means that Other has to be greater than Self
				return -1
			}
			break
		} else if i > lenOther-1 {
			// this means Other had the lower specificity
			// Check to see if the remaining segments in Self are all zeros -
			if !allZero(segmentsSelf[i:]) {
				// if not, it means that Self has to be greater than Other
				return 1
			}
			break
		}
		lhs := segmentsSelf[i]
		rhs := segmentsOther[i]
		if lhs == rhs {
			continue
		} else if lhs < rhs {
			return -1
		}
		// Otherwise lhs > rhs, so Self is the greater version
		return 1
	}

	// if we got this far, they're equal
	return 0
}

// equalSegments reports whether both versions have identical numeric
// segments (same count and same values); prerelease/metadata are ignored.
func (v *Version) equalSegments(other *Version) bool {
	segmentsSelf := v.Segments64()
	segmentsOther := other.Segments64()

	if len(segmentsSelf) != len(segmentsOther) {
		return false
	}
	for i, v := range segmentsSelf {
		if v != segmentsOther[i] {
			return false
		}
	}
	return true
}
// allZero reports whether every segment in segs is zero.
// An empty (or nil) slice is considered all-zero.
func allZero(segs []int64) bool {
	for i := range segs {
		if segs[i] != 0 {
			return false
		}
	}
	return true
}
// comparePart compares a single dot-separated prerelease part of two
// versions, returning -1, 0, or 1. Numeric parts compare numerically
// and sort before alphanumeric parts; an empty part's rank depends on
// whether the opposing part is numeric.
func comparePart(preSelf string, preOther string) int {
	if preSelf == preOther {
		return 0
	}

	// Attempt numeric parsing of both parts; a parse failure marks the
	// part as alphanumeric.
	selfInt, selfErr := strconv.ParseInt(preSelf, 10, 64)
	otherInt, otherErr := strconv.ParseInt(preOther, 10, 64)
	selfNumeric := selfErr == nil
	otherNumeric := otherErr == nil

	switch {
	case preSelf == "":
		// An empty part loses to a numeric one but beats an alphanumeric one.
		if otherNumeric {
			return -1
		}
		return 1

	case preOther == "":
		if selfNumeric {
			return 1
		}
		return -1

	case selfNumeric && !otherNumeric:
		// Numeric identifiers always have lower precedence than alphanumeric.
		return -1

	case !selfNumeric && otherNumeric:
		return 1

	case !selfNumeric && !otherNumeric && preSelf > preOther:
		// Both alphanumeric: lexicographic ordering.
		return 1

	case selfInt > otherInt:
		// Both numeric: numeric ordering.
		return 1
	}

	return -1
}
// comparePrereleases compares two prerelease strings part-by-part
// (split on "."), returning -1, 0, or 1. The shorter string is padded
// with empty parts so jagged prereleases compare deterministically.
func comparePrereleases(v string, other string) int {
	// the same pre release!
	if v == other {
		return 0
	}

	// split both pre releases to analyse their parts
	selfPreReleaseMeta := strings.Split(v, ".")
	otherPreReleaseMeta := strings.Split(other, ".")

	selfPreReleaseLen := len(selfPreReleaseMeta)
	otherPreReleaseLen := len(otherPreReleaseMeta)

	biggestLen := otherPreReleaseLen
	if selfPreReleaseLen > otherPreReleaseLen {
		biggestLen = selfPreReleaseLen
	}

	// loop over the parts to find the first difference
	for i := 0; i < biggestLen; i = i + 1 {
		partSelfPre := ""
		if i < selfPreReleaseLen {
			partSelfPre = selfPreReleaseMeta[i]
		}

		partOtherPre := ""
		if i < otherPreReleaseLen {
			partOtherPre = otherPreReleaseMeta[i]
		}

		compare := comparePart(partSelfPre, partOtherPre)
		// if parts are equal, continue the loop
		if compare != 0 {
			return compare
		}
	}

	return 0
}
// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
// segments of the version, without prerelease or metadata.
func (v *Version) Core() *Version {
	// segments is always zero-padded to at least 3 entries by newVersion,
	// so indexing 0..2 is safe.
	segments := v.Segments64()
	segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
	return Must(NewVersion(segmentsOnly))
}

// Equal tests if two versions are equal.
func (v *Version) Equal(o *Version) bool {
	// Two nils compare equal; nil never equals a non-nil version.
	if v == nil || o == nil {
		return v == o
	}

	return v.Compare(o) == 0
}

// GreaterThan tests if this version is greater than another version.
func (v *Version) GreaterThan(o *Version) bool {
	return v.Compare(o) > 0
}

// GreaterThanOrEqual tests if this version is greater than or equal to another version.
func (v *Version) GreaterThanOrEqual(o *Version) bool {
	return v.Compare(o) >= 0
}

// LessThan tests if this version is less than another version.
func (v *Version) LessThan(o *Version) bool {
	return v.Compare(o) < 0
}

// LessThanOrEqual tests if this version is less than or equal to another version.
func (v *Version) LessThanOrEqual(o *Version) bool {
	return v.Compare(o) <= 0
}

// Metadata returns any metadata that was part of the version
// string.
//
// Metadata is anything that comes after the "+" in the version.
// For example, with "1.2.3+beta", the metadata is "beta".
func (v *Version) Metadata() string {
	return v.metadata
}

// Prerelease returns any prerelease data that is part of the version,
// or blank if there is no prerelease data.
//
// Prerelease information is anything that comes after the "-" in the
// version (but before any metadata). For example, with "1.2.3-beta",
// the prerelease information is "beta".
func (v *Version) Prerelease() string {
	return v.pre
}

// Segments returns the numeric segments of the version as a slice of ints.
//
// This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3.
func (v *Version) Segments() []int {
	segmentSlice := make([]int, len(v.segments))
	for i, v := range v.segments {
		segmentSlice[i] = int(v)
	}
	return segmentSlice
}

// Segments64 returns the numeric segments of the version as a slice of int64s.
//
// This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3.
func (v *Version) Segments64() []int64 {
	// Return a copy so callers cannot mutate the version's internal state.
	result := make([]int64, len(v.segments))
	copy(result, v.segments)
	return result
}
// String returns the full version string included pre-release
|
||||
// and metadata information.
|
||||
//
|
||||
// This value is rebuilt according to the parsed segments and other
|
||||
// information. Therefore, ambiguities in the version string such as
|
||||
// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
|
||||
// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
|
||||
// as shown in the parenthesized examples.
|
||||
func (v *Version) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmtParts := make([]string, len(v.segments))
|
||||
for i, s := range v.segments {
|
||||
// We can ignore err here since we've pre-parsed the values in segments
|
||||
str := strconv.FormatInt(s, 10)
|
||||
fmtParts[i] = str
|
||||
}
|
||||
fmt.Fprintf(&buf, strings.Join(fmtParts, "."))
|
||||
if v.pre != "" {
|
||||
fmt.Fprintf(&buf, "-%s", v.pre)
|
||||
}
|
||||
if v.metadata != "" {
|
||||
fmt.Fprintf(&buf, "+%s", v.metadata)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Original returns the original parsed version as-is, including any
// potential whitespace, `v` prefix, etc.
func (v *Version) Original() string {
	return v.original
}

// UnmarshalText implements encoding.TextUnmarshaler interface.
func (v *Version) UnmarshalText(b []byte) error {
	temp, err := NewVersion(string(b))
	if err != nil {
		return err
	}

	// Replace the receiver's contents wholesale with the parsed version.
	*v = *temp

	return nil
}

// MarshalText implements encoding.TextMarshaler interface.
func (v *Version) MarshalText() ([]byte, error) {
	return []byte(v.String()), nil
}

// Scan implements the sql.Scanner interface.
func (v *Version) Scan(src interface{}) error {
	switch src := src.(type) {
	case string:
		return v.UnmarshalText([]byte(src))
	case nil:
		// NULL column: leave the receiver unchanged.
		return nil
	default:
		return fmt.Errorf("cannot scan %T as Version", src)
	}
}

// Value implements the driver.Valuer interface.
func (v *Version) Value() (driver.Value, error) {
	return v.String(), nil
}
20
vendor/github.com/hashicorp/go-version/version_collection.go
generated
vendored
Normal file
20
vendor/github.com/hashicorp/go-version/version_collection.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package version
|
||||
|
||||
// Collection is a type that implements the sort.Interface interface
// so that versions can be sorted.
type Collection []*Version

// Len implements sort.Interface.
func (v Collection) Len() int {
	return len(v)
}

// Less implements sort.Interface using semantic version ordering.
func (v Collection) Less(i, j int) bool {
	return v[i].LessThan(v[j])
}

// Swap implements sort.Interface.
func (v Collection) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}
@@ -1,111 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package staleness // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness"
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"time"
|
||||
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity"
|
||||
)
|
||||
|
||||
// PriorityQueue represents a way to store entries sorted by their priority.
// Pop() will return the oldest entry of the set.
type PriorityQueue interface {
	// Update will add or update an entry, and reshuffle the queue internally as needed to keep it sorted
	Update(id identity.Stream, newPrio time.Time)
	// Peek will return the entry at the HEAD of the queue *without* removing it from the queue
	Peek() (identity.Stream, time.Time)
	// Pop will remove the entry at the HEAD of the queue and return it
	Pop() (identity.Stream, time.Time)
	// Len will return the number of entries in the queue
	Len() int
}

// heapQueue implements heap.Interface.
// We use it as the inner implementation of a heap-based sorted queue
type heapQueue []*queueItem

// queueItem is a single entry in the heap. index tracks the item's
// current position so heap.Fix can restore the invariant after an
// in-place priority change.
type queueItem struct {
	key   identity.Stream
	prio  time.Time
	index int
}

func (pq heapQueue) Len() int { return len(pq) }

func (pq heapQueue) Less(i, j int) bool {
	// We want Pop to give us the lowest priority
	return pq[i].prio.Before(pq[j].prio)
}

func (pq heapQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	// Keep each item's recorded position in sync with its slot.
	pq[i].index = i
	pq[j].index = j
}

// Push appends x to the heap's backing slice (called by heap.Push).
func (pq *heapQueue) Push(x any) {
	n := len(*pq)
	item := x.(*queueItem)
	item.index = n
	*pq = append(*pq, item)
}

// Pop removes and returns the last element of the backing slice
// (called by heap.Pop after it has swapped the minimum to the end).
func (pq *heapQueue) Pop() any {
	old := *pq
	n := len(old)
	item := old[n-1]
	old[n-1] = nil // avoid memory leak
	item.index = -1 // for safety
	*pq = old[0 : n-1]
	return item
}

// heapPriorityQueue is the PriorityQueue implementation: a binary heap
// plus a lookup map so Update can find existing entries in O(1).
type heapPriorityQueue struct {
	inner      heapQueue
	itemLookup map[identity.Stream]*queueItem
}

// NewPriorityQueue returns an empty heap-backed PriorityQueue.
func NewPriorityQueue() PriorityQueue {
	pq := &heapPriorityQueue{
		inner:      heapQueue{},
		itemLookup: map[identity.Stream]*queueItem{},
	}
	heap.Init(&pq.inner)

	return pq
}

// Update inserts id with priority newPrio, or re-prioritizes it if it
// is already queued.
func (pq *heapPriorityQueue) Update(id identity.Stream, newPrio time.Time) {
	// Check if the entry already exists in the queue
	item, ok := pq.itemLookup[id]
	if ok {
		// If so, we can update it in place
		item.prio = newPrio
		heap.Fix(&pq.inner, item.index)
	} else {
		item = &queueItem{
			key:  id,
			prio: newPrio,
		}
		heap.Push(&pq.inner, item)
		pq.itemLookup[id] = item
	}
}

// Peek returns the oldest entry without removing it.
// NOTE(review): panics on an empty queue — callers must check Len() first.
func (pq *heapPriorityQueue) Peek() (identity.Stream, time.Time) {
	val := pq.inner[0]
	return val.key, val.prio
}

// Pop removes and returns the oldest entry.
func (pq *heapPriorityQueue) Pop() (identity.Stream, time.Time) {
	val := heap.Pop(&pq.inner).(*queueItem)
	delete(pq.itemLookup, val.key)
	return val.key, val.prio
}

// Len returns the number of queued entries.
func (pq *heapPriorityQueue) Len() int {
	return pq.inner.Len()
}
@@ -1,40 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package staleness // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity"
|
||||
)
|
||||
|
||||
// Tracker records the last time each stream was seen and reports the
// streams that have gone stale.
type Tracker struct {
	pq PriorityQueue // streams ordered by last-refresh time, oldest first
}

// NewTracker returns a Tracker backed by an empty priority queue.
func NewTracker() Tracker {
	return Tracker{pq: NewPriorityQueue()}
}

// Refresh marks each of the given streams as seen at time ts.
func (tr Tracker) Refresh(ts time.Time, ids ...identity.Stream) {
	for _, id := range ids {
		tr.pq.Update(id, ts)
	}
}

// Collect removes and returns every stream whose last refresh is at
// least maxDuration in the past. Streams are drained oldest-first, so
// the scan can stop at the first non-stale entry.
func (tr Tracker) Collect(maxDuration time.Duration) []identity.Stream {
	now := time.Now()

	var ids []identity.Stream
	for tr.pq.Len() > 0 {
		_, ts := tr.pq.Peek()
		if now.Sub(ts) < maxDuration {
			break
		}
		id, _ := tr.pq.Pop()
		ids = append(ids, id)
	}

	return ids
}
@@ -6,15 +6,7 @@
|
||||
|
||||
The following telemetry is emitted by this component.
|
||||
|
||||
### otelcol_deltatocumulative.datapoints.dropped
|
||||
|
||||
number of datapoints dropped due to given 'reason'
|
||||
|
||||
| Unit | Metric Type | Value Type | Monotonic |
|
||||
| ---- | ----------- | ---------- | --------- |
|
||||
| {datapoint} | Sum | Int | true |
|
||||
|
||||
### otelcol_deltatocumulative.datapoints.linear
|
||||
### otelcol_deltatocumulative_datapoints
|
||||
|
||||
total number of datapoints processed. may have 'error' attribute, if processing failed
|
||||
|
||||
@@ -22,31 +14,7 @@ total number of datapoints processed. may have 'error' attribute, if processing
|
||||
| ---- | ----------- | ---------- | --------- |
|
||||
| {datapoint} | Sum | Int | true |
|
||||
|
||||
### otelcol_deltatocumulative.datapoints.processed
|
||||
|
||||
number of datapoints processed
|
||||
|
||||
| Unit | Metric Type | Value Type | Monotonic |
|
||||
| ---- | ----------- | ---------- | --------- |
|
||||
| {datapoint} | Sum | Int | true |
|
||||
|
||||
### otelcol_deltatocumulative.gaps.length
|
||||
|
||||
total duration where data was expected but not received
|
||||
|
||||
| Unit | Metric Type | Value Type | Monotonic |
|
||||
| ---- | ----------- | ---------- | --------- |
|
||||
| s | Sum | Int | true |
|
||||
|
||||
### otelcol_deltatocumulative.streams.evicted
|
||||
|
||||
number of streams evicted
|
||||
|
||||
| Unit | Metric Type | Value Type | Monotonic |
|
||||
| ---- | ----------- | ---------- | --------- |
|
||||
| {stream} | Sum | Int | true |
|
||||
|
||||
### otelcol_deltatocumulative.streams.limit
|
||||
### otelcol_deltatocumulative_streams_limit
|
||||
|
||||
upper limit of tracked streams
|
||||
|
||||
@@ -54,7 +22,7 @@ upper limit of tracked streams
|
||||
| ---- | ----------- | ---------- |
|
||||
| {stream} | Gauge | Int |
|
||||
|
||||
### otelcol_deltatocumulative.streams.max_stale
|
||||
### otelcol_deltatocumulative_streams_max_stale
|
||||
|
||||
duration after which inactive streams are dropped
|
||||
|
||||
@@ -62,15 +30,7 @@ duration after which streams inactive streams are dropped
|
||||
| ---- | ----------- | ---------- |
|
||||
| s | Gauge | Int |
|
||||
|
||||
### otelcol_deltatocumulative.streams.tracked
|
||||
|
||||
number of streams tracked
|
||||
|
||||
| Unit | Metric Type | Value Type | Monotonic |
|
||||
| ---- | ----------- | ---------- | --------- |
|
||||
| {dps} | Sum | Int | false |
|
||||
|
||||
### otelcol_deltatocumulative.streams.tracked.linear
|
||||
### otelcol_deltatocumulative_streams_tracked
|
||||
|
||||
number of streams tracked
|
||||
|
||||
|
||||
@@ -45,6 +45,10 @@ type Aggregator struct {
|
||||
|
||||
func Aggregate[T Type[T]](state, dp T, aggregate func(state, dp T) error) error {
|
||||
switch {
|
||||
case state.Timestamp() == 0:
|
||||
// first sample of series, no state to aggregate with
|
||||
dp.CopyTo(state)
|
||||
return nil
|
||||
case dp.StartTimestamp() < state.StartTimestamp():
|
||||
// belongs to older series
|
||||
return ErrOlderStart{Start: state.StartTimestamp(), Sample: dp.StartTimestamp()}
|
||||
|
||||
116
vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go
generated
vendored
Normal file
116
vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package maps // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
// Limit returns a Context that caps the combined element count of all
// maps sharing it at the given limit.
func Limit(limit int64) Context {
	return Context{limit: limit, guard: new(atomic.Int64), total: new(atomic.Int64)}
}

// New returns an empty Parallel map governed by the given size Context.
func New[K comparable, V any](ctx Context) *Parallel[K, V] {
	return &Parallel[K, V]{ctx: ctx, elems: *xsync.NewMapOf[K, V]()}
}

// lowercase alias for unexported embedding
type ctx = Context

// Parallel is a lock-free map-like structure. It can be safely used by multiple
// routines concurrently.
//
// Due to the lock-free nature, typical get, put, delete operations are not
// available. Instead, [Parallel.LoadOrStore] returns an existing value or
// inserts a new one if missing. As such, values themselves should be mutable by
// being reference types (pointers or pmetric.* types).
//
// Parallel enforces the [Context] size limit.
type Parallel[K comparable, V any] struct {
	ctx
	elems xsync.MapOf[K, V]
}

// Context holds size information about one or more maps.
// Can be shared across maps for a common limit.
type Context struct {
	limit int64
	// guard counts claimed slots (including in-flight insert attempts);
	// total counts elements actually stored. Both are pointers so copies
	// of a Context share the same counters.
	guard *atomic.Int64
	total *atomic.Int64
}

// String renders the current (claimed, stored) counters, mainly for
// debugging and error messages.
func (ctx Context) String() string {
	return fmt.Sprintf("(%d, %d)", ctx.guard.Load(), ctx.total.Load())
}
// LoadOrStore loads existing values from the map or creates missing ones initialized to <def>.
|
||||
//
|
||||
// Return Value:
|
||||
// - <value>, true: m[k] already existed and was loaded
|
||||
// - <def>, false: m[k] was created and initialized to <def>
|
||||
// - <zero>, false: m[k] did not exist but was not created due to size limit
|
||||
func (m *Parallel[K, V]) LoadOrStore(k K, def V) (_ V, loaded bool) {
|
||||
// multiple routines may attempt to LoadOrStore the same value at once. as
|
||||
// such, we cannot use data-dependent instructions such as if(not exist)
|
||||
// {...}, because the <not exist> may have changed right after we checked
|
||||
// it.
|
||||
|
||||
v, ok := m.elems.Load(k)
|
||||
if ok {
|
||||
return v, true
|
||||
}
|
||||
|
||||
// as long as there appears to be actual space, try to store
|
||||
for m.total.Load() < m.limit {
|
||||
// multiple routines may do this. to enforce the limit, try to claim a
|
||||
// "slot" below the limit
|
||||
slot := m.guard.Add(1)
|
||||
if slot > m.limit {
|
||||
// slot we got is above the limit. either the map is now full (loop
|
||||
// will exit) or routines that won't actually store hold slots, in
|
||||
// which case we will try again.
|
||||
m.guard.Add(-1)
|
||||
continue
|
||||
}
|
||||
|
||||
// we got a valid slot. others may too. as such, we try to store, but
|
||||
// may end up loading instead if another routine stored just before us.
|
||||
v, loaded = m.elems.LoadOrStore(k, def)
|
||||
if loaded {
|
||||
// another routine stored, but we got a value. give up slot
|
||||
m.guard.Add(-1)
|
||||
} else {
|
||||
// we stored. increase the total size
|
||||
m.total.Add(1)
|
||||
}
|
||||
return v, loaded
|
||||
}
|
||||
|
||||
// we didn't store, because we hit the limit. attempt another load, just in
|
||||
// case another routine stored by now.
|
||||
return m.elems.Load(k)
|
||||
}
|
||||
|
||||
// LoadAndDelete deletes m[k], returning the value it had if it existed
|
||||
func (m *Parallel[K, V]) LoadAndDelete(k K) (_ V, loaded bool) {
|
||||
v, loaded := m.elems.LoadAndDelete(k)
|
||||
if loaded {
|
||||
// m[k] did exist. decrease size and open up a slot
|
||||
m.total.Add(-1)
|
||||
m.guard.Add(-1)
|
||||
}
|
||||
return v, loaded
|
||||
}
|
||||
|
||||
func (ctx Context) Size() int64 {
|
||||
return ctx.total.Load()
|
||||
}
|
||||
|
||||
// Exceeded reports whether a [Limited.LoadOrStore] failed due to the limit being exceeded.
|
||||
func Exceeded[T comparable](v T, loaded bool) bool {
|
||||
return !loaded && v == *new(T)
|
||||
}
|
||||
@@ -25,18 +25,13 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer {
|
||||
// TelemetryBuilder provides an interface for components to report telemetry
|
||||
// as defined in metadata and user config.
|
||||
type TelemetryBuilder struct {
|
||||
meter metric.Meter
|
||||
mu sync.Mutex
|
||||
registrations []metric.Registration
|
||||
DeltatocumulativeDatapointsDropped metric.Int64Counter
|
||||
DeltatocumulativeDatapointsLinear metric.Int64Counter
|
||||
DeltatocumulativeDatapointsProcessed metric.Int64Counter
|
||||
DeltatocumulativeGapsLength metric.Int64Counter
|
||||
DeltatocumulativeStreamsEvicted metric.Int64Counter
|
||||
DeltatocumulativeStreamsLimit metric.Int64Gauge
|
||||
DeltatocumulativeStreamsMaxStale metric.Int64Gauge
|
||||
DeltatocumulativeStreamsTracked metric.Int64UpDownCounter
|
||||
DeltatocumulativeStreamsTrackedLinear metric.Int64ObservableUpDownCounter
|
||||
meter metric.Meter
|
||||
mu sync.Mutex
|
||||
registrations []metric.Registration
|
||||
DeltatocumulativeDatapoints metric.Int64Counter
|
||||
DeltatocumulativeStreamsLimit metric.Int64Gauge
|
||||
DeltatocumulativeStreamsMaxStale metric.Int64Gauge
|
||||
DeltatocumulativeStreamsTracked metric.Int64ObservableUpDownCounter
|
||||
}
|
||||
|
||||
// TelemetryBuilderOption applies changes to default builder.
|
||||
@@ -50,12 +45,12 @@ func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
|
||||
tbof(mb)
|
||||
}
|
||||
|
||||
// RegisterDeltatocumulativeStreamsTrackedLinearCallback sets callback for observable DeltatocumulativeStreamsTrackedLinear metric.
|
||||
func (builder *TelemetryBuilder) RegisterDeltatocumulativeStreamsTrackedLinearCallback(cb metric.Int64Callback) error {
|
||||
// RegisterDeltatocumulativeStreamsTrackedCallback sets callback for observable DeltatocumulativeStreamsTracked metric.
|
||||
func (builder *TelemetryBuilder) RegisterDeltatocumulativeStreamsTrackedCallback(cb metric.Int64Callback) error {
|
||||
reg, err := builder.meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
|
||||
cb(ctx, &observerInt64{inst: builder.DeltatocumulativeStreamsTrackedLinear, obs: o})
|
||||
cb(ctx, &observerInt64{inst: builder.DeltatocumulativeStreamsTracked, obs: o})
|
||||
return nil
|
||||
}, builder.DeltatocumulativeStreamsTrackedLinear)
|
||||
}, builder.DeltatocumulativeStreamsTracked)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -93,56 +88,26 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme
|
||||
}
|
||||
builder.meter = Meter(settings)
|
||||
var err, errs error
|
||||
builder.DeltatocumulativeDatapointsDropped, err = builder.meter.Int64Counter(
|
||||
"otelcol_deltatocumulative.datapoints.dropped",
|
||||
metric.WithDescription("number of datapoints dropped due to given 'reason'"),
|
||||
metric.WithUnit("{datapoint}"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeDatapointsLinear, err = builder.meter.Int64Counter(
|
||||
"otelcol_deltatocumulative.datapoints.linear",
|
||||
builder.DeltatocumulativeDatapoints, err = builder.meter.Int64Counter(
|
||||
"otelcol_deltatocumulative_datapoints",
|
||||
metric.WithDescription("total number of datapoints processed. may have 'error' attribute, if processing failed"),
|
||||
metric.WithUnit("{datapoint}"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeDatapointsProcessed, err = builder.meter.Int64Counter(
|
||||
"otelcol_deltatocumulative.datapoints.processed",
|
||||
metric.WithDescription("number of datapoints processed"),
|
||||
metric.WithUnit("{datapoint}"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeGapsLength, err = builder.meter.Int64Counter(
|
||||
"otelcol_deltatocumulative.gaps.length",
|
||||
metric.WithDescription("total duration where data was expected but not received"),
|
||||
metric.WithUnit("s"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeStreamsEvicted, err = builder.meter.Int64Counter(
|
||||
"otelcol_deltatocumulative.streams.evicted",
|
||||
metric.WithDescription("number of streams evicted"),
|
||||
metric.WithUnit("{stream}"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeStreamsLimit, err = builder.meter.Int64Gauge(
|
||||
"otelcol_deltatocumulative.streams.limit",
|
||||
"otelcol_deltatocumulative_streams_limit",
|
||||
metric.WithDescription("upper limit of tracked streams"),
|
||||
metric.WithUnit("{stream}"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeStreamsMaxStale, err = builder.meter.Int64Gauge(
|
||||
"otelcol_deltatocumulative.streams.max_stale",
|
||||
"otelcol_deltatocumulative_streams_max_stale",
|
||||
metric.WithDescription("duration after which streams inactive streams are dropped"),
|
||||
metric.WithUnit("s"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeStreamsTracked, err = builder.meter.Int64UpDownCounter(
|
||||
"otelcol_deltatocumulative.streams.tracked",
|
||||
metric.WithDescription("number of streams tracked"),
|
||||
metric.WithUnit("{dps}"),
|
||||
)
|
||||
errs = errors.Join(errs, err)
|
||||
builder.DeltatocumulativeStreamsTrackedLinear, err = builder.meter.Int64ObservableUpDownCounter(
|
||||
"otelcol_deltatocumulative.streams.tracked.linear",
|
||||
builder.DeltatocumulativeStreamsTracked, err = builder.meter.Int64ObservableUpDownCounter(
|
||||
"otelcol_deltatocumulative_streams_tracked",
|
||||
metric.WithDescription("number of streams tracked"),
|
||||
metric.WithUnit("{dps}"),
|
||||
)
|
||||
|
||||
@@ -25,7 +25,7 @@ func New(set component.TelemetrySettings) (Metrics, error) {
|
||||
if err != nil {
|
||||
return Metrics{}, err
|
||||
}
|
||||
err = telb.RegisterDeltatocumulativeStreamsTrackedLinearCallback(func(_ context.Context, observer metric.Int64Observer) error {
|
||||
err = telb.RegisterDeltatocumulativeStreamsTrackedCallback(func(_ context.Context, observer metric.Int64Observer) error {
|
||||
observer.Observe(int64((*m.tracked)()))
|
||||
return nil
|
||||
})
|
||||
@@ -44,7 +44,7 @@ type Metrics struct {
|
||||
}
|
||||
|
||||
func (m *Metrics) Datapoints() Counter {
|
||||
return Counter{Int64Counter: m.DeltatocumulativeDatapointsLinear}
|
||||
return Counter{Int64Counter: m.DeltatocumulativeDatapoints}
|
||||
}
|
||||
|
||||
func (m *Metrics) WithTracked(streams func() int) {
|
||||
|
||||
@@ -12,14 +12,7 @@ status:
|
||||
telemetry:
|
||||
metrics:
|
||||
# streams
|
||||
deltatocumulative.streams.tracked:
|
||||
description: number of streams tracked
|
||||
unit: "{dps}"
|
||||
sum:
|
||||
value_type: int
|
||||
monotonic: false
|
||||
enabled: true
|
||||
deltatocumulative.streams.tracked.linear:
|
||||
deltatocumulative_streams_tracked:
|
||||
description: number of streams tracked
|
||||
unit: "{dps}"
|
||||
sum:
|
||||
@@ -27,52 +20,23 @@ telemetry:
|
||||
monotonic: false
|
||||
async: true
|
||||
enabled: true
|
||||
deltatocumulative.streams.limit:
|
||||
deltatocumulative_streams_limit:
|
||||
description: upper limit of tracked streams
|
||||
unit: "{stream}"
|
||||
gauge:
|
||||
value_type: int
|
||||
enabled: true
|
||||
deltatocumulative.streams.evicted:
|
||||
description: number of streams evicted
|
||||
unit: "{stream}"
|
||||
sum:
|
||||
value_type: int
|
||||
monotonic: true
|
||||
enabled: true
|
||||
deltatocumulative.streams.max_stale:
|
||||
deltatocumulative_streams_max_stale:
|
||||
description: duration after which streams inactive streams are dropped
|
||||
unit: "s"
|
||||
gauge:
|
||||
value_type: int
|
||||
enabled: true
|
||||
# datapoints
|
||||
deltatocumulative.datapoints.processed:
|
||||
description: number of datapoints processed
|
||||
unit: "{datapoint}"
|
||||
sum:
|
||||
value_type: int
|
||||
monotonic: true
|
||||
enabled: true
|
||||
deltatocumulative.datapoints.dropped:
|
||||
description: number of datapoints dropped due to given 'reason'
|
||||
unit: "{datapoint}"
|
||||
sum:
|
||||
value_type: int
|
||||
monotonic: true
|
||||
enabled: true
|
||||
|
||||
deltatocumulative.datapoints.linear:
|
||||
deltatocumulative_datapoints:
|
||||
description: total number of datapoints processed. may have 'error' attribute, if processing failed
|
||||
unit: "{datapoint}"
|
||||
sum:
|
||||
value_type: int
|
||||
monotonic: true
|
||||
enabled: true
|
||||
deltatocumulative.gaps.length:
|
||||
description: total duration where data was expected but not received
|
||||
unit: "s"
|
||||
sum:
|
||||
value_type: int
|
||||
monotonic: true
|
||||
enabled: true
|
||||
|
||||
@@ -8,15 +8,16 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"go.opentelemetry.io/collector/component"
|
||||
"go.opentelemetry.io/collector/consumer"
|
||||
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||
"go.opentelemetry.io/collector/processor"
|
||||
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity"
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness"
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data"
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta"
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps"
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics"
|
||||
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry"
|
||||
)
|
||||
@@ -29,44 +30,48 @@ type Processor struct {
|
||||
|
||||
last state
|
||||
aggr data.Aggregator
|
||||
mtx sync.Mutex
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
stale staleness.Tracker
|
||||
stale *xsync.MapOf[identity.Stream, time.Time]
|
||||
tel telemetry.Metrics
|
||||
}
|
||||
|
||||
func newProcessor(cfg *Config, tel telemetry.Metrics, next consumer.Metrics) *Processor {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
limit := maps.Limit(int64(cfg.MaxStreams))
|
||||
proc := Processor{
|
||||
next: next,
|
||||
cfg: *cfg,
|
||||
last: state{
|
||||
nums: make(map[identity.Stream]pmetric.NumberDataPoint),
|
||||
hist: make(map[identity.Stream]pmetric.HistogramDataPoint),
|
||||
expo: make(map[identity.Stream]pmetric.ExponentialHistogramDataPoint),
|
||||
ctx: limit,
|
||||
nums: maps.New[identity.Stream, *mutex[pmetric.NumberDataPoint]](limit),
|
||||
hist: maps.New[identity.Stream, *mutex[pmetric.HistogramDataPoint]](limit),
|
||||
expo: maps.New[identity.Stream, *mutex[pmetric.ExponentialHistogramDataPoint]](limit),
|
||||
},
|
||||
aggr: delta.Aggregator{Aggregator: new(data.Adder)},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
|
||||
stale: staleness.NewTracker(),
|
||||
stale: xsync.NewMapOf[identity.Stream, time.Time](),
|
||||
tel: tel,
|
||||
}
|
||||
|
||||
tel.WithTracked(proc.last.Len)
|
||||
tel.WithTracked(proc.last.Size)
|
||||
cfg.Metrics(tel)
|
||||
|
||||
return &proc
|
||||
}
|
||||
|
||||
func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
|
||||
p.mtx.Lock()
|
||||
defer p.mtx.Unlock()
|
||||
type vals struct {
|
||||
nums *mutex[pmetric.NumberDataPoint]
|
||||
hist *mutex[pmetric.HistogramDataPoint]
|
||||
expo *mutex[pmetric.ExponentialHistogramDataPoint]
|
||||
}
|
||||
|
||||
func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
|
||||
now := time.Now()
|
||||
|
||||
const (
|
||||
@@ -74,6 +79,12 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro
|
||||
drop = false
|
||||
)
|
||||
|
||||
zero := vals{
|
||||
nums: guard(pmetric.NewNumberDataPoint()),
|
||||
hist: guard(pmetric.NewHistogramDataPoint()),
|
||||
expo: guard(pmetric.NewExponentialHistogramDataPoint()),
|
||||
}
|
||||
|
||||
metrics.Filter(md, func(m metrics.Metric) bool {
|
||||
if m.AggregationTemporality() != pmetric.AggregationTemporalityDelta {
|
||||
return keep
|
||||
@@ -88,41 +99,70 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro
|
||||
var attrs telemetry.Attributes
|
||||
defer func() { p.tel.Datapoints().Inc(ctx, attrs...) }()
|
||||
|
||||
// if stream new and state capacity reached, reject
|
||||
exist := p.last.Has(id)
|
||||
if !exist && p.last.Len() >= p.cfg.MaxStreams {
|
||||
attrs.Set(telemetry.Error("limit"))
|
||||
return drop
|
||||
}
|
||||
|
||||
// stream is ok and active, update stale tracker
|
||||
p.stale.Refresh(now, id)
|
||||
|
||||
// this is the first sample of the stream. there is nothing to
|
||||
// aggregate with, so clone this value into the state and done
|
||||
if !exist {
|
||||
p.last.BeginWith(id, dp)
|
||||
return keep
|
||||
}
|
||||
|
||||
// aggregate with state from previous requests.
|
||||
// delta.AccumulateInto(state, dp) stores result in `state`.
|
||||
// this is then copied into `dp` (the value passed onto the pipeline)
|
||||
var err error
|
||||
switch dp := dp.(type) {
|
||||
case pmetric.NumberDataPoint:
|
||||
state := p.last.nums[id]
|
||||
err = p.aggr.Numbers(state, dp)
|
||||
state.CopyTo(dp)
|
||||
last, loaded := p.last.nums.LoadOrStore(id, zero.nums)
|
||||
if maps.Exceeded(last, loaded) {
|
||||
// state is full, reject stream
|
||||
attrs.Set(telemetry.Error("limit"))
|
||||
return drop
|
||||
}
|
||||
|
||||
// stream is ok and active, update stale tracker
|
||||
p.stale.Store(id, now)
|
||||
|
||||
if !loaded {
|
||||
// cached zero was stored, alloc new one
|
||||
zero.nums = guard(pmetric.NewNumberDataPoint())
|
||||
}
|
||||
|
||||
last.use(func(last pmetric.NumberDataPoint) {
|
||||
err = p.aggr.Numbers(last, dp)
|
||||
last.CopyTo(dp)
|
||||
})
|
||||
case pmetric.HistogramDataPoint:
|
||||
state := p.last.hist[id]
|
||||
err = p.aggr.Histograms(state, dp)
|
||||
state.CopyTo(dp)
|
||||
last, loaded := p.last.hist.LoadOrStore(id, zero.hist)
|
||||
if maps.Exceeded(last, loaded) {
|
||||
// state is full, reject stream
|
||||
attrs.Set(telemetry.Error("limit"))
|
||||
return drop
|
||||
}
|
||||
|
||||
// stream is ok and active, update stale tracker
|
||||
p.stale.Store(id, now)
|
||||
|
||||
if !loaded {
|
||||
// cached zero was stored, alloc new one
|
||||
zero.hist = guard(pmetric.NewHistogramDataPoint())
|
||||
}
|
||||
|
||||
last.use(func(last pmetric.HistogramDataPoint) {
|
||||
err = p.aggr.Histograms(last, dp)
|
||||
last.CopyTo(dp)
|
||||
})
|
||||
case pmetric.ExponentialHistogramDataPoint:
|
||||
state := p.last.expo[id]
|
||||
err = p.aggr.Exponential(state, dp)
|
||||
state.CopyTo(dp)
|
||||
last, loaded := p.last.expo.LoadOrStore(id, zero.expo)
|
||||
if maps.Exceeded(last, loaded) {
|
||||
// state is full, reject stream
|
||||
attrs.Set(telemetry.Error("limit"))
|
||||
return drop
|
||||
}
|
||||
|
||||
// stream is ok and active, update stale tracker
|
||||
p.stale.Store(id, now)
|
||||
|
||||
if !loaded {
|
||||
// cached zero was stored, alloc new one
|
||||
zero.expo = guard(pmetric.NewExponentialHistogramDataPoint())
|
||||
}
|
||||
|
||||
last.use(func(last pmetric.ExponentialHistogramDataPoint) {
|
||||
err = p.aggr.Exponential(last, dp)
|
||||
last.CopyTo(dp)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
attrs.Set(telemetry.Cause(err))
|
||||
return drop
|
||||
@@ -155,12 +195,16 @@ func (p *Processor) Start(_ context.Context, _ component.Host) error {
|
||||
case <-p.ctx.Done():
|
||||
return
|
||||
case <-tick.C:
|
||||
p.mtx.Lock()
|
||||
stale := p.stale.Collect(p.cfg.MaxStale)
|
||||
for _, id := range stale {
|
||||
p.last.Delete(id)
|
||||
}
|
||||
p.mtx.Unlock()
|
||||
now := time.Now()
|
||||
p.stale.Range(func(id identity.Stream, last time.Time) bool {
|
||||
if now.Sub(last) > p.cfg.MaxStale {
|
||||
p.last.nums.LoadAndDelete(id)
|
||||
p.last.hist.LoadAndDelete(id)
|
||||
p.last.expo.LoadAndDelete(id)
|
||||
p.stale.Delete(id)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -180,38 +224,27 @@ func (p *Processor) Capabilities() consumer.Capabilities {
|
||||
|
||||
// state keeps a cumulative value, aggregated over time, per stream
|
||||
type state struct {
|
||||
nums map[identity.Stream]pmetric.NumberDataPoint
|
||||
hist map[identity.Stream]pmetric.HistogramDataPoint
|
||||
expo map[identity.Stream]pmetric.ExponentialHistogramDataPoint
|
||||
ctx maps.Context
|
||||
nums *maps.Parallel[identity.Stream, *mutex[pmetric.NumberDataPoint]]
|
||||
hist *maps.Parallel[identity.Stream, *mutex[pmetric.HistogramDataPoint]]
|
||||
expo *maps.Parallel[identity.Stream, *mutex[pmetric.ExponentialHistogramDataPoint]]
|
||||
}
|
||||
|
||||
func (m state) Len() int {
|
||||
return len(m.nums) + len(m.hist) + len(m.expo)
|
||||
func (s state) Size() int {
|
||||
return int(s.ctx.Size())
|
||||
}
|
||||
|
||||
func (m state) Has(id identity.Stream) bool {
|
||||
_, nok := m.nums[id]
|
||||
_, hok := m.hist[id]
|
||||
_, eok := m.expo[id]
|
||||
return nok || hok || eok
|
||||
type mutex[T any] struct {
|
||||
mtx sync.Mutex
|
||||
v T
|
||||
}
|
||||
|
||||
func (m state) Delete(id identity.Stream) {
|
||||
delete(m.nums, id)
|
||||
delete(m.hist, id)
|
||||
delete(m.expo, id)
|
||||
func (mtx *mutex[T]) use(do func(T)) {
|
||||
mtx.mtx.Lock()
|
||||
do(mtx.v)
|
||||
mtx.mtx.Unlock()
|
||||
}
|
||||
|
||||
func (m state) BeginWith(id identity.Stream, dp any) {
|
||||
switch dp := dp.(type) {
|
||||
case pmetric.NumberDataPoint:
|
||||
m.nums[id] = pmetric.NewNumberDataPoint()
|
||||
dp.CopyTo(m.nums[id])
|
||||
case pmetric.HistogramDataPoint:
|
||||
m.hist[id] = pmetric.NewHistogramDataPoint()
|
||||
dp.CopyTo(m.hist[id])
|
||||
case pmetric.ExponentialHistogramDataPoint:
|
||||
m.expo[id] = pmetric.NewExponentialHistogramDataPoint()
|
||||
dp.CopyTo(m.expo[id])
|
||||
}
|
||||
func guard[T any](v T) *mutex[T] {
|
||||
return &mutex[T]{v: v}
|
||||
}
|
||||
|
||||
11
vendor/github.com/prometheus/procfs/.golangci.yml
generated
vendored
11
vendor/github.com/prometheus/procfs/.golangci.yml
generated
vendored
@@ -2,7 +2,10 @@
|
||||
linters:
|
||||
enable:
|
||||
- errcheck
|
||||
- forbidigo
|
||||
- godot
|
||||
- gofmt
|
||||
- goimports
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
@@ -12,11 +15,17 @@ linters:
|
||||
- testifylint
|
||||
- unused
|
||||
|
||||
linter-settings:
|
||||
linters-settings:
|
||||
forbidigo:
|
||||
forbid:
|
||||
- p: ^fmt\.Print.*$
|
||||
msg: Do not commit print statements.
|
||||
godot:
|
||||
capital: true
|
||||
exclude:
|
||||
# Ignore "See: URL"
|
||||
- 'See:'
|
||||
goimports:
|
||||
local-prefixes: github.com/prometheus/procfs
|
||||
misspell:
|
||||
locale: US
|
||||
|
||||
8
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
8
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
||||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.59.0
|
||||
GOLANGCI_LINT_VERSION ?= v1.60.2
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
@@ -275,3 +275,9 @@ $(1)_precheck:
|
||||
exit 1; \
|
||||
fi
|
||||
endef
|
||||
|
||||
govulncheck: install-govulncheck
|
||||
govulncheck ./...
|
||||
|
||||
install-govulncheck:
|
||||
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
|
||||
6
vendor/github.com/prometheus/procfs/README.md
generated
vendored
6
vendor/github.com/prometheus/procfs/README.md
generated
vendored
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
|
||||
The procfs library includes a set of test fixtures which include many example files from
|
||||
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
|
||||
which is extracted automatically during testing. To add/update the test fixtures, first
|
||||
ensure the `fixtures` directory is up to date by removing the existing directory and then
|
||||
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
|
||||
ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
|
||||
extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
|
||||
|
||||
```bash
|
||||
rm -rf testdata/fixtures
|
||||
make test
|
||||
```
|
||||
|
||||
Next, make the required changes to the extracted files in the `fixtures` directory. When
|
||||
Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
|
||||
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
|
||||
based on the updated `fixtures` directory. And finally, verify the changes using
|
||||
`git diff testdata/fixtures.ttar`.
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/arp.go
generated
vendored
4
vendor/github.com/prometheus/procfs/arp.go
generated
vendored
@@ -23,9 +23,9 @@ import (
|
||||
|
||||
// Learned from include/uapi/linux/if_arp.h.
|
||||
const (
|
||||
// completed entry (ha valid).
|
||||
// Completed entry (ha valid).
|
||||
ATFComplete = 0x02
|
||||
// permanent entry.
|
||||
// Permanent entry.
|
||||
ATFPermanent = 0x04
|
||||
// Publish entry.
|
||||
ATFPublish = 0x08
|
||||
|
||||
10
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
10
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
@@ -24,8 +24,14 @@ type FS struct {
|
||||
isReal bool
|
||||
}
|
||||
|
||||
// DefaultMountPoint is the common mount point of the proc filesystem.
|
||||
const DefaultMountPoint = fs.DefaultProcMountPoint
|
||||
const (
|
||||
// DefaultMountPoint is the common mount point of the proc filesystem.
|
||||
DefaultMountPoint = fs.DefaultProcMountPoint
|
||||
|
||||
// SectorSize represents the size of a sector in bytes.
|
||||
// It is specific to Linux block I/O operations.
|
||||
SectorSize = 512
|
||||
)
|
||||
|
||||
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
|
||||
// It will error if the mount point directory can't be read or is a file.
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/fs_statfs_notype.go
generated
vendored
4
vendor/github.com/prometheus/procfs/fs_statfs_notype.go
generated
vendored
@@ -17,7 +17,7 @@
|
||||
package procfs
|
||||
|
||||
// isRealProc returns true on architectures that don't have a Type argument
|
||||
// in their Statfs_t struct
|
||||
func isRealProc(mountPoint string) (bool, error) {
|
||||
// in their Statfs_t struct.
|
||||
func isRealProc(_ string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
6
vendor/github.com/prometheus/procfs/fscache.go
generated
vendored
6
vendor/github.com/prometheus/procfs/fscache.go
generated
vendored
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
|
||||
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
|
||||
// Number of release reqs ignored due to in-progress store
|
||||
ReleaseRequestsIgnoredDueToInProgressStore uint64
|
||||
// Number of page stores cancelled due to release req
|
||||
// Number of page stores canceled due to release req
|
||||
PageStoresCancelledByReleaseRequests uint64
|
||||
VmscanWaiting uint64
|
||||
// Number of times async ops added to pending queues
|
||||
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
|
||||
OpsRunning uint64
|
||||
// Number of times async ops queued for processing
|
||||
OpsEnqueued uint64
|
||||
// Number of async ops cancelled
|
||||
// Number of async ops canceled
|
||||
OpsCancelled uint64
|
||||
// Number of async ops rejected due to object lookup/create failure
|
||||
OpsRejected uint64
|
||||
// Number of async ops initialised
|
||||
// Number of async ops initialized
|
||||
OpsInitialised uint64
|
||||
// Number of async ops queued for deferred release
|
||||
OpsDeferred uint64
|
||||
|
||||
3
vendor/github.com/prometheus/procfs/internal/fs/fs.go
generated
vendored
3
vendor/github.com/prometheus/procfs/internal/fs/fs.go
generated
vendored
@@ -28,6 +28,9 @@ const (
|
||||
|
||||
// DefaultConfigfsMountPoint is the common mount point of the configfs.
|
||||
DefaultConfigfsMountPoint = "/sys/kernel/config"
|
||||
|
||||
// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
|
||||
DefaultSelinuxMountPoint = "/sys/fs/selinux"
|
||||
)
|
||||
|
||||
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
|
||||
|
||||
14
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
14
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
@@ -14,6 +14,7 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
|
||||
}
|
||||
return &truth
|
||||
}
|
||||
|
||||
// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
|
||||
func ReadHexFromFile(path string) (uint64, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hexString := strings.TrimSpace(string(data))
|
||||
if !strings.HasPrefix(hexString, "0x") {
|
||||
return 0, errors.New("invalid format: hex string does not start with '0x'")
|
||||
}
|
||||
return strconv.ParseUint(hexString[2:], 16, 64)
|
||||
}
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
4
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
@@ -45,11 +45,11 @@ const (
|
||||
fieldTransport11TCPLen = 13
|
||||
fieldTransport11UDPLen = 10
|
||||
|
||||
// kernel version >= 4.14 MaxLen
|
||||
// Kernel version >= 4.14 MaxLen
|
||||
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
|
||||
fieldTransport11RDMAMaxLen = 28
|
||||
|
||||
// kernel version <= 4.2 MinLen
|
||||
// Kernel version <= 4.2 MinLen
|
||||
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
|
||||
fieldTransport11RDMAMinLen = 20
|
||||
)
|
||||
|
||||
96
vendor/github.com/prometheus/procfs/net_dev_snmp6.go
generated
vendored
Normal file
96
vendor/github.com/prometheus/procfs/net_dev_snmp6.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
|
||||
// The outer map's keys are interface names and the inner map's keys are stat names.
|
||||
//
|
||||
// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
|
||||
type NetDevSNMP6 map[string]map[string]uint64
|
||||
|
||||
// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
|
||||
// directory.
|
||||
func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
|
||||
return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
|
||||
}
|
||||
|
||||
// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
|
||||
// directory.
|
||||
func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
|
||||
return newNetDevSNMP6(p.path("net/dev_snmp6"))
|
||||
}
|
||||
|
||||
// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
|
||||
func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
|
||||
netDevSNMP6 := make(NetDevSNMP6)
|
||||
|
||||
// The net/dev_snmp6 folders contain one file per interface
|
||||
ifaceFiles, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
// On systems with IPv6 disabled, this directory won't exist.
|
||||
// Do nothing.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
|
||||
for _, iFaceFile := range ifaceFiles {
|
||||
f, err := os.Open(dir + "/" + iFaceFile.Name())
|
||||
if err != nil {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
|
||||
if err != nil {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
}
|
||||
|
||||
return netDevSNMP6, nil
|
||||
}
|
||||
|
||||
func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
|
||||
m := make(map[string]uint64)
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
stat := strings.Fields(scanner.Text())
|
||||
if len(stat) < 2 {
|
||||
continue
|
||||
}
|
||||
key, val := stat[0], stat[1]
|
||||
|
||||
// Expect stat name to contain "6" or be "ifIndex"
|
||||
if strings.Contains(key, "6") || key == "ifIndex" {
|
||||
v, err := strconv.ParseUint(val, 10, 64)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
|
||||
m[key] = v
|
||||
}
|
||||
}
|
||||
return m, scanner.Err()
|
||||
}
|
||||
8
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
8
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// readLimit is used by io.LimitReader while reading the content of the
|
||||
// Maximum size limit used by io.LimitReader while reading the content of the
|
||||
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
|
||||
// as each line represents a single used socket.
|
||||
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
|
||||
@@ -50,12 +50,12 @@ type (
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
// Drops shows the total number of dropped packets of all UPD sockets.
|
||||
// Drops shows the total number of dropped packets of all UDP sockets.
|
||||
Drops *uint64
|
||||
}
|
||||
|
||||
// netIPSocketLine represents the fields parsed from a single line
|
||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||
// A single line parser for fields from /proc/net/{t,u}dp{,6}.
|
||||
// Fields which are not used by IPSocket are skipped.
|
||||
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netIPSocketLine struct {
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/net_tcp.go
generated
vendored
4
vendor/github.com/prometheus/procfs/net_tcp.go
generated
vendored
@@ -25,24 +25,28 @@ type (
|
||||
|
||||
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
|
||||
// read from /proc/net/tcp.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
|
||||
func (fs FS) NetTCP() (NetTCP, error) {
|
||||
return newNetTCP(fs.proc.Path("net/tcp"))
|
||||
}
|
||||
|
||||
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
|
||||
// read from /proc/net/tcp6.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
|
||||
func (fs FS) NetTCP6() (NetTCP, error) {
|
||||
return newNetTCP(fs.proc.Path("net/tcp6"))
|
||||
}
|
||||
|
||||
// NetTCPSummary returns already computed statistics like the total queue lengths
|
||||
// for TCP datagrams read from /proc/net/tcp.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
|
||||
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
|
||||
return newNetTCPSummary(fs.proc.Path("net/tcp"))
|
||||
}
|
||||
|
||||
// NetTCP6Summary returns already computed statistics like the total queue lengths
|
||||
// for TCP datagrams read from /proc/net/tcp6.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
|
||||
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
|
||||
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
|
||||
}
|
||||
|
||||
8
vendor/github.com/prometheus/procfs/net_unix.go
generated
vendored
8
vendor/github.com/prometheus/procfs/net_unix.go
generated
vendored
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
|
||||
return &nu, nil
|
||||
}
|
||||
|
||||
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
|
||||
func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
|
||||
fields := strings.Fields(line)
|
||||
|
||||
l := len(fields)
|
||||
if l < min {
|
||||
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
|
||||
if l < minFields {
|
||||
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
|
||||
}
|
||||
|
||||
// Field offsets are as follows:
|
||||
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
|
||||
}
|
||||
|
||||
// Path field is optional.
|
||||
if l > min {
|
||||
if l > minFields {
|
||||
// Path occurs at either index 6 or 7 depending on whether inode is
|
||||
// already present.
|
||||
pathIdx := 7
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/proc_cgroup.go
generated
vendored
2
vendor/github.com/prometheus/procfs/proc_cgroup.go
generated
vendored
@@ -24,7 +24,7 @@ import (
|
||||
)
|
||||
|
||||
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
|
||||
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
|
||||
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
|
||||
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
|
||||
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
|
||||
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/proc_io.go
generated
vendored
2
vendor/github.com/prometheus/procfs/proc_io.go
generated
vendored
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {
|
||||
|
||||
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
|
||||
"read_bytes: %d\nwrite_bytes: %d\n" +
|
||||
"cancelled_write_bytes: %d\n"
|
||||
"cancelled_write_bytes: %d\n" //nolint:misspell
|
||||
|
||||
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
|
||||
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
4
vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
@@ -19,7 +19,6 @@ package procfs
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
@@ -29,7 +28,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// match the header line before each mapped zone in `/proc/pid/smaps`.
|
||||
// Match the header line before each mapped zone in `/proc/pid/smaps`.
|
||||
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
|
||||
)
|
||||
|
||||
@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
|
||||
func (s *ProcSMapsRollup) parseLine(line string) error {
|
||||
kv := strings.SplitN(line, ":", 2)
|
||||
if len(kv) != 2 {
|
||||
fmt.Println(line)
|
||||
return errors.New("invalid net/dev line, missing colon")
|
||||
}
|
||||
|
||||
|
||||
18
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
18
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
|
||||
}
|
||||
}
|
||||
case "NSpid":
|
||||
s.NSpids = calcNSPidsList(vString)
|
||||
nspids, err := calcNSPidsList(vString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.NSpids = nspids
|
||||
case "VmPeak":
|
||||
s.VmPeak = vUintBytes
|
||||
case "VmSize":
|
||||
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
|
||||
return g
|
||||
}
|
||||
|
||||
func calcNSPidsList(nspidsString string) []uint64 {
|
||||
s := strings.Split(nspidsString, " ")
|
||||
func calcNSPidsList(nspidsString string) ([]uint64, error) {
|
||||
s := strings.Split(nspidsString, "\t")
|
||||
var nspids []uint64
|
||||
|
||||
for _, nspid := range s {
|
||||
nspid, _ := strconv.ParseUint(nspid, 10, 64)
|
||||
if nspid == 0 {
|
||||
continue
|
||||
nspid, err := strconv.ParseUint(nspid, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nspids = append(nspids, nspid)
|
||||
}
|
||||
|
||||
return nspids
|
||||
return nspids, nil
|
||||
}
|
||||
|
||||
15
vendor/github.com/puzpuzpuz/xsync/v3/.gitignore
generated
vendored
Normal file
15
vendor/github.com/puzpuzpuz/xsync/v3/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
133
vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md
generated
vendored
Normal file
133
vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
# xsync benchmarks
|
||||
|
||||
If you're interested in `MapOf` comparison with some of the popular concurrent hash maps written in Go, check [this](https://github.com/cornelk/hashmap/pull/70) and [this](https://github.com/alphadose/haxmap/pull/22) PRs.
|
||||
|
||||
The below results were obtained for xsync v2.3.1 on a c6g.metal EC2 instance (64 CPU, 128GB RAM) running Linux and Go 1.19.3. I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks.
|
||||
|
||||
The following commands were used to run the benchmarks:
|
||||
```bash
|
||||
$ go test -run='^$' -cpu=1,2,4,8,16,32,64 -bench . -count=30 -timeout=0 | tee bench.txt
|
||||
$ benchstat bench.txt | tee benchstat.txt
|
||||
```
|
||||
|
||||
The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output.
|
||||
|
||||
Please note that `MapOf` got a number of optimizations since v2.3.1, so the current result is likely to be different.
|
||||
|
||||
### Counter vs. atomic int64
|
||||
|
||||
```
|
||||
name time/op
|
||||
Counter 27.3ns ± 1%
|
||||
Counter-2 27.2ns ±11%
|
||||
Counter-4 15.3ns ± 8%
|
||||
Counter-8 7.43ns ± 7%
|
||||
Counter-16 3.70ns ±10%
|
||||
Counter-32 1.77ns ± 3%
|
||||
Counter-64 0.96ns ±10%
|
||||
AtomicInt64 7.60ns ± 0%
|
||||
AtomicInt64-2 12.6ns ±13%
|
||||
AtomicInt64-4 13.5ns ±14%
|
||||
AtomicInt64-8 12.7ns ± 9%
|
||||
AtomicInt64-16 12.8ns ± 8%
|
||||
AtomicInt64-32 13.0ns ± 6%
|
||||
AtomicInt64-64 12.9ns ± 7%
|
||||
```
|
||||
|
||||
Here `time/op` stands for average time spent on operation. If you divide `10^9` by the result in nanoseconds per operation, you'd get the throughput in operations per second. Thus, the ideal theoretical scalability of a concurrent data structure implies that the reported `time/op` decreases proportionally with the increased number of CPU cores. On the contrary, if the measured time per operation increases when run on more cores, it means performance degradation.
|
||||
|
||||
### MapOf vs. sync.Map
|
||||
|
||||
1,000 `[int, int]` entries with a warm-up, 100% Loads:
|
||||
```
|
||||
IntegerMapOf_WarmUp/reads=100% 24.0ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-2 12.0ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-4 6.02ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-8 3.01ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-16 1.50ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-32 0.75ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-64 0.38ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=100% 55.3ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=100%-2 27.6ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=100%-4 16.1ns ± 3%
|
||||
IntegerMapStandard_WarmUp/reads=100%-8 8.35ns ± 7%
|
||||
IntegerMapStandard_WarmUp/reads=100%-16 4.24ns ± 7%
|
||||
IntegerMapStandard_WarmUp/reads=100%-32 2.18ns ± 6%
|
||||
IntegerMapStandard_WarmUp/reads=100%-64 1.11ns ± 3%
|
||||
```
|
||||
|
||||
1,000 `[int, int]` entries with a warm-up, 99% Loads, 0.5% Stores, 0.5% Deletes:
|
||||
```
|
||||
IntegerMapOf_WarmUp/reads=99% 31.0ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=99%-2 16.4ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=99%-4 8.42ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=99%-8 4.41ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=99%-16 2.38ns ± 2%
|
||||
IntegerMapOf_WarmUp/reads=99%-32 1.37ns ± 4%
|
||||
IntegerMapOf_WarmUp/reads=99%-64 0.85ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=99% 121ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=99%-2 109ns ± 3%
|
||||
IntegerMapStandard_WarmUp/reads=99%-4 115ns ± 4%
|
||||
IntegerMapStandard_WarmUp/reads=99%-8 114ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=99%-16 105ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=99%-32 97.0ns ± 3%
|
||||
IntegerMapStandard_WarmUp/reads=99%-64 98.0ns ± 2%
|
||||
```
|
||||
|
||||
1,000 `[int, int]` entries with a warm-up, 75% Loads, 12.5% Stores, 12.5% Deletes:
|
||||
```
|
||||
IntegerMapOf_WarmUp/reads=75%-reads 46.2ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-2 36.7ns ± 2%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-4 22.0ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-8 12.8ns ± 2%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-16 7.69ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-32 5.16ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-64 4.91ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads 156ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-2 177ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-4 197ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-8 221ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-16 242ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-32 258ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-64 264ns ± 1%
|
||||
```
|
||||
|
||||
### MPMCQueue vs. Go channels
|
||||
|
||||
Concurrent producers and consumers (1:1), queue/channel size 1,000, some work done by both producers and consumers:
|
||||
```
|
||||
QueueProdConsWork100 252ns ± 0%
|
||||
QueueProdConsWork100-2 206ns ± 5%
|
||||
QueueProdConsWork100-4 136ns ±12%
|
||||
QueueProdConsWork100-8 110ns ± 6%
|
||||
QueueProdConsWork100-16 108ns ± 2%
|
||||
QueueProdConsWork100-32 102ns ± 2%
|
||||
QueueProdConsWork100-64 101ns ± 0%
|
||||
ChanProdConsWork100 283ns ± 0%
|
||||
ChanProdConsWork100-2 406ns ±21%
|
||||
ChanProdConsWork100-4 549ns ± 7%
|
||||
ChanProdConsWork100-8 754ns ± 7%
|
||||
ChanProdConsWork100-16 828ns ± 7%
|
||||
ChanProdConsWork100-32 810ns ± 8%
|
||||
ChanProdConsWork100-64 832ns ± 4%
|
||||
```
|
||||
|
||||
### RBMutex vs. sync.RWMutex
|
||||
|
||||
The writer locks on each 100,000 iteration with some work in the critical section for both readers and the writer:
|
||||
```
|
||||
RBMutexWorkWrite100000 146ns ± 0%
|
||||
RBMutexWorkWrite100000-2 73.3ns ± 0%
|
||||
RBMutexWorkWrite100000-4 36.7ns ± 0%
|
||||
RBMutexWorkWrite100000-8 18.6ns ± 0%
|
||||
RBMutexWorkWrite100000-16 9.83ns ± 3%
|
||||
RBMutexWorkWrite100000-32 5.53ns ± 0%
|
||||
RBMutexWorkWrite100000-64 4.04ns ± 3%
|
||||
RWMutexWorkWrite100000 121ns ± 0%
|
||||
RWMutexWorkWrite100000-2 128ns ± 1%
|
||||
RWMutexWorkWrite100000-4 124ns ± 2%
|
||||
RWMutexWorkWrite100000-8 101ns ± 1%
|
||||
RWMutexWorkWrite100000-16 92.9ns ± 1%
|
||||
RWMutexWorkWrite100000-32 89.9ns ± 1%
|
||||
RWMutexWorkWrite100000-64 88.4ns ± 1%
|
||||
```
|
||||
201
vendor/github.com/puzpuzpuz/xsync/v3/LICENSE
generated
vendored
Normal file
201
vendor/github.com/puzpuzpuz/xsync/v3/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
195
vendor/github.com/puzpuzpuz/xsync/v3/README.md
generated
vendored
Normal file
195
vendor/github.com/puzpuzpuz/xsync/v3/README.md
generated
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
[](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v3)
|
||||
[](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v3)
|
||||
[](https://codecov.io/gh/puzpuzpuz/xsync)
|
||||
|
||||
# xsync
|
||||
|
||||
Concurrent data structures for Go. Aims to provide more scalable alternatives for some of the data structures from the standard `sync` package, but not only.
|
||||
|
||||
Covered with tests following the approach described [here](https://puzpuzpuz.dev/testing-concurrent-code-for-fun-and-profit).
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Benchmark results may be found [here](BENCHMARKS.md). I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks on a beefy multicore machine.
|
||||
|
||||
Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashMap](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ConcurrentHashMap.html) and `xsync.MapOf` is available [here](https://puzpuzpuz.dev/concurrent-map-in-go-vs-java-yet-another-meaningless-benchmark).
|
||||
|
||||
## Usage
|
||||
|
||||
The latest xsync major version is v3, so `/v3` suffix should be used when importing the library:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
```
|
||||
|
||||
*Note for pre-v3 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial.
|
||||
|
||||
### Counter
|
||||
|
||||
A `Counter` is a striped `int64` counter inspired by the `j.u.c.a.LongAdder` class from the Java standard library.
|
||||
|
||||
```go
|
||||
c := xsync.NewCounter()
|
||||
// increment and decrement the counter
|
||||
c.Inc()
|
||||
c.Dec()
|
||||
// read the current value
|
||||
v := c.Value()
|
||||
```
|
||||
|
||||
Works better in comparison with a single atomically updated `int64` counter in high contention scenarios.
|
||||
|
||||
### Map
|
||||
|
||||
A `Map` is like a concurrent hash table-based map. It follows the interface of `sync.Map` with a number of valuable extensions like `Compute` or `Size`.
|
||||
|
||||
```go
|
||||
m := xsync.NewMap()
|
||||
m.Store("foo", "bar")
|
||||
v, ok := m.Load("foo")
|
||||
s := m.Size()
|
||||
```
|
||||
|
||||
`Map` uses a modified version of Cache-Line Hash Table (CLHT) data structure: https://github.com/LPD-EPFL/CLHT
|
||||
|
||||
CLHT is built around the idea of organizing the hash table in cache-line-sized buckets, so that on all modern CPUs update operations complete with minimal cache-line transfer. Also, `Get` operations are obstruction-free and involve no writes to shared memory, hence no mutexes or any other sort of locks. Due to this design, in all considered scenarios `Map` outperforms `sync.Map`.
|
||||
|
||||
One important difference with `sync.Map` is that only string keys are supported. That's because Golang standard library does not expose the built-in hash functions for `interface{}` values.
|
||||
|
||||
`MapOf[K, V]` is an implementation with parametrized key and value types. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, less GC pressure and fewer atomic operations on reads.
|
||||
|
||||
```go
|
||||
m := xsync.NewMapOf[string, string]()
|
||||
m.Store("foo", "bar")
|
||||
v, ok := m.Load("foo")
|
||||
```
|
||||
|
||||
Apart from CLHT, `MapOf` borrows ideas from Java's `j.u.c.ConcurrentHashMap` (immutable K/V pair structs instead of atomic snapshots) and C++'s `absl::flat_hash_map` (meta memory and SWAR-based lookups). It also has more dense memory layout when compared with `Map`. Long story short, `MapOf` should be preferred over `Map` when possible.
|
||||
|
||||
An important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types:
|
||||
|
||||
```go
|
||||
type Point struct {
|
||||
x int32
|
||||
y int32
|
||||
}
|
||||
m := NewMapOf[Point, int]()
|
||||
m.Store(Point{42, 42}, 42)
|
||||
v, ok := m.Load(Point{42, 42})
|
||||
```
|
||||
|
||||
Apart from `Range` method available for map iteration, there are also `ToPlainMap`/`ToPlainMapOf` utility functions to convert a `Map`/`MapOf` to a built-in Go's `map`:
|
||||
```go
|
||||
m := xsync.NewMapOf[int, int]()
|
||||
m.Store(42, 42)
|
||||
pm := xsync.ToPlainMapOf(m)
|
||||
```
|
||||
|
||||
Both `Map` and `MapOf` use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers:
|
||||
|
||||
```go
|
||||
m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
|
||||
h := uint64(i)
|
||||
h = (h ^ (h >> 33)) * 0xff51afd7ed558ccd
|
||||
h = (h ^ (h >> 33)) * 0xc4ceb9fe1a85ec53
|
||||
return h ^ (h >> 33)
|
||||
})
|
||||
```
|
||||
|
||||
When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into the consideration.
|
||||
|
||||
### SPSCQueue
|
||||
|
||||
A `SPSCQueue` is a bounded single-producer single-consumer concurrent queue. This means that not more than a single goroutine must be publishing items to the queue while not more than a single goroutine must be consuming those items.
|
||||
|
||||
```go
|
||||
q := xsync.NewSPSCQueue(1024)
|
||||
// producer inserts an item into the queue
|
||||
// optimistic insertion attempt; doesn't block
|
||||
inserted := q.TryEnqueue("bar")
|
||||
// consumer obtains an item from the queue
|
||||
// optimistic obtain attempt; doesn't block
|
||||
item, ok := q.TryDequeue() // interface{} pointing to a string
|
||||
```
|
||||
|
||||
`SPSCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.
|
||||
|
||||
```go
|
||||
q := xsync.NewSPSCQueueOf[string](1024)
|
||||
inserted := q.TryEnqueue("foo")
|
||||
item, ok := q.TryDequeue() // string
|
||||
```
|
||||
|
||||
The queue is based on the data structure from this [article](https://rigtorp.se/ringbuffer). The idea is to reduce the CPU cache coherency traffic by keeping cached copies of read and write indexes used by producer and consumer respectively.
|
||||
|
||||
### MPMCQueue
|
||||
|
||||
A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue.
|
||||
|
||||
```go
|
||||
q := xsync.NewMPMCQueue(1024)
|
||||
// producer optimistically inserts an item into the queue
|
||||
// optimistic insertion attempt; doesn't block
|
||||
inserted := q.TryEnqueue("bar")
|
||||
// consumer obtains an item from the queue
|
||||
// optimistic obtain attempt; doesn't block
|
||||
item, ok := q.TryDequeue() // interface{} pointing to a string
|
||||
```
|
||||
|
||||
`MPMCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.
|
||||
|
||||
```go
|
||||
q := xsync.NewMPMCQueueOf[string](1024)
|
||||
inserted := q.TryEnqueue("foo")
|
||||
item, ok := q.TryDequeue() // string
|
||||
```
|
||||
|
||||
The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in its turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, has blocking producers and consumers.
|
||||
|
||||
The idea of the algorithm is to allow parallelism for concurrent producers and consumers by introducing the notion of tickets, i.e. values of two counters, one per producers/consumers. An atomic increment of one of those counters is the only noticeable contention point in queue operations. The rest of the operation avoids contention on writes thanks to the turn-based read/write access for each of the queue items.
|
||||
|
||||
In essence, `MPMCQueue` is a specialized queue for scenarios where there are multiple concurrent producers and consumers of a single queue running on a large multicore machine.
|
||||
|
||||
To get the optimal performance, you may want to set the queue size to be large enough, say, an order of magnitude greater than the number of producers/consumers, to allow producers and consumers to progress with their queue operations in parallel most of the time.
|
||||
|
||||
### RBMutex
|
||||
|
||||
A `RBMutex` is a reader-biased reader/writer mutual exclusion lock. The lock can be held by many readers or a single writer.
|
||||
|
||||
```go
|
||||
mu := xsync.NewRBMutex()
|
||||
// reader lock calls return a token
|
||||
t := mu.RLock()
|
||||
// the token must be later used to unlock the mutex
|
||||
mu.RUnlock(t)
|
||||
// writer locks are the same as in sync.RWMutex
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
```
|
||||
|
||||
`RBMutex` is based on a modified version of BRAVO (Biased Locking for Reader-Writer Locks) algorithm: https://arxiv.org/pdf/1810.01553.pdf
|
||||
|
||||
The idea of the algorithm is to build on top of an existing reader-writer mutex and introduce a fast path for readers. On the fast path, reader lock attempts are sharded over an internal array based on the reader identity (a token in the case of Golang). This means that readers do not contend over a single atomic counter like it's done in, say, `sync.RWMutex` allowing for better scalability in terms of cores.
|
||||
|
||||
Hence, by the design `RBMutex` is a specialized mutex for scenarios, such as caches, where the vast majority of locks are acquired by readers and write lock acquire attempts are infrequent. In such scenarios, `RBMutex` should perform better than the `sync.RWMutex` on large multicore machines.
|
||||
|
||||
`RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods.
|
||||
|
||||
Apart from blocking methods, `RBMutex` also has methods for optimistic locking:
|
||||
```go
|
||||
mu := xsync.NewRBMutex()
|
||||
if locked, t := mu.TryRLock(); locked {
|
||||
// critical reader section...
|
||||
mu.RUnlock(t)
|
||||
}
|
||||
if mu.TryLock() {
|
||||
// critical writer section...
|
||||
mu.Unlock()
|
||||
}
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Licensed under MIT.
|
||||
99
vendor/github.com/puzpuzpuz/xsync/v3/counter.go
generated
vendored
Normal file
99
vendor/github.com/puzpuzpuz/xsync/v3/counter.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// pool for P tokens; tokens are recycled across Add calls to avoid
// allocating one per operation
var ptokenPool sync.Pool

// a P token is used to point at the current OS thread (P)
// on which the goroutine is run; exact identity of the thread,
// as well as P migration tolerance, is not important since
// it's used as a best-effort mechanism for assigning
// concurrent operations (goroutines) to different stripes of
// the counter
type ptoken struct {
	// idx is the preferred stripe index for the holder of this token.
	idx uint32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}
|
||||
|
||||
// A Counter is a striped int64 counter.
//
// Should be preferred over a single atomically updated int64
// counter in high contention scenarios.
//
// A Counter must not be copied after first use.
type Counter struct {
	// stripes holds the per-stripe partial sums; its length is a
	// power of two so that mask can be used for cheap index selection.
	stripes []cstripe
	// mask equals len(stripes)-1 and selects a stripe from a token index.
	mask uint32
}

// cstripe is a single cache-line-padded counter stripe.
type cstripe struct {
	c int64
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 8]byte
}
|
||||
|
||||
// NewCounter creates a new Counter instance.
|
||||
func NewCounter() *Counter {
|
||||
nstripes := nextPowOf2(parallelism())
|
||||
c := Counter{
|
||||
stripes: make([]cstripe, nstripes),
|
||||
mask: nstripes - 1,
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// Inc increments the counter by 1.
|
||||
func (c *Counter) Inc() {
|
||||
c.Add(1)
|
||||
}
|
||||
|
||||
// Dec decrements the counter by 1.
|
||||
func (c *Counter) Dec() {
|
||||
c.Add(-1)
|
||||
}
|
||||
|
||||
// Add adds the delta to the counter.
//
// A pooled ptoken remembers the stripe that worked for the caller last
// time; on CAS contention a new random stripe is picked, so concurrent
// adders spread themselves over the stripes.
func (c *Counter) Add(delta int64) {
	t, ok := ptokenPool.Get().(*ptoken)
	if !ok {
		// No pooled token available; start at a random stripe.
		t = new(ptoken)
		t.idx = runtime_fastrand()
	}
	for {
		stripe := &c.stripes[t.idx&c.mask]
		cnt := atomic.LoadInt64(&stripe.c)
		if atomic.CompareAndSwapInt64(&stripe.c, cnt, cnt+delta) {
			break
		}
		// Give a try with another randomly selected stripe.
		t.idx = runtime_fastrand()
	}
	// Return the token (with the stripe that succeeded) for reuse.
	ptokenPool.Put(t)
}
|
||||
|
||||
// Value returns the current counter value.
|
||||
// The returned value may not include all of the latest operations in
|
||||
// presence of concurrent modifications of the counter.
|
||||
func (c *Counter) Value() int64 {
|
||||
v := int64(0)
|
||||
for i := 0; i < len(c.stripes); i++ {
|
||||
stripe := &c.stripes[i]
|
||||
v += atomic.LoadInt64(&stripe.c)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Reset resets the counter to zero.
|
||||
// This method should only be used when it is known that there are
|
||||
// no concurrent modifications of the counter.
|
||||
func (c *Counter) Reset() {
|
||||
for i := 0; i < len(c.stripes); i++ {
|
||||
stripe := &c.stripes[i]
|
||||
atomic.StoreInt64(&stripe.c, 0)
|
||||
}
|
||||
}
|
||||
917
vendor/github.com/puzpuzpuz/xsync/v3/map.go
generated
vendored
Normal file
917
vendor/github.com/puzpuzpuz/xsync/v3/map.go
generated
vendored
Normal file
@@ -0,0 +1,917 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// mapResizeHint tells the resize method why a resize is being requested.
type mapResizeHint int

const (
	// mapGrowHint requests doubling of the table.
	mapGrowHint mapResizeHint = 0
	// mapShrinkHint requests halving of the table.
	mapShrinkHint mapResizeHint = 1
	// mapClearHint requests a fresh table of the minimal size.
	mapClearHint mapResizeHint = 2
)

const (
	// number of Map entries per bucket; 3 entries lead to size of 64B
	// (one cache line) on 64-bit machines
	entriesPerMapBucket = 3
	// threshold fraction of table occupation to start a table shrinking
	// when deleting the last entry in a bucket chain
	mapShrinkFraction = 128
	// map load factor to trigger a table resize during insertion;
	// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
	// key-value pairs (this is a soft limit)
	mapLoadFactor = 0.75
	// minimal table size, i.e. number of buckets; thus, minimal map
	// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
	defaultMinMapTableLen = 32
	// minimum counter stripes to use
	minMapCounterLen = 8
	// maximum counter stripes to use; stands for around 4KB of memory
	maxMapCounterLen = 32
)
|
||||
|
||||
var (
	// topHashMask selects the top 20 bits of a 64-bit hash code shifted
	// into the position of entry 0 inside bucket.topHashMutex.
	topHashMask = uint64((1<<20)-1) << 44
	// topHashEntryMasks holds the per-entry masks for the three
	// top-hash slots packed into bucket.topHashMutex.
	topHashEntryMasks = [3]uint64{
		topHashMask,
		topHashMask >> 20,
		topHashMask >> 40,
	}
)
|
||||
|
||||
// Map is like a Go map[string]interface{} but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A Map must not be copied after first use.
//
// Map uses a modified version of Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around the idea of organizing the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no write to memory, as well as no
// mutexes or any other sort of locks. Due to this design, in all
// considered scenarios Map outperforms sync.Map.
//
// One important difference with sync.Map is that only string keys
// are supported. That's because Golang standard library does not
// expose the built-in hash functions for interface{} values.
type Map struct {
	totalGrowths int64
	totalShrinks int64
	resizing     int64      // resize in progress flag; updated atomically
	resizeMu     sync.Mutex // only used along with resizeCond
	resizeCond   sync.Cond  // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapTable
	minTableLen  int            // table never shrinks below this bucket count
	growOnly     bool           // if set, table never shrinks (except on Clear)
}
|
||||
|
||||
// mapTable is a single immutable-length snapshot of the hash table.
// A resize allocates a new mapTable and publishes it via Map.table.
type mapTable struct {
	buckets []bucketPadded
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	// seed for the hash function; each table gets its own seed
	seed uint64
}

// counterStripe is one cache-line-padded stripe of the table size counter.
type counterStripe struct {
	c int64
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 8]byte
}

// bucketPadded pads bucket so that buckets do not share cache lines.
type bucketPadded struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
	bucket
}

// bucket holds up to entriesPerMapBucket key/value pointer pairs plus
// an overflow chain link and the packed top-hash/lock word.
type bucket struct {
	next   unsafe.Pointer // *bucketPadded
	keys   [entriesPerMapBucket]unsafe.Pointer
	values [entriesPerMapBucket]unsafe.Pointer
	// topHashMutex is a 2-in-1 value.
	//
	// It contains packed top 20 bits (20 MSBs) of hash codes for keys
	// stored in the bucket:
	// | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex |
	// | 20 bits          | 20 bits          | 20 bits          | 3 bits          | 1 bit |
	//
	// The least significant bit is used for the mutex (TTAS spinlock).
	topHashMutex uint64
}

// rangeEntry is a key/value pointer pair captured during Range iteration.
type rangeEntry struct {
	key   unsafe.Pointer
	value unsafe.Pointer
}
|
||||
|
||||
// MapConfig defines configurable Map/MapOf options.
type MapConfig struct {
	// sizeHint is the minimal desired capacity in entries; see WithPresize.
	sizeHint int
	// growOnly disables table shrinking; see WithGrowOnly.
	growOnly bool
}
|
||||
|
||||
// WithPresize configures new Map/MapOf instance with capacity enough
|
||||
// to hold sizeHint entries. The capacity is treated as the minimal
|
||||
// capacity meaning that the underlying hash table will never shrink
|
||||
// to a smaller capacity. If sizeHint is zero or negative, the value
|
||||
// is ignored.
|
||||
func WithPresize(sizeHint int) func(*MapConfig) {
|
||||
return func(c *MapConfig) {
|
||||
c.sizeHint = sizeHint
|
||||
}
|
||||
}
|
||||
|
||||
// WithGrowOnly configures new Map/MapOf instance to be grow-only.
|
||||
// This means that the underlying hash table grows in capacity when
|
||||
// new keys are added, but does not shrink when keys are deleted.
|
||||
// The only exception to this rule is the Clear method which
|
||||
// shrinks the hash table back to the initial capacity.
|
||||
func WithGrowOnly() func(*MapConfig) {
|
||||
return func(c *MapConfig) {
|
||||
c.growOnly = true
|
||||
}
|
||||
}
|
||||
|
||||
// NewMap creates a new Map instance configured with the given
|
||||
// options.
|
||||
func NewMap(options ...func(*MapConfig)) *Map {
|
||||
c := &MapConfig{
|
||||
sizeHint: defaultMinMapTableLen * entriesPerMapBucket,
|
||||
}
|
||||
for _, o := range options {
|
||||
o(c)
|
||||
}
|
||||
|
||||
m := &Map{}
|
||||
m.resizeCond = *sync.NewCond(&m.resizeMu)
|
||||
var table *mapTable
|
||||
if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
|
||||
table = newMapTable(defaultMinMapTableLen)
|
||||
} else {
|
||||
tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor))
|
||||
table = newMapTable(int(tableLen))
|
||||
}
|
||||
m.minTableLen = len(table.buckets)
|
||||
m.growOnly = c.growOnly
|
||||
atomic.StorePointer(&m.table, unsafe.Pointer(table))
|
||||
return m
|
||||
}
|
||||
|
||||
// NewMapPresized creates a new Map instance with capacity enough to hold
|
||||
// sizeHint entries. The capacity is treated as the minimal capacity
|
||||
// meaning that the underlying hash table will never shrink to
|
||||
// a smaller capacity. If sizeHint is zero or negative, the value
|
||||
// is ignored.
|
||||
//
|
||||
// Deprecated: use NewMap in combination with WithPresize.
|
||||
func NewMapPresized(sizeHint int) *Map {
|
||||
return NewMap(WithPresize(sizeHint))
|
||||
}
|
||||
|
||||
func newMapTable(minTableLen int) *mapTable {
|
||||
buckets := make([]bucketPadded, minTableLen)
|
||||
counterLen := minTableLen >> 10
|
||||
if counterLen < minMapCounterLen {
|
||||
counterLen = minMapCounterLen
|
||||
} else if counterLen > maxMapCounterLen {
|
||||
counterLen = maxMapCounterLen
|
||||
}
|
||||
counter := make([]counterStripe, counterLen)
|
||||
t := &mapTable{
|
||||
buckets: buckets,
|
||||
size: counter,
|
||||
seed: makeSeed(),
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ToPlainMap returns a native map with a copy of xsync Map's
|
||||
// contents. The copied xsync Map should not be modified while
|
||||
// this call is made. If the copied Map is modified, the copying
|
||||
// behavior is the same as in the Range method.
|
||||
func ToPlainMap(m *Map) map[string]interface{} {
|
||||
pm := make(map[string]interface{})
|
||||
if m != nil {
|
||||
m.Range(func(key string, value interface{}) bool {
|
||||
pm[key] = value
|
||||
return true
|
||||
})
|
||||
}
|
||||
return pm
|
||||
}
|
||||
|
||||
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
//
// The read path is lock-free: it walks the bucket chain, prefilters
// entries via the packed top-hash bits, and validates each candidate
// with an atomic value/key/value snapshot re-check.
func (m *Map) Load(key string) (value interface{}, ok bool) {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	hash := hashString(key, table.seed)
	bidx := uint64(len(table.buckets)-1) & hash
	b := &table.buckets[bidx]
	for {
		topHashes := atomic.LoadUint64(&b.topHashMutex)
		for i := 0; i < entriesPerMapBucket; i++ {
			// Cheap prefilter: skip entries whose stored top-hash bits
			// cannot belong to this key's hash.
			if !topHashMatch(hash, topHashes, i) {
				continue
			}
		atomic_snapshot:
			// Start atomic snapshot.
			vp := atomic.LoadPointer(&b.values[i])
			kp := atomic.LoadPointer(&b.keys[i])
			if kp != nil && vp != nil {
				if key == derefKey(kp) {
					if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) {
						// Atomic snapshot succeeded.
						return derefValue(vp), true
					}
					// Concurrent update/remove. Go for another spin.
					goto atomic_snapshot
				}
			}
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketPadded)(bptr)
	}
}
|
||||
|
||||
// Store sets the value for a key.
|
||||
func (m *Map) Store(key string, value interface{}) {
|
||||
m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return value, false
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrStore returns the existing value for the key if present.
|
||||
// Otherwise, it stores and returns the given value.
|
||||
// The loaded result is true if the value was loaded, false if stored.
|
||||
func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return value, false
|
||||
},
|
||||
true,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadAndStore returns the existing value for the key if present,
|
||||
// while setting the new value for the key.
|
||||
// It stores the new value and returns the existing one, if present.
|
||||
// The loaded result is true if the existing value was loaded,
|
||||
// false otherwise.
|
||||
func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return value, false
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrCompute returns the existing value for the key if present.
|
||||
// Otherwise, it computes the value using the provided function, and
|
||||
// then stores and returns the computed value. The loaded result is
|
||||
// true if the value was loaded, false if computed.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return valueFn(), false
|
||||
},
|
||||
true,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrTryCompute returns the existing value for the key if present.
|
||||
// Otherwise, it tries to compute the value using the provided function
|
||||
// and, if successful, stores and returns the computed value. The loaded
|
||||
// result is true if the value was loaded, or false if computed (whether
|
||||
// successfully or not). If the compute attempt was cancelled (due to an
|
||||
// error, for example), a nil value will be returned.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *Map) LoadOrTryCompute(
|
||||
key string,
|
||||
valueFn func() (newValue interface{}, cancel bool),
|
||||
) (value interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
nv, c := valueFn()
|
||||
if !c {
|
||||
return nv, false
|
||||
}
|
||||
return nil, true
|
||||
},
|
||||
true,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Compute either sets the computed new value for the key or deletes
|
||||
// the value for the key. When the delete result of the valueFn function
|
||||
// is set to true, the value will be deleted, if it exists. When delete
|
||||
// is set to false, the value is updated to the newValue.
|
||||
// The ok result indicates whether value was computed and stored, thus, is
|
||||
// present in the map. The actual result contains the new value in cases where
|
||||
// the value was computed and stored. See the example for a few use cases.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *Map) Compute(
|
||||
key string,
|
||||
valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
|
||||
) (actual interface{}, ok bool) {
|
||||
return m.doCompute(key, valueFn, false, true)
|
||||
}
|
||||
|
||||
// LoadAndDelete deletes the value for a key, returning the previous
|
||||
// value if any. The loaded result reports whether the key was
|
||||
// present.
|
||||
func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(value interface{}, loaded bool) (interface{}, bool) {
|
||||
return value, true
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Delete deletes the value for a key.
|
||||
func (m *Map) Delete(key string) {
|
||||
m.doCompute(
|
||||
key,
|
||||
func(value interface{}, loaded bool) (interface{}, bool) {
|
||||
return value, true
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// doCompute is the single write-path implementation behind Store,
// LoadOrStore, LoadAndStore, LoadOrCompute, Compute, LoadAndDelete
// and Delete.
//
// valueFn receives the old value (and whether it was loaded) and
// returns the new value plus a delete flag. loadIfExists short-circuits
// to a plain Load when the key is already present; computeOnly controls
// whether the new or the old value is returned to the caller.
//
// The root bucket of the key's chain is locked for the whole write;
// individual entry updates are published with atomic stores in an
// order (top-hash, then value, then key) that keeps the lock-free
// Load snapshots consistent.
func (m *Map) doCompute(
	key string,
	valueFn func(oldValue interface{}, loaded bool) (interface{}, bool),
	loadIfExists, computeOnly bool,
) (interface{}, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb       *bucketPadded // first bucket with a free slot, if any
			emptyidx     int           // index of the free slot in emptyb
			hintNonEmpty int           // non-matching occupied entries seen so far
		)
		table := (*mapTable)(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := hashString(key, table.seed)
		bidx := uint64(len(table.buckets)-1) & hash
		rootb := &table.buckets[bidx]
		lockBucket(&rootb.topHashMutex)
		// The following two checks must go in reverse to what's
		// in the resize method.
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			m.waitForResize()
			goto compute_attempt
		}
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			goto compute_attempt
		}
		b := rootb
		for {
			topHashes := atomic.LoadUint64(&b.topHashMutex)
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] == nil {
					// Remember the first free slot for a possible insert.
					if emptyb == nil {
						emptyb = b
						emptyidx = i
					}
					continue
				}
				if !topHashMatch(hash, topHashes, i) {
					hintNonEmpty++
					continue
				}
				if key == derefKey(b.keys[i]) {
					vp := b.values[i]
					if loadIfExists {
						unlockBucket(&rootb.topHashMutex)
						return derefValue(vp), !computeOnly
					}
					// In-place update/delete.
					// We get a copy of the value via an interface{} on each call,
					// thus the live value pointers are unique. Otherwise atomic
					// snapshot won't be correct in case of multiple Store calls
					// using the same value.
					oldValue := derefValue(vp)
					newValue, del := valueFn(oldValue, true)
					if del {
						// Deletion.
						// First we update the value, then the key.
						// This is important for atomic snapshot states.
						atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i))
						atomic.StorePointer(&b.values[i], nil)
						atomic.StorePointer(&b.keys[i], nil)
						leftEmpty := false
						if hintNonEmpty == 0 {
							leftEmpty = isEmptyBucket(b)
						}
						unlockBucket(&rootb.topHashMutex)
						table.addSize(bidx, -1)
						// Might need to shrink the table.
						if leftEmpty {
							m.resize(table, mapShrinkHint)
						}
						return oldValue, !computeOnly
					}
					nvp := unsafe.Pointer(&newValue)
					if assertionsEnabled && vp == nvp {
						panic("non-unique value pointer")
					}
					atomic.StorePointer(&b.values[i], nvp)
					unlockBucket(&rootb.topHashMutex)
					if computeOnly {
						// Compute expects the new value to be returned.
						return newValue, true
					}
					// LoadAndStore expects the old value to be returned.
					return oldValue, true
				}
				hintNonEmpty++
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroV interface{}
					newValue, del := valueFn(zeroV, false)
					if del {
						unlockBucket(&rootb.topHashMutex)
						return zeroV, false
					}
					// First we update the value, then the key.
					// This is important for atomic snapshot states.
					topHashes = atomic.LoadUint64(&emptyb.topHashMutex)
					atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx))
					atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue))
					atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key))
					unlockBucket(&rootb.topHashMutex)
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					unlockBucket(&rootb.topHashMutex)
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroV interface{}
				newValue, del := valueFn(zeroV, false)
				if del {
					unlockBucket(&rootb.topHashMutex)
					return newValue, false
				}
				// Create and append a bucket.
				newb := new(bucketPadded)
				newb.keys[0] = unsafe.Pointer(&key)
				newb.values[0] = unsafe.Pointer(&newValue)
				newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				unlockBucket(&rootb.topHashMutex)
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketPadded)(b.next)
		}
	}
}
|
||||
|
||||
func (m *Map) newerTableExists(table *mapTable) bool {
|
||||
curTablePtr := atomic.LoadPointer(&m.table)
|
||||
return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
|
||||
}
|
||||
|
||||
func (m *Map) resizeInProgress() bool {
|
||||
return atomic.LoadInt64(&m.resizing) == 1
|
||||
}
|
||||
|
||||
func (m *Map) waitForResize() {
|
||||
m.resizeMu.Lock()
|
||||
for m.resizeInProgress() {
|
||||
m.resizeCond.Wait()
|
||||
}
|
||||
m.resizeMu.Unlock()
|
||||
}
|
||||
|
||||
// resize replaces the current table with a grown, shrunk or fresh one,
// depending on hint. Only one resize runs at a time; the winner of the
// CAS on m.resizing performs the copy, publishes the new table and then
// broadcasts on resizeCond to wake up waiting writers.
func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		if m.growOnly ||
			m.minTableLen == knownTableLen ||
			knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapTable
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table with factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapTable(tableLen << 1)
	case mapShrinkHint:
		// Re-check the shrink condition against the current table,
		// since it may differ from knownTable by now.
		shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table with factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapTable(tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapTable(m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucket(&table.buckets[i], newTable)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}
|
||||
|
||||
// copyBucket copies all entries of the bucket chain rooted at b into
// destTable, rehashing each key with the destination table's seed.
// The root bucket is locked for the duration of the copy so no
// concurrent writer can mutate the chain while it is being moved.
// Returns the number of entries copied.
func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) {
	rootb := b
	lockBucket(&rootb.topHashMutex)
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] != nil {
				k := derefKey(b.keys[i])
				// Rehash with the destination table's seed: bucket
				// indices are not preserved across tables.
				hash := hashString(k, destTable.seed)
				bidx := uint64(len(destTable.buckets)-1) & hash
				destb := &destTable.buckets[bidx]
				appendToBucket(hash, b.keys[i], b.values[i], destb)
				copied++
			}
		}
		if b.next == nil {
			unlockBucket(&rootb.topHashMutex)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}
|
||||
|
||||
// appendToBucket stores the given key/value pointers in the first free
// slot of the bucket chain starting at b, appending a fresh bucket to
// the end of the chain when every slot is occupied. The caller must
// hold the root bucket's lock. hash is recorded via storeTopHash so
// that lookups can skip non-matching slots cheaply.
func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) {
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] == nil {
				b.keys[i] = keyPtr
				b.values[i] = valPtr
				b.topHashMutex = storeTopHash(hash, b.topHashMutex, i)
				return
			}
		}
		if b.next == nil {
			// Chain is full: link a new bucket holding the entry.
			newb := new(bucketPadded)
			newb.keys[0] = keyPtr
			newb.values[0] = valPtr
			newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
			b.next = unsafe.Pointer(newb)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}
|
||||
|
||||
func isEmptyBucket(rootb *bucketPadded) bool {
|
||||
b := rootb
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.keys[i] != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
return true
|
||||
}
|
||||
b = (*bucketPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rule apply, i.e. the changes may be not reflected
// in the subsequently iterated entries.
func (m *Map) Range(f func(key string, value interface{}) bool) {
	var zeroEntry rangeEntry
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapTable)(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		lockBucket(&rootb.topHashMutex)
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] != nil {
					bentries = append(bentries, rangeEntry{
						key:   b.keys[i],
						value: b.values[i],
					})
				}
			}
			if b.next == nil {
				// End of the chain: release the lock before invoking
				// the user callback below.
				unlockBucket(&rootb.topHashMutex)
				break
			}
			b = (*bucketPadded)(b.next)
		}
		// Call the function for all copied entries.
		for j := range bentries {
			k := derefKey(bentries[j].key)
			v := derefValue(bentries[j].value)
			if !f(k, v) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroEntry
		}
		bentries = bentries[:0]
	}
}
|
||||
|
||||
// Clear deletes all keys and values currently stored in the map.
func (m *Map) Clear() {
	// Clearing is implemented as a resize to the minimal table size
	// with no entry copying.
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	m.resize(table, mapClearHint)
}
|
||||
|
||||
// Size returns current size of the map.
func (m *Map) Size() int {
	// The size is the sum of the per-stripe counters, so it may be
	// slightly stale under concurrent modifications.
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	return int(table.sumSize())
}
|
||||
|
||||
func derefKey(keyPtr unsafe.Pointer) string {
|
||||
return *(*string)(keyPtr)
|
||||
}
|
||||
|
||||
func derefValue(valuePtr unsafe.Pointer) interface{} {
|
||||
return *(*interface{})(valuePtr)
|
||||
}
|
||||
|
||||
// lockBucket acquires the spin lock embedded in the lowest bit of the
// combined top-hash/mutex word. It spins (yielding via Gosched) until
// the bit is observed clear and the CAS setting it succeeds.
func lockBucket(mu *uint64) {
	for {
		var v uint64
		// Spin until the lock bit (bit 0) is observed clear.
		for {
			v = atomic.LoadUint64(mu)
			if v&1 != 1 {
				break
			}
			runtime.Gosched()
		}
		// Try to take the lock; on CAS failure another goroutine
		// changed the word, so start over.
		if atomic.CompareAndSwapUint64(mu, v, v|1) {
			return
		}
		runtime.Gosched()
	}
}
|
||||
|
||||
// unlockBucket releases the spin lock held in the lowest bit of the
// combined top-hash/mutex word. Must only be called by the lock holder.
func unlockBucket(mu *uint64) {
	atomic.StoreUint64(mu, atomic.LoadUint64(mu)&^1)
}
|
||||
|
||||
// topHashMatch reports whether the top hash stored at slot idx of the
// packed topHashes word matches the given full hash. A slot only
// matches when its presence bit (bit idx+1) is set; the stored top
// hash is shifted back into position before comparison (the inverse
// of storeTopHash).
func topHashMatch(hash, topHashes uint64, idx int) bool {
	if topHashes&(1<<(idx+1)) == 0 {
		// Entry is not present.
		return false
	}
	hash = hash & topHashMask
	topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
	return hash == topHashes
}
|
||||
|
||||
// storeTopHash records the top 20 bits of hash in slot idx of the
// packed topHashes word, sets the slot's presence bit (bit idx+1) and
// returns the updated word.
func storeTopHash(hash, topHashes uint64, idx int) uint64 {
	// Zero out top hash at idx.
	topHashes = topHashes &^ topHashEntryMasks[idx]
	// Chop top 20 MSBs of the given hash and position them at idx.
	hash = (hash & topHashMask) >> (20 * idx)
	// Store the MSBs.
	topHashes = topHashes | hash
	// Mark the entry as present.
	return topHashes | (1 << (idx + 1))
}
|
||||
|
||||
// eraseTopHash clears the presence bit (bit idx+1) for slot idx in the
// packed topHashes word, marking the slot as empty. The stored top
// hash bits themselves are left untouched.
func eraseTopHash(topHashes uint64, idx int) uint64 {
	presenceBit := uint64(1) << (idx + 1)
	return topHashes &^ presenceBit
}
|
||||
|
||||
func (table *mapTable) addSize(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
atomic.AddInt64(&table.size[cidx].c, int64(delta))
|
||||
}
|
||||
|
||||
func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
table.size[cidx].c += int64(delta)
|
||||
}
|
||||
|
||||
func (table *mapTable) sumSize() int64 {
|
||||
sum := int64(0)
|
||||
for i := range table.size {
|
||||
sum += atomic.LoadInt64(&table.size[i].c)
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// MapStats is Map/MapOf statistics.
//
// Warning: map statistics are intended to be used for diagnostic
// purposes, not for production code. This means that breaking changes
// may be introduced into this struct even between minor releases.
type MapStats struct {
	// RootBuckets is the number of root buckets in the hash table.
	// Each bucket holds a few entries.
	RootBuckets int
	// TotalBuckets is the total number of buckets in the hash table,
	// including root and their chained buckets. Each bucket holds
	// a few entries.
	TotalBuckets int
	// EmptyBuckets is the number of buckets that hold no entries.
	EmptyBuckets int
	// Capacity is the Map/MapOf capacity, i.e. the total number of
	// entries that all buckets can physically hold. This number
	// does not consider the load factor.
	Capacity int
	// Size is the exact number of entries stored in the map.
	Size int
	// Counter is the number of entries stored in the map according
	// to the internal atomic counter. In case of concurrent map
	// modifications this number may be different from Size.
	Counter int
	// CounterLen is the number of internal atomic counter stripes.
	// This number may grow with the map capacity to improve
	// multithreaded scalability.
	CounterLen int
	// MinEntries is the minimum number of entries per a chain of
	// buckets, i.e. a root bucket and its chained buckets.
	MinEntries int
	// MaxEntries is the maximum number of entries per a chain of
	// buckets, i.e. a root bucket and its chained buckets.
	MaxEntries int
	// TotalGrowths is the number of times the hash table grew.
	TotalGrowths int64
	// TotalShrinks is the number of times the hash table shrank.
	TotalShrinks int64
}
|
||||
|
||||
// ToString returns string representation of map stats.
|
||||
func (s *MapStats) ToString() string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("MapStats{\n")
|
||||
sb.WriteString(fmt.Sprintf("RootBuckets: %d\n", s.RootBuckets))
|
||||
sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
|
||||
sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
|
||||
sb.WriteString(fmt.Sprintf("Capacity: %d\n", s.Capacity))
|
||||
sb.WriteString(fmt.Sprintf("Size: %d\n", s.Size))
|
||||
sb.WriteString(fmt.Sprintf("Counter: %d\n", s.Counter))
|
||||
sb.WriteString(fmt.Sprintf("CounterLen: %d\n", s.CounterLen))
|
||||
sb.WriteString(fmt.Sprintf("MinEntries: %d\n", s.MinEntries))
|
||||
sb.WriteString(fmt.Sprintf("MaxEntries: %d\n", s.MaxEntries))
|
||||
sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
|
||||
sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
|
||||
sb.WriteString("}\n")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// Stats returns statistics for the Map. Just like other map
// methods, this one is thread-safe. Yet it's an O(N) operation,
// so it should be used only for diagnostics or debugging purposes.
func (m *Map) Stats() MapStats {
	stats := MapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		// Walk the bucket chain counting entries per bucket and per
		// chain. Only atomic loads are used (no bucket locks), so the
		// numbers may be inconsistent under concurrent modifications.
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapBucket
			for i := 0; i < entriesPerMapBucket; i++ {
				if atomic.LoadPointer(&b.keys[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketPadded)(atomic.LoadPointer(&b.next))
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}
|
||||
738
vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
generated
vendored
Normal file
738
vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
generated
vendored
Normal file
@@ -0,0 +1,738 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
	// number of MapOf entries per bucket; 5 entries lead to size of 64B
	// (one cache line) on 64-bit machines
	entriesPerMapOfBucket = 5
	// defaultMeta is the meta word of a bucket whose slots are all
	// empty: every byte equals emptyMetaSlot.
	defaultMeta uint64 = 0x8080808080808080
	// metaMask keeps only the five low bytes of the meta word, i.e.
	// the bytes that correspond to actual entry slots.
	metaMask uint64 = 0xffffffffff
	// defaultMetaMasked is the empty-bucket meta restricted to the
	// five slot bytes; used to find free slots.
	defaultMetaMasked uint64 = defaultMeta & metaMask
	// emptyMetaSlot is the meta byte marking a free entry slot.
	emptyMetaSlot uint8 = 0x80
)
|
||||
|
||||
// MapOf is like a Go map[K]V but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A MapOf must not be copied after first use.
//
// MapOf uses a modified version of Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around idea to organize the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no write to memory, as well as no
// mutexes or any other sort of locks. Due to this design, in all
// considered scenarios MapOf outperforms sync.Map.
//
// MapOf also borrows ideas from Java's j.u.c.ConcurrentHashMap
// (immutable K/V pair structs instead of atomic snapshots)
// and C++'s absl::flat_hash_map (meta memory and SWAR-based
// lookups).
type MapOf[K comparable, V any] struct {
	totalGrowths int64          // number of grow resizes; updated atomically
	totalShrinks int64          // number of shrink resizes; updated atomically
	resizing     int64          // resize in progress flag; updated atomically
	resizeMu     sync.Mutex     // only used along with resizeCond
	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapOfTable
	hasher       func(K, uint64) uint64
	minTableLen  int  // the table never shrinks below this length
	growOnly     bool // when set, shrink attempts are skipped entirely
}
|
||||
|
||||
// mapOfTable is the hash table backing a MapOf instance.
type mapOfTable[K comparable, V any] struct {
	buckets []bucketOfPadded
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	// hash seed; a fresh one is generated for every new table
	seed uint64
}
|
||||
|
||||
// bucketOfPadded is a CL-sized map bucket holding up to
// entriesPerMapOfBucket entries. The pad field sizes the struct so
// that neighboring buckets do not share a cache line.
type bucketOfPadded struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
	bucketOf
}
|
||||
|
||||
// bucketOf is the payload of a bucket: a packed meta word with one
// byte per slot, the entry pointers, an optional chain link, and the
// mutex guarding the whole chain (locked via the root bucket).
type bucketOf struct {
	meta    uint64                                // per-slot hash bytes; emptyMetaSlot marks a free slot
	entries [entriesPerMapOfBucket]unsafe.Pointer // *entryOf
	next    unsafe.Pointer                        // *bucketOfPadded
	mu      sync.Mutex
}
|
||||
|
||||
// entryOf is an immutable map entry. Updates replace the whole entry
// pointer rather than mutating it in place, which keeps lock-free
// reads consistent.
type entryOf[K comparable, V any] struct {
	key   K
	value V
}
|
||||
|
||||
// NewMapOf creates a new MapOf instance configured with the given
// options.
func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] {
	// Use the built-in hash function for K.
	return NewMapOfWithHasher[K, V](defaultHasher[K](), options...)
}
|
||||
|
||||
// NewMapOfWithHasher creates a new MapOf instance configured with
// the given hasher and options. The hash function is used instead
// of the built-in hash function configured when a map is created
// with the NewMapOf function.
func NewMapOfWithHasher[K comparable, V any](
	hasher func(K, uint64) uint64,
	options ...func(*MapConfig),
) *MapOf[K, V] {
	c := &MapConfig{
		sizeHint: defaultMinMapTableLen * entriesPerMapOfBucket,
	}
	for _, o := range options {
		o(c)
	}

	m := &MapOf[K, V]{}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	m.hasher = hasher
	var table *mapOfTable[K, V]
	if c.sizeHint <= defaultMinMapTableLen*entriesPerMapOfBucket {
		table = newMapOfTable[K, V](defaultMinMapTableLen)
	} else {
		// Size the table so that sizeHint entries fit without
		// exceeding the load factor.
		tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapOfBucket) / mapLoadFactor))
		table = newMapOfTable[K, V](int(tableLen))
	}
	m.minTableLen = len(table.buckets)
	m.growOnly = c.growOnly
	atomic.StorePointer(&m.table, unsafe.Pointer(table))
	return m
}
|
||||
|
||||
// NewMapOfPresized creates a new MapOf instance with capacity enough
// to hold sizeHint entries. The capacity is treated as the minimal capacity
// meaning that the underlying hash table will never shrink to
// a smaller capacity. If sizeHint is zero or negative, the value
// is ignored.
//
// Deprecated: use NewMapOf in combination with WithPresize.
func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
	// Thin wrapper kept for backward compatibility.
	return NewMapOf[K, V](WithPresize(sizeHint))
}
|
||||
|
||||
// newMapOfTable allocates a table with minTableLen root buckets (all
// marked empty) and a proportional number of counter stripes.
func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
	buckets := make([]bucketOfPadded, minTableLen)
	for i := range buckets {
		buckets[i].meta = defaultMeta
	}
	// One counter stripe per 1024 buckets, clamped to
	// [minMapCounterLen, maxMapCounterLen].
	counterLen := minTableLen >> 10
	if counterLen < minMapCounterLen {
		counterLen = minMapCounterLen
	} else if counterLen > maxMapCounterLen {
		counterLen = maxMapCounterLen
	}
	counter := make([]counterStripe, counterLen)
	t := &mapOfTable[K, V]{
		buckets: buckets,
		size:    counter,
		seed:    makeSeed(),
	}
	return t
}
|
||||
|
||||
// ToPlainMapOf returns a native map with a copy of xsync Map's
|
||||
// contents. The copied xsync Map should not be modified while
|
||||
// this call is made. If the copied Map is modified, the copying
|
||||
// behavior is the same as in the Range method.
|
||||
func ToPlainMapOf[K comparable, V any](m *MapOf[K, V]) map[K]V {
|
||||
pm := make(map[K]V)
|
||||
if m != nil {
|
||||
m.Range(func(key K, value V) bool {
|
||||
pm[key] = value
|
||||
return true
|
||||
})
|
||||
}
|
||||
return pm
|
||||
}
|
||||
|
||||
// Load returns the value stored in the map for a key, or zero value
// of type V if no value is present.
// The ok result indicates whether value was found in the map.
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	hash := m.hasher(key, table.seed)
	h1 := h1(hash)
	h2w := broadcast(h2(hash))
	bidx := uint64(len(table.buckets)-1) & h1
	b := &table.buckets[bidx]
	for {
		// SWAR probe: mark the meta bytes equal to h2, then check
		// only the marked slots. Lock-free; only atomic loads.
		metaw := atomic.LoadUint64(&b.meta)
		markedw := markZeroBytes(metaw^h2w) & metaMask
		for markedw != 0 {
			idx := firstMarkedByteIndex(markedw)
			eptr := atomic.LoadPointer(&b.entries[idx])
			if eptr != nil {
				e := (*entryOf[K, V])(eptr)
				if e.key == key {
					return e.value, true
				}
			}
			// Clear the lowest marked byte and keep probing.
			markedw &= markedw - 1
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketOfPadded)(bptr)
	}
}
|
||||
|
||||
// Store sets the value for a key.
func (m *MapOf[K, V]) Store(key K, value V) {
	m.doCompute(
		key,
		func(V, bool) (V, bool) {
			// Always write the new value; never delete.
			return value, false
		},
		false, // loadIfExists
		false, // computeOnly
	)
}
|
||||
|
||||
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return value, false
		},
		true,  // loadIfExists
		false, // computeOnly
	)
}
|
||||
|
||||
// LoadAndStore returns the existing value for the key if present,
// while setting the new value for the key.
// It stores the new value and returns the existing one, if present.
// The loaded result is true if the existing value was loaded,
// false otherwise.
func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			// Unconditionally replace the value; never delete.
			return value, false
		},
		false, // loadIfExists
		false, // computeOnly
	)
}
|
||||
|
||||
// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function, and
// then stores and returns the computed value. The loaded result is
// true if the value was loaded, false if computed.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return valueFn(), false
		},
		true,  // loadIfExists
		false, // computeOnly
	)
}
|
||||
|
||||
// LoadOrTryCompute returns the existing value for the key if present.
// Otherwise, it tries to compute the value using the provided function
// and, if successful, stores and returns the computed value. The loaded
// result is true if the value was loaded, or false if computed (whether
// successfully or not). If the compute attempt was cancelled (due to an
// error, for example), a zero value of type V will be returned.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) LoadOrTryCompute(
	key K,
	valueFn func() (newValue V, cancel bool),
) (value V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			nv, c := valueFn()
			if !c {
				return nv, false
			}
			// Cancelled: request deletion, which is a no-op for a
			// key that is not present.
			return nv, true // nv is ignored
		},
		true,  // loadIfExists
		false, // computeOnly
	)
}
|
||||
|
||||
// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete
// is set to false, the value is updated to the newValue.
// The ok result indicates whether value was computed and stored, thus, is
// present in the map. The actual result contains the new value in cases where
// the value was computed and stored. See the example for a few use cases.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) Compute(
	key K,
	valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
) (actual V, ok bool) {
	// computeOnly=true makes doCompute return the newly computed value.
	return m.doCompute(key, valueFn, false, true)
}
|
||||
|
||||
// LoadAndDelete deletes the value for a key, returning the previous
// value if any. The loaded result reports whether the key was
// present.
func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	return m.doCompute(
		key,
		func(value V, loaded bool) (V, bool) {
			// Always request deletion.
			return value, true
		},
		false, // loadIfExists
		false, // computeOnly
	)
}
|
||||
|
||||
// Delete deletes the value for a key.
func (m *MapOf[K, V]) Delete(key K) {
	m.doCompute(
		key,
		func(value V, loaded bool) (V, bool) {
			// Always request deletion.
			return value, true
		},
		false, // loadIfExists
		false, // computeOnly
	)
}
|
||||
|
||||
// doCompute is the shared implementation behind Store, LoadOrStore,
// LoadAndStore, LoadOrCompute, Compute, Delete and friends.
//
// valueFn receives the old value (and whether it was loaded) and
// returns the new value plus a delete flag. loadIfExists makes an
// existing entry win without calling valueFn on the write path;
// computeOnly flips the second result's meaning (see the returns of
// the callers).
func (m *MapOf[K, V]) doCompute(
	key K,
	valueFn func(oldValue V, loaded bool) (V, bool),
	loadIfExists, computeOnly bool,
) (V, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb   *bucketOfPadded
			emptyidx int
		)
		table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := m.hasher(key, table.seed)
		h1 := h1(hash)
		h2 := h2(hash)
		h2w := broadcast(h2)
		bidx := uint64(len(table.buckets)-1) & h1
		rootb := &table.buckets[bidx]
		rootb.mu.Lock()
		// The following two checks must go in reverse to what's
		// in the resize method.
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			rootb.mu.Unlock()
			m.waitForResize()
			goto compute_attempt
		}
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			rootb.mu.Unlock()
			goto compute_attempt
		}
		b := rootb
		for {
			// SWAR probe for slots whose meta byte equals h2.
			metaw := b.meta
			markedw := markZeroBytes(metaw^h2w) & metaMask
			for markedw != 0 {
				idx := firstMarkedByteIndex(markedw)
				eptr := b.entries[idx]
				if eptr != nil {
					e := (*entryOf[K, V])(eptr)
					if e.key == key {
						if loadIfExists {
							rootb.mu.Unlock()
							return e.value, !computeOnly
						}
						// In-place update/delete.
						// We get a copy of the value via an interface{} on each call,
						// thus the live value pointers are unique. Otherwise atomic
						// snapshot won't be correct in case of multiple Store calls
						// using the same value.
						oldv := e.value
						newv, del := valueFn(oldv, true)
						if del {
							// Deletion.
							// First we update the hash, then the entry.
							newmetaw := setByte(metaw, emptyMetaSlot, idx)
							atomic.StoreUint64(&b.meta, newmetaw)
							atomic.StorePointer(&b.entries[idx], nil)
							rootb.mu.Unlock()
							table.addSize(bidx, -1)
							// Might need to shrink the table if we left bucket empty.
							if newmetaw == defaultMeta {
								m.resize(table, mapShrinkHint)
							}
							return oldv, !computeOnly
						}
						// Update: publish a fresh immutable entry.
						newe := new(entryOf[K, V])
						newe.key = key
						newe.value = newv
						atomic.StorePointer(&b.entries[idx], unsafe.Pointer(newe))
						rootb.mu.Unlock()
						if computeOnly {
							// Compute expects the new value to be returned.
							return newv, true
						}
						// LoadAndStore expects the old value to be returned.
						return oldv, true
					}
				}
				markedw &= markedw - 1
			}
			if emptyb == nil {
				// Search for empty entries (up to 5 per bucket).
				emptyw := metaw & defaultMetaMasked
				if emptyw != 0 {
					idx := firstMarkedByteIndex(emptyw)
					emptyb = b
					emptyidx = idx
				}
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroV V
					newValue, del := valueFn(zeroV, false)
					if del {
						// Deleting a missing key is a no-op.
						rootb.mu.Unlock()
						return zeroV, false
					}
					newe := new(entryOf[K, V])
					newe.key = key
					newe.value = newValue
					// First we update meta, then the entry.
					atomic.StoreUint64(&emptyb.meta, setByte(emptyb.meta, h2, emptyidx))
					atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
					rootb.mu.Unlock()
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapOfBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					rootb.mu.Unlock()
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroV V
				newValue, del := valueFn(zeroV, false)
				if del {
					rootb.mu.Unlock()
					return newValue, false
				}
				// Create and append a bucket.
				newb := new(bucketOfPadded)
				newb.meta = setByte(defaultMeta, h2, 0)
				newe := new(entryOf[K, V])
				newe.key = key
				newe.value = newValue
				newb.entries[0] = unsafe.Pointer(newe)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				rootb.mu.Unlock()
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketOfPadded)(b.next)
		}
	}
}
|
||||
|
||||
func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
|
||||
curTablePtr := atomic.LoadPointer(&m.table)
|
||||
return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
|
||||
}
|
||||
|
||||
// resizeInProgress reports whether a table resize is currently
// running (the resizing flag is set by a CAS in resize).
func (m *MapOf[K, V]) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}
|
||||
|
||||
// waitForResize blocks the caller until the in-progress resize (if
// any) completes. The condition is broadcast by the resizer after it
// clears the resizing flag.
func (m *MapOf[K, V]) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}
|
||||
|
||||
// resize replaces the current table with a grown, shrunk or fresh one
// according to hint. Only one resize runs at a time; losers of the
// CAS on the resizing flag wait for the winner. knownTable is the
// table observed by the caller and is only used for the shrink
// fast-path checks.
func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		if m.growOnly ||
			m.minTableLen == knownTableLen ||
			knownTable.sumSize() > int64((knownTableLen*entriesPerMapOfBucket)/mapShrinkFraction) {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapOfTable[K, V]
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table with factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapOfTable[K, V](tableLen << 1)
	case mapShrinkHint:
		// Re-check against the freshly loaded table: it may differ
		// from knownTable by now.
		shrinkThreshold := int64((tableLen * entriesPerMapOfBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table with factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapOfTable[K, V](tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapOfTable[K, V](m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}
|
||||
|
||||
// copyBucketOf copies all entries of the bucket chain rooted at b
// into destTable, rehashing each key with the destination table's
// seed. The root bucket is locked for the duration of the copy.
// Returns the number of entries copied.
func copyBucketOf[K comparable, V any](
	b *bucketOfPadded,
	destTable *mapOfTable[K, V],
	hasher func(K, uint64) uint64,
) (copied int) {
	rootb := b
	rootb.mu.Lock()
	for {
		for i := 0; i < entriesPerMapOfBucket; i++ {
			if b.entries[i] != nil {
				e := (*entryOf[K, V])(b.entries[i])
				// Rehash with the destination seed: bucket indices
				// are not preserved across tables.
				hash := hasher(e.key, destTable.seed)
				bidx := uint64(len(destTable.buckets)-1) & h1(hash)
				destb := &destTable.buckets[bidx]
				appendToBucketOf(h2(hash), b.entries[i], destb)
				copied++
			}
		}
		if b.next == nil {
			rootb.mu.Unlock()
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}
|
||||
|
||||
// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rule apply, i.e. the changes may be not reflected
// in the subsequently iterated entries.
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
	var zeroPtr unsafe.Pointer
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapOfBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapOfTable[K, V])(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		rootb.mu.Lock()
		for {
			for i := 0; i < entriesPerMapOfBucket; i++ {
				if b.entries[i] != nil {
					bentries = append(bentries, b.entries[i])
				}
			}
			if b.next == nil {
				// End of the chain: release the lock before the
				// user callback runs below.
				rootb.mu.Unlock()
				break
			}
			b = (*bucketOfPadded)(b.next)
		}
		// Call the function for all copied entries.
		for j := range bentries {
			entry := (*entryOf[K, V])(bentries[j])
			if !f(entry.key, entry.value) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroPtr
		}
		bentries = bentries[:0]
	}
}
|
||||
|
||||
// Clear deletes all keys and values currently stored in the map.
|
||||
func (m *MapOf[K, V]) Clear() {
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
m.resize(table, mapClearHint)
|
||||
}
|
||||
|
||||
// Size returns current size of the map.
|
||||
func (m *MapOf[K, V]) Size() int {
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
return int(table.sumSize())
|
||||
}
|
||||
|
||||
func appendToBucketOf(h2 uint8, entryPtr unsafe.Pointer, b *bucketOfPadded) {
|
||||
for {
|
||||
for i := 0; i < entriesPerMapOfBucket; i++ {
|
||||
if b.entries[i] == nil {
|
||||
b.meta = setByte(b.meta, h2, i)
|
||||
b.entries[i] = entryPtr
|
||||
return
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
newb := new(bucketOfPadded)
|
||||
newb.meta = setByte(defaultMeta, h2, 0)
|
||||
newb.entries[0] = entryPtr
|
||||
b.next = unsafe.Pointer(newb)
|
||||
return
|
||||
}
|
||||
b = (*bucketOfPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
atomic.AddInt64(&table.size[cidx].c, int64(delta))
|
||||
}
|
||||
|
||||
func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
table.size[cidx].c += int64(delta)
|
||||
}
|
||||
|
||||
func (table *mapOfTable[K, V]) sumSize() int64 {
|
||||
sum := int64(0)
|
||||
for i := range table.size {
|
||||
sum += atomic.LoadInt64(&table.size[i].c)
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// h1 returns the bucket-selection part of the hash: all bits above the
// 7 low bits consumed by h2.
func h1(h uint64) uint64 {
	const h2Bits = 7
	return h >> h2Bits
}
|
||||
|
||||
// h2 returns the 7-bit meta byte of the hash, stored in bucket meta
// words for SWAR probing.
func h2(h uint64) uint8 {
	const h2Mask = 0x7f
	return uint8(h) & h2Mask
}
|
||||
|
||||
// Stats returns statistics for the MapOf. Just like other map
|
||||
// methods, this one is thread-safe. Yet it's an O(N) operation,
|
||||
// so it should be used only for diagnostics or debugging purposes.
|
||||
func (m *MapOf[K, V]) Stats() MapStats {
|
||||
stats := MapStats{
|
||||
TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
|
||||
TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
|
||||
MinEntries: math.MaxInt32,
|
||||
}
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
stats.RootBuckets = len(table.buckets)
|
||||
stats.Counter = int(table.sumSize())
|
||||
stats.CounterLen = len(table.size)
|
||||
for i := range table.buckets {
|
||||
nentries := 0
|
||||
b := &table.buckets[i]
|
||||
stats.TotalBuckets++
|
||||
for {
|
||||
nentriesLocal := 0
|
||||
stats.Capacity += entriesPerMapOfBucket
|
||||
for i := 0; i < entriesPerMapOfBucket; i++ {
|
||||
if atomic.LoadPointer(&b.entries[i]) != nil {
|
||||
stats.Size++
|
||||
nentriesLocal++
|
||||
}
|
||||
}
|
||||
nentries += nentriesLocal
|
||||
if nentriesLocal == 0 {
|
||||
stats.EmptyBuckets++
|
||||
}
|
||||
if b.next == nil {
|
||||
break
|
||||
}
|
||||
b = (*bucketOfPadded)(atomic.LoadPointer(&b.next))
|
||||
stats.TotalBuckets++
|
||||
}
|
||||
if nentries < stats.MinEntries {
|
||||
stats.MinEntries = nentries
|
||||
}
|
||||
if nentries > stats.MaxEntries {
|
||||
stats.MaxEntries = nentries
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
||||
125
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go
generated
vendored
Normal file
125
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// A MPMCQueue is a bounded multi-producer multi-consumer concurrent
|
||||
// queue.
|
||||
//
|
||||
// MPMCQueue instances must be created with NewMPMCQueue function.
|
||||
// A MPMCQueue must not be copied after first use.
|
||||
//
|
||||
// Based on the data structure from the following C++ library:
|
||||
// https://github.com/rigtorp/MPMCQueue
|
||||
type MPMCQueue struct {
|
||||
cap uint64
|
||||
head uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
hpad [cacheLineSize - 8]byte
|
||||
tail uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
tpad [cacheLineSize - 8]byte
|
||||
slots []slotPadded
|
||||
}
|
||||
|
||||
type slotPadded struct {
|
||||
slot
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - unsafe.Sizeof(slot{})]byte
|
||||
}
|
||||
|
||||
type slot struct {
|
||||
turn uint64
|
||||
item interface{}
|
||||
}
|
||||
|
||||
// NewMPMCQueue creates a new MPMCQueue instance with the given
|
||||
// capacity.
|
||||
func NewMPMCQueue(capacity int) *MPMCQueue {
|
||||
if capacity < 1 {
|
||||
panic("capacity must be positive number")
|
||||
}
|
||||
return &MPMCQueue{
|
||||
cap: uint64(capacity),
|
||||
slots: make([]slotPadded, capacity),
|
||||
}
|
||||
}
|
||||
|
||||
// Enqueue inserts the given item into the queue.
|
||||
// Blocks, if the queue is full.
|
||||
//
|
||||
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
|
||||
func (q *MPMCQueue) Enqueue(item interface{}) {
|
||||
head := atomic.AddUint64(&q.head, 1) - 1
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
for atomic.LoadUint64(&slot.turn) != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
slot.item = item
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
}
|
||||
|
||||
// Dequeue retrieves and removes the item from the head of the queue.
|
||||
// Blocks, if the queue is empty.
|
||||
//
|
||||
// Deprecated: use TryDequeue in combination with runtime.Gosched().
|
||||
func (q *MPMCQueue) Dequeue() interface{} {
|
||||
tail := atomic.AddUint64(&q.tail, 1) - 1
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
for atomic.LoadUint64(&slot.turn) != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
item := slot.item
|
||||
slot.item = nil
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
return item
|
||||
}
|
||||
|
||||
// TryEnqueue inserts the given item into the queue. Does not block
|
||||
// and returns immediately. The result indicates that the queue isn't
|
||||
// full and the item was inserted.
|
||||
func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
|
||||
head := atomic.LoadUint64(&q.head)
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
if atomic.LoadUint64(&slot.turn) == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
|
||||
slot.item = item
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TryDequeue retrieves and removes the item from the head of the
|
||||
// queue. Does not block and returns immediately. The ok result
|
||||
// indicates that the queue isn't empty and an item was retrieved.
|
||||
func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) {
|
||||
tail := atomic.LoadUint64(&q.tail)
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
if atomic.LoadUint64(&slot.turn) == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
|
||||
item = slot.item
|
||||
ok = true
|
||||
slot.item = nil
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (q *MPMCQueue) idx(i uint64) uint64 {
|
||||
return i % q.cap
|
||||
}
|
||||
|
||||
func (q *MPMCQueue) turn(i uint64) uint64 {
|
||||
return i / q.cap
|
||||
}
|
||||
138
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go
generated
vendored
Normal file
138
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go
generated
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
//go:build go1.19
|
||||
// +build go1.19
|
||||
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent
|
||||
// queue. It's a generic version of MPMCQueue.
|
||||
//
|
||||
// MPMCQueueOf instances must be created with NewMPMCQueueOf function.
|
||||
// A MPMCQueueOf must not be copied after first use.
|
||||
//
|
||||
// Based on the data structure from the following C++ library:
|
||||
// https://github.com/rigtorp/MPMCQueue
|
||||
type MPMCQueueOf[I any] struct {
|
||||
cap uint64
|
||||
head uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
hpad [cacheLineSize - 8]byte
|
||||
tail uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
tpad [cacheLineSize - 8]byte
|
||||
slots []slotOfPadded[I]
|
||||
}
|
||||
|
||||
type slotOfPadded[I any] struct {
|
||||
slotOf[I]
|
||||
// Unfortunately, proper padding like the below one:
|
||||
//
|
||||
// pad [cacheLineSize - (unsafe.Sizeof(slotOf[I]{}) % cacheLineSize)]byte
|
||||
//
|
||||
// won't compile, so here we add a best-effort padding for items up to
|
||||
// 56 bytes size.
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte
|
||||
}
|
||||
|
||||
type slotOf[I any] struct {
|
||||
// atomic.Uint64 is used here to get proper 8 byte alignment on
|
||||
// 32-bit archs.
|
||||
turn atomic.Uint64
|
||||
item I
|
||||
}
|
||||
|
||||
// NewMPMCQueueOf creates a new MPMCQueueOf instance with the given
|
||||
// capacity.
|
||||
func NewMPMCQueueOf[I any](capacity int) *MPMCQueueOf[I] {
|
||||
if capacity < 1 {
|
||||
panic("capacity must be positive number")
|
||||
}
|
||||
return &MPMCQueueOf[I]{
|
||||
cap: uint64(capacity),
|
||||
slots: make([]slotOfPadded[I], capacity),
|
||||
}
|
||||
}
|
||||
|
||||
// Enqueue inserts the given item into the queue.
|
||||
// Blocks, if the queue is full.
|
||||
//
|
||||
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
|
||||
func (q *MPMCQueueOf[I]) Enqueue(item I) {
|
||||
head := atomic.AddUint64(&q.head, 1) - 1
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
for slot.turn.Load() != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
slot.item = item
|
||||
slot.turn.Store(turn + 1)
|
||||
}
|
||||
|
||||
// Dequeue retrieves and removes the item from the head of the queue.
|
||||
// Blocks, if the queue is empty.
|
||||
//
|
||||
// Deprecated: use TryDequeue in combination with runtime.Gosched().
|
||||
func (q *MPMCQueueOf[I]) Dequeue() I {
|
||||
var zeroI I
|
||||
tail := atomic.AddUint64(&q.tail, 1) - 1
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
for slot.turn.Load() != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
item := slot.item
|
||||
slot.item = zeroI
|
||||
slot.turn.Store(turn + 1)
|
||||
return item
|
||||
}
|
||||
|
||||
// TryEnqueue inserts the given item into the queue. Does not block
|
||||
// and returns immediately. The result indicates that the queue isn't
|
||||
// full and the item was inserted.
|
||||
func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool {
|
||||
head := atomic.LoadUint64(&q.head)
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
if slot.turn.Load() == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
|
||||
slot.item = item
|
||||
slot.turn.Store(turn + 1)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TryDequeue retrieves and removes the item from the head of the
|
||||
// queue. Does not block and returns immediately. The ok result
|
||||
// indicates that the queue isn't empty and an item was retrieved.
|
||||
func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) {
|
||||
tail := atomic.LoadUint64(&q.tail)
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
if slot.turn.Load() == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
|
||||
var zeroI I
|
||||
item = slot.item
|
||||
ok = true
|
||||
slot.item = zeroI
|
||||
slot.turn.Store(turn + 1)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (q *MPMCQueueOf[I]) idx(i uint64) uint64 {
|
||||
return i % q.cap
|
||||
}
|
||||
|
||||
func (q *MPMCQueueOf[I]) turn(i uint64) uint64 {
|
||||
return i / q.cap
|
||||
}
|
||||
188
vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go
generated
vendored
Normal file
188
vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// slow-down guard
|
||||
const nslowdown = 7
|
||||
|
||||
// pool for reader tokens
|
||||
var rtokenPool sync.Pool
|
||||
|
||||
// RToken is a reader lock token.
|
||||
type RToken struct {
|
||||
slot uint32
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - 4]byte
|
||||
}
|
||||
|
||||
// A RBMutex is a reader biased reader/writer mutual exclusion lock.
|
||||
// The lock can be held by an many readers or a single writer.
|
||||
// The zero value for a RBMutex is an unlocked mutex.
|
||||
//
|
||||
// A RBMutex must not be copied after first use.
|
||||
//
|
||||
// RBMutex is based on a modified version of BRAVO
|
||||
// (Biased Locking for Reader-Writer Locks) algorithm:
|
||||
// https://arxiv.org/pdf/1810.01553.pdf
|
||||
//
|
||||
// RBMutex is a specialized mutex for scenarios, such as caches,
|
||||
// where the vast majority of locks are acquired by readers and write
|
||||
// lock acquire attempts are infrequent. In such scenarios, RBMutex
|
||||
// performs better than sync.RWMutex on large multicore machines.
|
||||
//
|
||||
// RBMutex extends sync.RWMutex internally and uses it as the "reader
|
||||
// bias disabled" fallback, so the same semantics apply. The only
|
||||
// noticeable difference is in reader tokens returned from the
|
||||
// RLock/RUnlock methods.
|
||||
type RBMutex struct {
|
||||
rslots []rslot
|
||||
rmask uint32
|
||||
rbias int32
|
||||
inhibitUntil time.Time
|
||||
rw sync.RWMutex
|
||||
}
|
||||
|
||||
type rslot struct {
|
||||
mu int32
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - 4]byte
|
||||
}
|
||||
|
||||
// NewRBMutex creates a new RBMutex instance.
|
||||
func NewRBMutex() *RBMutex {
|
||||
nslots := nextPowOf2(parallelism())
|
||||
mu := RBMutex{
|
||||
rslots: make([]rslot, nslots),
|
||||
rmask: nslots - 1,
|
||||
rbias: 1,
|
||||
}
|
||||
return &mu
|
||||
}
|
||||
|
||||
// TryRLock tries to lock m for reading without blocking.
|
||||
// When TryRLock succeeds, it returns true and a reader token.
|
||||
// In case of a failure, a false is returned.
|
||||
func (mu *RBMutex) TryRLock() (bool, *RToken) {
|
||||
if t := mu.fastRlock(); t != nil {
|
||||
return true, t
|
||||
}
|
||||
// Optimistic slow path.
|
||||
if mu.rw.TryRLock() {
|
||||
if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
|
||||
atomic.StoreInt32(&mu.rbias, 1)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RLock locks m for reading and returns a reader token. The
|
||||
// token must be used in the later RUnlock call.
|
||||
//
|
||||
// Should not be used for recursive read locking; a blocked Lock
|
||||
// call excludes new readers from acquiring the lock.
|
||||
func (mu *RBMutex) RLock() *RToken {
|
||||
if t := mu.fastRlock(); t != nil {
|
||||
return t
|
||||
}
|
||||
// Slow path.
|
||||
mu.rw.RLock()
|
||||
if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
|
||||
atomic.StoreInt32(&mu.rbias, 1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mu *RBMutex) fastRlock() *RToken {
|
||||
if atomic.LoadInt32(&mu.rbias) == 1 {
|
||||
t, ok := rtokenPool.Get().(*RToken)
|
||||
if !ok {
|
||||
t = new(RToken)
|
||||
t.slot = runtime_fastrand()
|
||||
}
|
||||
// Try all available slots to distribute reader threads to slots.
|
||||
for i := 0; i < len(mu.rslots); i++ {
|
||||
slot := t.slot + uint32(i)
|
||||
rslot := &mu.rslots[slot&mu.rmask]
|
||||
rslotmu := atomic.LoadInt32(&rslot.mu)
|
||||
if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
|
||||
if atomic.LoadInt32(&mu.rbias) == 1 {
|
||||
// Hot path succeeded.
|
||||
t.slot = slot
|
||||
return t
|
||||
}
|
||||
// The mutex is no longer reader biased. Roll back.
|
||||
atomic.AddInt32(&rslot.mu, -1)
|
||||
rtokenPool.Put(t)
|
||||
return nil
|
||||
}
|
||||
// Contention detected. Give a try with the next slot.
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RUnlock undoes a single RLock call. A reader token obtained from
|
||||
// the RLock call must be provided. RUnlock does not affect other
|
||||
// simultaneous readers. A panic is raised if m is not locked for
|
||||
// reading on entry to RUnlock.
|
||||
func (mu *RBMutex) RUnlock(t *RToken) {
|
||||
if t == nil {
|
||||
mu.rw.RUnlock()
|
||||
return
|
||||
}
|
||||
if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
|
||||
panic("invalid reader state detected")
|
||||
}
|
||||
rtokenPool.Put(t)
|
||||
}
|
||||
|
||||
// TryLock tries to lock m for writing without blocking.
|
||||
func (mu *RBMutex) TryLock() bool {
|
||||
if mu.rw.TryLock() {
|
||||
if atomic.LoadInt32(&mu.rbias) == 1 {
|
||||
atomic.StoreInt32(&mu.rbias, 0)
|
||||
for i := 0; i < len(mu.rslots); i++ {
|
||||
if atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
|
||||
// There is a reader. Roll back.
|
||||
atomic.StoreInt32(&mu.rbias, 1)
|
||||
mu.rw.Unlock()
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Lock locks m for writing. If the lock is already locked for
|
||||
// reading or writing, Lock blocks until the lock is available.
|
||||
func (mu *RBMutex) Lock() {
|
||||
mu.rw.Lock()
|
||||
if atomic.LoadInt32(&mu.rbias) == 1 {
|
||||
atomic.StoreInt32(&mu.rbias, 0)
|
||||
start := time.Now()
|
||||
for i := 0; i < len(mu.rslots); i++ {
|
||||
for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock unlocks m for writing. A panic is raised if m is not locked
|
||||
// for writing on entry to Unlock.
|
||||
//
|
||||
// As with RWMutex, a locked RBMutex is not associated with a
|
||||
// particular goroutine. One goroutine may RLock (Lock) a RBMutex and
|
||||
// then arrange for another goroutine to RUnlock (Unlock) it.
|
||||
func (mu *RBMutex) Unlock() {
|
||||
mu.rw.Unlock()
|
||||
}
|
||||
92
vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go
generated
vendored
Normal file
92
vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// A SPSCQueue is a bounded single-producer single-consumer concurrent
|
||||
// queue. This means that not more than a single goroutine must be
|
||||
// publishing items to the queue while not more than a single goroutine
|
||||
// must be consuming those items.
|
||||
//
|
||||
// SPSCQueue instances must be created with NewSPSCQueue function.
|
||||
// A SPSCQueue must not be copied after first use.
|
||||
//
|
||||
// Based on the data structure from the following article:
|
||||
// https://rigtorp.se/ringbuffer/
|
||||
type SPSCQueue struct {
|
||||
cap uint64
|
||||
pidx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad0 [cacheLineSize - 8]byte
|
||||
pcachedIdx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad1 [cacheLineSize - 8]byte
|
||||
cidx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad2 [cacheLineSize - 8]byte
|
||||
ccachedIdx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad3 [cacheLineSize - 8]byte
|
||||
items []interface{}
|
||||
}
|
||||
|
||||
// NewSPSCQueue creates a new SPSCQueue instance with the given
|
||||
// capacity.
|
||||
func NewSPSCQueue(capacity int) *SPSCQueue {
|
||||
if capacity < 1 {
|
||||
panic("capacity must be positive number")
|
||||
}
|
||||
return &SPSCQueue{
|
||||
cap: uint64(capacity + 1),
|
||||
items: make([]interface{}, capacity+1),
|
||||
}
|
||||
}
|
||||
|
||||
// TryEnqueue inserts the given item into the queue. Does not block
|
||||
// and returns immediately. The result indicates that the queue isn't
|
||||
// full and the item was inserted.
|
||||
func (q *SPSCQueue) TryEnqueue(item interface{}) bool {
|
||||
// relaxed memory order would be enough here
|
||||
idx := atomic.LoadUint64(&q.pidx)
|
||||
nextIdx := idx + 1
|
||||
if nextIdx == q.cap {
|
||||
nextIdx = 0
|
||||
}
|
||||
cachedIdx := q.ccachedIdx
|
||||
if nextIdx == cachedIdx {
|
||||
cachedIdx = atomic.LoadUint64(&q.cidx)
|
||||
q.ccachedIdx = cachedIdx
|
||||
if nextIdx == cachedIdx {
|
||||
return false
|
||||
}
|
||||
}
|
||||
q.items[idx] = item
|
||||
atomic.StoreUint64(&q.pidx, nextIdx)
|
||||
return true
|
||||
}
|
||||
|
||||
// TryDequeue retrieves and removes the item from the head of the
|
||||
// queue. Does not block and returns immediately. The ok result
|
||||
// indicates that the queue isn't empty and an item was retrieved.
|
||||
func (q *SPSCQueue) TryDequeue() (item interface{}, ok bool) {
|
||||
// relaxed memory order would be enough here
|
||||
idx := atomic.LoadUint64(&q.cidx)
|
||||
cachedIdx := q.pcachedIdx
|
||||
if idx == cachedIdx {
|
||||
cachedIdx = atomic.LoadUint64(&q.pidx)
|
||||
q.pcachedIdx = cachedIdx
|
||||
if idx == cachedIdx {
|
||||
return
|
||||
}
|
||||
}
|
||||
item = q.items[idx]
|
||||
q.items[idx] = nil
|
||||
ok = true
|
||||
nextIdx := idx + 1
|
||||
if nextIdx == q.cap {
|
||||
nextIdx = 0
|
||||
}
|
||||
atomic.StoreUint64(&q.cidx, nextIdx)
|
||||
return
|
||||
}
|
||||
96
vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go
generated
vendored
Normal file
96
vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
//go:build go1.19
|
||||
// +build go1.19
|
||||
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// A SPSCQueueOf is a bounded single-producer single-consumer concurrent
|
||||
// queue. This means that not more than a single goroutine must be
|
||||
// publishing items to the queue while not more than a single goroutine
|
||||
// must be consuming those items.
|
||||
//
|
||||
// SPSCQueueOf instances must be created with NewSPSCQueueOf function.
|
||||
// A SPSCQueueOf must not be copied after first use.
|
||||
//
|
||||
// Based on the data structure from the following article:
|
||||
// https://rigtorp.se/ringbuffer/
|
||||
type SPSCQueueOf[I any] struct {
|
||||
cap uint64
|
||||
pidx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad0 [cacheLineSize - 8]byte
|
||||
pcachedIdx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad1 [cacheLineSize - 8]byte
|
||||
cidx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad2 [cacheLineSize - 8]byte
|
||||
ccachedIdx uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad3 [cacheLineSize - 8]byte
|
||||
items []I
|
||||
}
|
||||
|
||||
// NewSPSCQueueOf creates a new SPSCQueueOf instance with the given
|
||||
// capacity.
|
||||
func NewSPSCQueueOf[I any](capacity int) *SPSCQueueOf[I] {
|
||||
if capacity < 1 {
|
||||
panic("capacity must be positive number")
|
||||
}
|
||||
return &SPSCQueueOf[I]{
|
||||
cap: uint64(capacity + 1),
|
||||
items: make([]I, capacity+1),
|
||||
}
|
||||
}
|
||||
|
||||
// TryEnqueue inserts the given item into the queue. Does not block
|
||||
// and returns immediately. The result indicates that the queue isn't
|
||||
// full and the item was inserted.
|
||||
func (q *SPSCQueueOf[I]) TryEnqueue(item I) bool {
|
||||
// relaxed memory order would be enough here
|
||||
idx := atomic.LoadUint64(&q.pidx)
|
||||
next_idx := idx + 1
|
||||
if next_idx == q.cap {
|
||||
next_idx = 0
|
||||
}
|
||||
cached_idx := q.ccachedIdx
|
||||
if next_idx == cached_idx {
|
||||
cached_idx = atomic.LoadUint64(&q.cidx)
|
||||
q.ccachedIdx = cached_idx
|
||||
if next_idx == cached_idx {
|
||||
return false
|
||||
}
|
||||
}
|
||||
q.items[idx] = item
|
||||
atomic.StoreUint64(&q.pidx, next_idx)
|
||||
return true
|
||||
}
|
||||
|
||||
// TryDequeue retrieves and removes the item from the head of the
|
||||
// queue. Does not block and returns immediately. The ok result
|
||||
// indicates that the queue isn't empty and an item was retrieved.
|
||||
func (q *SPSCQueueOf[I]) TryDequeue() (item I, ok bool) {
|
||||
// relaxed memory order would be enough here
|
||||
idx := atomic.LoadUint64(&q.cidx)
|
||||
cached_idx := q.pcachedIdx
|
||||
if idx == cached_idx {
|
||||
cached_idx = atomic.LoadUint64(&q.pidx)
|
||||
q.pcachedIdx = cached_idx
|
||||
if idx == cached_idx {
|
||||
return
|
||||
}
|
||||
}
|
||||
var zeroI I
|
||||
item = q.items[idx]
|
||||
q.items[idx] = zeroI
|
||||
ok = true
|
||||
next_idx := idx + 1
|
||||
if next_idx == q.cap {
|
||||
next_idx = 0
|
||||
}
|
||||
atomic.StoreUint64(&q.cidx, next_idx)
|
||||
return
|
||||
}
|
||||
66
vendor/github.com/puzpuzpuz/xsync/v3/util.go
generated
vendored
Normal file
66
vendor/github.com/puzpuzpuz/xsync/v3/util.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
"runtime"
|
||||
_ "unsafe"
|
||||
)
|
||||
|
||||
// assertionsEnabled is a test-only assert()-like flag; it is false in
// normal builds.
var assertionsEnabled = false

const (
	// cacheLineSize is used in paddings to prevent false sharing;
	// 64B are used instead of 128B as a compromise between
	// memory footprint and performance; 128B usage may give ~30%
	// improvement on NUMA machines.
	cacheLineSize = 64
)
|
||||
|
||||
// nextPowOf2 computes the next highest power of 2 of 32-bit v.
// Equivalent to the classic bit-smearing trick from
// https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2,
// expressed via math/bits. Matches the original's overflow behavior:
// inputs above 1<<31 yield 0 (the over-wide shift is defined as 0 in Go).
func nextPowOf2(v uint32) uint32 {
	if v == 0 {
		return 1
	}
	return uint32(1) << (32 - bits.LeadingZeros32(v-1))
}
|
||||
|
||||
// parallelism returns the maximum possible number of concurrently
// running goroutines: the minimum of GOMAXPROCS and the CPU count.
func parallelism() uint32 {
	p := uint32(runtime.GOMAXPROCS(0))
	if c := uint32(runtime.NumCPU()); c < p {
		return c
	}
	return p
}
|
||||
|
||||
// runtime_fastrand links to the Go runtime's per-P fast pseudorandom
// source (not cryptographically secure). The directives below must not
// be changed: linkname requires the unsafe import and a bodyless decl.
//
//go:noescape
//go:linkname runtime_fastrand runtime.fastrand
func runtime_fastrand() uint32
|
||||
|
||||
// broadcast replicates the byte b into all eight bytes of a uint64
// (SWAR helper); equivalent to 0x0101010101010101 * b.
func broadcast(b uint8) uint64 {
	w := uint64(b)
	w |= w << 8
	w |= w << 16
	w |= w << 32
	return w
}
|
||||
|
||||
// firstMarkedByteIndex returns the index (0..7) of the lowest byte of w
// that has any bit set; returns 8 when w is zero.
func firstMarkedByteIndex(w uint64) int {
	return bits.TrailingZeros64(w) / 8
}
|
||||
|
||||
// markZeroBytes is a SWAR byte search: it sets the high bit of every
// byte of w that is zero. May produce false positives, e.g. for 0x0100,
// so make sure to double-check bytes found by this function.
func markZeroBytes(w uint64) uint64 {
	const (
		ones  = 0x0101010101010101
		highs = 0x8080808080808080
	)
	return (w - ones) & ^w & highs
}
|
||||
|
||||
// setByte returns w with its idx-th byte (0 = least significant)
// replaced by b.
func setByte(w uint64, b uint8, idx int) uint64 {
	shift := uint(idx) * 8
	mask := uint64(0xff) << shift
	return (w &^ mask) | uint64(b)<<shift
}
|
||||
77
vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go
generated
vendored
Normal file
77
vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// makeSeed creates a random seed.
|
||||
func makeSeed() uint64 {
|
||||
var s1 uint32
|
||||
for {
|
||||
s1 = runtime_fastrand()
|
||||
// We use seed 0 to indicate an uninitialized seed/hash,
|
||||
// so keep trying until we get a non-zero seed.
|
||||
if s1 != 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
s2 := runtime_fastrand()
|
||||
return uint64(s1)<<32 | uint64(s2)
|
||||
}
|
||||
|
||||
// hashString calculates a hash of s with the given seed, delegating to
// the runtime's memhash. The empty string hashes to the seed itself
// without touching memhash.
func hashString(s string, seed uint64) uint64 {
	if len(s) == 0 {
		return seed
	}
	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return uint64(runtime_memhash(unsafe.Pointer(strh.Data), uintptr(seed), uintptr(strh.Len)))
}

// runtime_memhash links to the runtime's internal memory hash.
//
//go:noescape
//go:linkname runtime_memhash runtime.memhash
func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr
|
||||
|
||||
// defaultHasher creates a fast hash function for the given comparable
// type, based on runtime.typehash. The only limitation is that the
// type should not contain interfaces inside.
func defaultHasher[T comparable]() func(T, uint64) uint64 {
	var zero T
	if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface {
		// Interface values: hash the dynamic type/value pair.
		return func(value T, seed uint64) uint64 {
			iValue := any(value)
			i := (*iface)(unsafe.Pointer(&iValue))
			return runtime_typehash64(i.typ, i.word, seed)
		}
	}
	// Concrete types: capture the type descriptor once and hash the
	// value's memory directly.
	var iZero any = zero
	i := (*iface)(unsafe.Pointer(&iZero))
	return func(value T, seed uint64) uint64 {
		return runtime_typehash64(i.typ, unsafe.Pointer(&value), seed)
	}
}

// iface mirrors how an interface value is represented in memory.
type iface struct {
	typ  uintptr
	word unsafe.Pointer
}

// runtime_typehash64 is the same as runtime_typehash, but always
// returns a uint64; see maphash.rthash for the 32-bit combining trick.
func runtime_typehash64(t uintptr, p unsafe.Pointer, seed uint64) uint64 {
	if unsafe.Sizeof(uintptr(0)) == 8 {
		return uint64(runtime_typehash(t, p, uintptr(seed)))
	}

	lo := runtime_typehash(t, p, uintptr(seed))
	hi := runtime_typehash(t, p, uintptr(seed>>32))
	return uint64(hi)<<32 | uint64(lo)
}

// runtime_typehash links to the runtime's internal type-aware hash.
//
//go:noescape
//go:linkname runtime_typehash runtime.typehash
func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr
|
||||
85
vendor/go.opentelemetry.io/collector/confmap/README.md
generated
vendored
85
vendor/go.opentelemetry.io/collector/confmap/README.md
generated
vendored
@@ -93,6 +93,91 @@ The `Resolve` method proceeds in the following steps:
|
||||
4. For each "Converter", call "Convert" for the "result".
|
||||
5. Return the "result", aka effective, configuration.
|
||||
|
||||
#### (Experimental) Append merging strategy for lists
|
||||
|
||||
You can opt-in to experimentally combine slices instead of discarding the existing ones by enabling the `confmap.enableMergeAppendOption` feature flag. Lists are appended in the order in which they appear in their configuration sources.
|
||||
This will **not** become the default in the future, we are still deciding how this should be configured and want your feedback on [this issue](https://github.com/open-telemetry/opentelemetry-collector/issues/8754).
|
||||
|
||||
##### Example
|
||||
Consider the following configs,
|
||||
|
||||
```yaml
|
||||
# main.yaml
|
||||
receivers:
|
||||
otlp/in:
|
||||
processors:
|
||||
attributes/example:
|
||||
actions:
|
||||
- key: key
|
||||
value: "value"
|
||||
action: upsert
|
||||
|
||||
exporters:
|
||||
otlp/out:
|
||||
extensions:
|
||||
file_storage:
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [ otlp/in ]
|
||||
processors: [ attributes/example ]
|
||||
exporters: [ otlp/out ]
|
||||
extensions: [ file_storage ]
|
||||
```
|
||||
|
||||
|
||||
```yaml
|
||||
# extra_extension.yaml
|
||||
processors:
|
||||
batch:
|
||||
extensions:
|
||||
healthcheckv2:
|
||||
|
||||
service:
|
||||
extensions: [ healthcheckv2 ]
|
||||
pipelines:
|
||||
traces:
|
||||
processors: [ batch ]
|
||||
```
|
||||
|
||||
If you run the Collector with following command,
|
||||
```
|
||||
otelcol --config=main.yaml --config=extra_extension.yaml --feature-gates=confmap.enableMergeAppendOption
|
||||
```
|
||||
then the final configuration after config resolution will look like following:
|
||||
|
||||
```yaml
|
||||
# main.yaml
|
||||
receivers:
|
||||
otlp/in:
|
||||
processors:
|
||||
attributes/example:
|
||||
actions:
|
||||
- key: key
|
||||
value: "value"
|
||||
action: upsert
|
||||
batch:
|
||||
exporters:
|
||||
otlp/out:
|
||||
extensions:
|
||||
file_storage:
|
||||
healthcheckv2:
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [ otlp/in ]
|
||||
processors: [ attributes/example, batch ]
|
||||
exporters: [ otlp/out ]
|
||||
extensions: [ file_storage, healthcheckv2 ]
|
||||
```
|
||||
|
||||
Notice that the `service::extensions` list is a combination of both configurations. By default, the value of the last configuration source passed, `extra_extension`, would be used, so the extensions list would be: `service::extensions: [healthcheckv2]`.
|
||||
|
||||
> [!NOTE]
|
||||
> By enabling this feature gate, all the lists in the given configuration will be merged.
|
||||
|
||||
### Watching for Updates
|
||||
After the configuration was processed, the `Resolver` can be used as a single point to watch for updates in the
|
||||
configuration retrieved via the `Provider` used to retrieve the “initial” configuration and to generate the “effective” one.
|
||||
|
||||
9
vendor/go.opentelemetry.io/collector/confmap/confmap.go
generated
vendored
9
vendor/go.opentelemetry.io/collector/confmap/confmap.go
generated
vendored
@@ -171,6 +171,15 @@ func (l *Conf) Merge(in *Conf) error {
|
||||
return l.k.Merge(in.k)
|
||||
}
|
||||
|
||||
// mergeAppend merges the input given configuration into the existing config.
|
||||
// Note that the given map may be modified.
|
||||
// Additionally, mergeAppend performs deduplication when merging lists.
|
||||
// For example, if listA = [extension1, extension2] and listB = [extension1, extension3],
|
||||
// the resulting list will be [extension1, extension2, extension3].
|
||||
func (l *Conf) mergeAppend(in *Conf) error {
|
||||
return l.k.Load(confmap.Provider(in.ToStringMap(), ""), nil, koanf.WithMergeFunc(mergeAppend))
|
||||
}
|
||||
|
||||
// Sub returns new Conf instance representing a sub-config of this instance.
|
||||
// It returns an error is the sub-config is not a map[string]any (use Get()), and an empty Map if none exists.
|
||||
func (l *Conf) Sub(key string) (*Conf, error) {
|
||||
|
||||
71
vendor/go.opentelemetry.io/collector/confmap/merge.go
generated
vendored
Normal file
71
vendor/go.opentelemetry.io/collector/confmap/merge.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package confmap // import "go.opentelemetry.io/collector/confmap"
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func mergeAppend(src, dest map[string]any) error {
|
||||
// mergeAppend recursively merges the src map into the dest map (left to right),
|
||||
// modifying and expanding the dest map in the process.
|
||||
// This function does not overwrite lists, and ensures that the final value is a name-aware
|
||||
// copy of lists from src and dest.
|
||||
|
||||
for sKey, sVal := range src {
|
||||
dVal, dOk := dest[sKey]
|
||||
if !dOk {
|
||||
// key is not present in destination config. Hence, add it to destination map
|
||||
dest[sKey] = sVal
|
||||
continue
|
||||
}
|
||||
|
||||
srcVal := reflect.ValueOf(sVal)
|
||||
destVal := reflect.ValueOf(dVal)
|
||||
|
||||
if destVal.Kind() != srcVal.Kind() {
|
||||
// different kinds. Override the destination map
|
||||
dest[sKey] = sVal
|
||||
continue
|
||||
}
|
||||
|
||||
switch srcVal.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
// both of them are array. Merge them
|
||||
dest[sKey] = mergeSlice(srcVal, destVal)
|
||||
case reflect.Map:
|
||||
// both of them are maps. Recursively call the mergeAppend
|
||||
_ = mergeAppend(sVal.(map[string]any), dVal.(map[string]any))
|
||||
default:
|
||||
// any other datatype. Override the destination map
|
||||
dest[sKey] = sVal
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergeSlice(src, dest reflect.Value) any {
|
||||
slice := reflect.MakeSlice(src.Type(), 0, src.Cap()+dest.Cap())
|
||||
for i := 0; i < dest.Len(); i++ {
|
||||
slice = reflect.Append(slice, dest.Index(i))
|
||||
}
|
||||
|
||||
for i := 0; i < src.Len(); i++ {
|
||||
if isPresent(slice, src.Index(i)) {
|
||||
continue
|
||||
}
|
||||
slice = reflect.Append(slice, src.Index(i))
|
||||
}
|
||||
return slice.Interface()
|
||||
}
|
||||
|
||||
func isPresent(slice reflect.Value, val reflect.Value) bool {
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
if slice.Index(i).Equal(val) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
18
vendor/go.opentelemetry.io/collector/confmap/resolver.go
generated
vendored
18
vendor/go.opentelemetry.io/collector/confmap/resolver.go
generated
vendored
@@ -12,6 +12,16 @@ import (
|
||||
|
||||
"go.uber.org/multierr"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"go.opentelemetry.io/collector/featuregate"
|
||||
)
|
||||
|
||||
var enableMergeAppendOption = featuregate.GlobalRegistry().MustRegister(
|
||||
"confmap.enableMergeAppendOption",
|
||||
featuregate.StageAlpha,
|
||||
featuregate.WithRegisterFromVersion("v0.120.0"),
|
||||
featuregate.WithRegisterDescription("Combines lists when resolving configs from different sources. This feature gate will not be stabilized 'as is'; the current behavior will remain the default."),
|
||||
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/8754"),
|
||||
)
|
||||
|
||||
// follows drive-letter specification:
|
||||
@@ -170,7 +180,13 @@ func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = retMap.Merge(retCfgMap); err != nil {
|
||||
if enableMergeAppendOption.IsEnabled() {
|
||||
// only use MergeAppend when enableMergeAppendOption featuregate is enabled.
|
||||
err = retMap.mergeAppend(retCfgMap)
|
||||
} else {
|
||||
err = retMap.Merge(retCfgMap)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
202
vendor/go.opentelemetry.io/collector/featuregate/LICENSE
generated
vendored
Normal file
202
vendor/go.opentelemetry.io/collector/featuregate/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
1
vendor/go.opentelemetry.io/collector/featuregate/Makefile
generated
vendored
Normal file
1
vendor/go.opentelemetry.io/collector/featuregate/Makefile
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
include ../Makefile.Common
|
||||
77
vendor/go.opentelemetry.io/collector/featuregate/README.md
generated
vendored
Normal file
77
vendor/go.opentelemetry.io/collector/featuregate/README.md
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
# Collector Feature Gates
|
||||
|
||||
This package provides a mechanism that allows operators to enable and disable
|
||||
experimental or transitional features at deployment time. These flags should
|
||||
be able to govern the behavior of the application starting as early as possible
|
||||
and should be available to every component such that decisions may be made
|
||||
based on flags at the component level.
|
||||
|
||||
## Usage
|
||||
|
||||
Feature gates must be defined and registered with the global registry in
|
||||
an `init()` function. This makes the `Gate` available to be configured and
|
||||
queried with the defined [`Stage`](#feature-lifecycle) default value.
|
||||
A `Gate` can have a list of associated issues that allow users to refer to
|
||||
the issue and report any additional problems or understand the context of the `Gate`.
|
||||
Once a `Gate` has been marked as `Stable`, it must have a `RemovalVersion` set.
|
||||
|
||||
```go
|
||||
var myFeatureGate = featuregate.GlobalRegistry().MustRegister(
|
||||
"namespaced.uniqueIdentifier",
|
||||
featuregate.Stable,
|
||||
featuregate.WithRegisterFromVersion("v0.65.0")
|
||||
featuregate.WithRegisterDescription("A brief description of what the gate controls"),
|
||||
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/6167"),
|
||||
featuregate.WithRegisterToVersion("v0.70.0"))
|
||||
```
|
||||
|
||||
The status of the gate may later be checked by interrogating the global
|
||||
feature gate registry:
|
||||
|
||||
```go
|
||||
if myFeatureGate.IsEnabled() {
|
||||
setupNewFeature()
|
||||
}
|
||||
```
|
||||
|
||||
Note that querying the registry takes a read lock and accesses a map, so it
|
||||
should be done once and the result cached for local use if repeated checks
|
||||
are required. Avoid querying the registry in a loop.
|
||||
|
||||
## Controlling Gates
|
||||
|
||||
Feature gates can be enabled or disabled via the CLI, with the
|
||||
`--feature-gates` flag. When using the CLI flag, gate
|
||||
identifiers must be presented as a comma-delimited list. Gate identifiers
|
||||
prefixed with `-` will disable the gate and prefixing with `+` or with no
|
||||
prefix will enable the gate.
|
||||
|
||||
```shell
|
||||
otelcol --config=config.yaml --feature-gates=gate1,-gate2,+gate3
|
||||
```
|
||||
|
||||
This will enable `gate1` and `gate3` and disable `gate2`.
|
||||
|
||||
## Feature Lifecycle
|
||||
|
||||
Features controlled by a `Gate` should follow a three-stage lifecycle,
|
||||
modeled after the [system used by Kubernetes](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages):
|
||||
|
||||
1. An `alpha` stage where the feature is disabled by default and must be enabled
|
||||
through a `Gate`.
|
||||
2. A `beta` stage where the feature has been well tested and is enabled by
|
||||
default but can be disabled through a `Gate`.
|
||||
3. A generally available or `stable` stage where the feature is permanently enabled. At this stage
|
||||
the gate should no longer be explicitly used. Disabling the gate will produce an error and
|
||||
explicitly enabling will produce a warning log.
|
||||
4. A `stable` feature gate will be removed in the version specified by its `ToVersion` value.
|
||||
|
||||
Features that prove unworkable in the `alpha` stage may be discontinued
|
||||
without proceeding to the `beta` stage. Instead, they will proceed to the
|
||||
`deprecated` stage, which will feature is permanently disabled. A feature gate will
|
||||
be removed once it has been `deprecated` for at least 2 releases of the collector.
|
||||
|
||||
Features that make it to the `beta` stage are intended to reach general availability but may still be discontinued.
|
||||
If, after wider use, it is determined that the gate should be discontinued it will be reverted to the `alpha` stage
|
||||
for 2 releases and then proceed to the `deprecated` stage. If instead it is ready for general availability it will
|
||||
proceed to the `stable` stage.
|
||||
71
vendor/go.opentelemetry.io/collector/featuregate/flag.go
generated
vendored
Normal file
71
vendor/go.opentelemetry.io/collector/featuregate/flag.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package featuregate // import "go.opentelemetry.io/collector/featuregate"
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"strings"
|
||||
|
||||
"go.uber.org/multierr"
|
||||
)
|
||||
|
||||
const (
|
||||
featureGatesFlag = "feature-gates"
|
||||
featureGatesFlagDescription = "Comma-delimited list of feature gate identifiers. Prefix with '-' to disable the feature. '+' or no prefix will enable the feature."
|
||||
)
|
||||
|
||||
// RegisterFlagsOption is an option for RegisterFlags.
|
||||
type RegisterFlagsOption interface {
|
||||
private()
|
||||
}
|
||||
|
||||
// RegisterFlags that directly applies feature gate statuses to a Registry.
|
||||
func (r *Registry) RegisterFlags(flagSet *flag.FlagSet, _ ...RegisterFlagsOption) {
|
||||
flagSet.Var(&flagValue{reg: r}, featureGatesFlag, featureGatesFlagDescription)
|
||||
}
|
||||
|
||||
// flagValue implements the flag.Value interface and directly applies feature gate statuses to a Registry.
|
||||
type flagValue struct {
|
||||
reg *Registry
|
||||
}
|
||||
|
||||
func (f *flagValue) String() string {
|
||||
// This function can be called by isZeroValue https://github.com/golang/go/blob/go1.23.3/src/flag/flag.go#L630
|
||||
// which creates an instance of flagValue using reflect.New. In this case, the field `reg` is nil.
|
||||
if f.reg == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
var ids []string
|
||||
f.reg.VisitAll(func(g *Gate) {
|
||||
id := g.ID()
|
||||
if !g.IsEnabled() {
|
||||
id = "-" + id
|
||||
}
|
||||
ids = append(ids, id)
|
||||
})
|
||||
return strings.Join(ids, ",")
|
||||
}
|
||||
|
||||
func (f *flagValue) Set(s string) error {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var errs error
|
||||
ids := strings.Split(s, ",")
|
||||
for i := range ids {
|
||||
id := ids[i]
|
||||
val := true
|
||||
switch id[0] {
|
||||
case '-':
|
||||
id = id[1:]
|
||||
val = false
|
||||
case '+':
|
||||
id = id[1:]
|
||||
}
|
||||
errs = multierr.Append(errs, f.reg.Set(id, val))
|
||||
}
|
||||
return errs
|
||||
}
|
||||
58
vendor/go.opentelemetry.io/collector/featuregate/gate.go
generated
vendored
Normal file
58
vendor/go.opentelemetry.io/collector/featuregate/gate.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package featuregate // import "go.opentelemetry.io/collector/featuregate"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
)
|
||||
|
||||
// Gate is an immutable object that is owned by the Registry and represents an individual feature that
|
||||
// may be enabled or disabled based on the lifecycle state of the feature and CLI flags specified by the user.
|
||||
type Gate struct {
|
||||
id string
|
||||
description string
|
||||
referenceURL string
|
||||
fromVersion *version.Version
|
||||
toVersion *version.Version
|
||||
stage Stage
|
||||
enabled *atomic.Bool
|
||||
}
|
||||
|
||||
// ID returns the id of the Gate.
|
||||
func (g *Gate) ID() string {
|
||||
return g.id
|
||||
}
|
||||
|
||||
// IsEnabled returns true if the feature described by the Gate is enabled.
|
||||
func (g *Gate) IsEnabled() bool {
|
||||
return g.enabled.Load()
|
||||
}
|
||||
|
||||
// Description returns the description for the Gate.
|
||||
func (g *Gate) Description() string {
|
||||
return g.description
|
||||
}
|
||||
|
||||
// Stage returns the Gate's lifecycle stage.
|
||||
func (g *Gate) Stage() Stage {
|
||||
return g.stage
|
||||
}
|
||||
|
||||
// ReferenceURL returns the URL to the contextual information about the Gate.
|
||||
func (g *Gate) ReferenceURL() string {
|
||||
return g.referenceURL
|
||||
}
|
||||
|
||||
// FromVersion returns the version information when the Gate's was added.
|
||||
func (g *Gate) FromVersion() string {
|
||||
return fmt.Sprintf("v%s", g.fromVersion)
|
||||
}
|
||||
|
||||
// ToVersion returns the version information when Gate's in StageStable.
|
||||
func (g *Gate) ToVersion() string {
|
||||
return fmt.Sprintf("v%s", g.toVersion)
|
||||
}
|
||||
211
vendor/go.opentelemetry.io/collector/featuregate/registry.go
generated
vendored
Normal file
211
vendor/go.opentelemetry.io/collector/featuregate/registry.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package featuregate // import "go.opentelemetry.io/collector/featuregate"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
)
|
||||
|
||||
var (
|
||||
globalRegistry = NewRegistry()
|
||||
|
||||
// idRegexp is used to validate the ID of a Gate.
|
||||
// IDs' characters must be alphanumeric or dots.
|
||||
idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`)
|
||||
)
|
||||
|
||||
// ErrAlreadyRegistered is returned when adding a Gate that is already registered.
|
||||
var ErrAlreadyRegistered = errors.New("gate is already registered")
|
||||
|
||||
// GlobalRegistry returns the global Registry.
|
||||
func GlobalRegistry() *Registry {
|
||||
return globalRegistry
|
||||
}
|
||||
|
||||
type Registry struct {
|
||||
gates sync.Map
|
||||
}
|
||||
|
||||
// NewRegistry returns a new empty Registry.
|
||||
func NewRegistry() *Registry {
|
||||
return &Registry{}
|
||||
}
|
||||
|
||||
// RegisterOption allows to configure additional information about a Gate during registration.
|
||||
type RegisterOption interface {
|
||||
apply(g *Gate) error
|
||||
}
|
||||
|
||||
type registerOptionFunc func(g *Gate) error
|
||||
|
||||
func (ro registerOptionFunc) apply(g *Gate) error {
|
||||
return ro(g)
|
||||
}
|
||||
|
||||
// WithRegisterDescription adds description for the Gate.
|
||||
func WithRegisterDescription(description string) RegisterOption {
|
||||
return registerOptionFunc(func(g *Gate) error {
|
||||
g.description = description
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// WithRegisterReferenceURL adds a URL that has all the contextual information about the Gate.
|
||||
// referenceURL must be a valid URL as defined by `net/url.Parse`.
|
||||
func WithRegisterReferenceURL(referenceURL string) RegisterOption {
|
||||
return registerOptionFunc(func(g *Gate) error {
|
||||
if _, err := url.Parse(referenceURL); err != nil {
|
||||
return fmt.Errorf("WithRegisterReferenceURL: invalid reference URL %q: %w", referenceURL, err)
|
||||
}
|
||||
|
||||
g.referenceURL = referenceURL
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// WithRegisterFromVersion is used to set the Gate "FromVersion".
|
||||
// The "FromVersion" contains the Collector release when a feature is introduced.
|
||||
// fromVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease].
|
||||
// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters.
|
||||
func WithRegisterFromVersion(fromVersion string) RegisterOption {
|
||||
return registerOptionFunc(func(g *Gate) error {
|
||||
from, err := version.NewVersion(fromVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("WithRegisterFromVersion: invalid version %q: %w", fromVersion, err)
|
||||
}
|
||||
|
||||
g.fromVersion = from
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// WithRegisterToVersion is used to set the Gate "ToVersion".
|
||||
// The "ToVersion", if not empty, contains the last Collector release in which you can still use a feature gate.
|
||||
// If the feature stage is either "Deprecated" or "Stable", the "ToVersion" is the Collector release when the feature is removed.
|
||||
// toVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease].
|
||||
// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters.
|
||||
func WithRegisterToVersion(toVersion string) RegisterOption {
|
||||
return registerOptionFunc(func(g *Gate) error {
|
||||
to, err := version.NewVersion(toVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("WithRegisterToVersion: invalid version %q: %w", toVersion, err)
|
||||
}
|
||||
|
||||
g.toVersion = to
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// MustRegister like Register but panics if an invalid ID or gate options are provided.
|
||||
func (r *Registry) MustRegister(id string, stage Stage, opts ...RegisterOption) *Gate {
|
||||
g, err := r.Register(id, stage, opts...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func validateID(id string) error {
|
||||
if id == "" {
|
||||
return errors.New("empty ID")
|
||||
}
|
||||
|
||||
if !idRegexp.MatchString(id) {
|
||||
return errors.New("invalid character(s) in ID")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Register a Gate and return it. The returned Gate can be used to check if is enabled or not.
|
||||
// id must be an ASCII alphanumeric nonempty string. Dots are allowed for namespacing.
|
||||
func (r *Registry) Register(id string, stage Stage, opts ...RegisterOption) (*Gate, error) {
|
||||
if err := validateID(id); err != nil {
|
||||
return nil, fmt.Errorf("invalid ID %q: %w", id, err)
|
||||
}
|
||||
|
||||
g := &Gate{
|
||||
id: id,
|
||||
stage: stage,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
err := opt.apply(g)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to apply option: %w", err)
|
||||
}
|
||||
}
|
||||
switch g.stage {
|
||||
case StageAlpha, StageDeprecated:
|
||||
g.enabled = &atomic.Bool{}
|
||||
case StageBeta, StageStable:
|
||||
enabled := &atomic.Bool{}
|
||||
enabled.Store(true)
|
||||
g.enabled = enabled
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown stage value %q for gate %q", stage, id)
|
||||
}
|
||||
if (g.stage == StageStable || g.stage == StageDeprecated) && g.toVersion == nil {
|
||||
return nil, fmt.Errorf("no removal version set for %v gate %q", g.stage.String(), id)
|
||||
}
|
||||
|
||||
if g.fromVersion != nil && g.toVersion != nil && g.toVersion.LessThan(g.fromVersion) {
|
||||
return nil, fmt.Errorf("toVersion %q is before fromVersion %q", g.toVersion, g.fromVersion)
|
||||
}
|
||||
|
||||
if _, loaded := r.gates.LoadOrStore(id, g); loaded {
|
||||
return nil, fmt.Errorf("failed to register %q: %w", id, ErrAlreadyRegistered)
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Set the enabled valued for a Gate identified by the given id.
|
||||
func (r *Registry) Set(id string, enabled bool) error {
|
||||
v, ok := r.gates.Load(id)
|
||||
if !ok {
|
||||
validGates := []string{}
|
||||
r.VisitAll(func(g *Gate) {
|
||||
validGates = append(validGates, g.ID())
|
||||
})
|
||||
return fmt.Errorf("no such feature gate %q. valid gates: %v", id, validGates)
|
||||
}
|
||||
g := v.(*Gate)
|
||||
|
||||
switch g.stage {
|
||||
case StageStable:
|
||||
if !enabled {
|
||||
return fmt.Errorf("feature gate %q is stable, can not be disabled", id)
|
||||
}
|
||||
fmt.Printf("Feature gate %q is stable and already enabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion)
|
||||
case StageDeprecated:
|
||||
if enabled {
|
||||
return fmt.Errorf("feature gate %q is deprecated, can not be enabled", id)
|
||||
}
|
||||
fmt.Printf("Feature gate %q is deprecated and already disabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion)
|
||||
default:
|
||||
g.enabled.Store(enabled)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VisitAll visits all the gates in lexicographical order, calling fn for each.
|
||||
func (r *Registry) VisitAll(fn func(*Gate)) {
|
||||
var gates []*Gate
|
||||
r.gates.Range(func(_, value any) bool {
|
||||
gates = append(gates, value.(*Gate))
|
||||
return true
|
||||
})
|
||||
sort.Slice(gates, func(i, j int) bool {
|
||||
return gates[i].ID() < gates[j].ID()
|
||||
})
|
||||
for i := range gates {
|
||||
fn(gates[i])
|
||||
}
|
||||
}
|
||||
44
vendor/go.opentelemetry.io/collector/featuregate/stage.go
generated
vendored
Normal file
44
vendor/go.opentelemetry.io/collector/featuregate/stage.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package featuregate // import "go.opentelemetry.io/collector/featuregate"
|
||||
|
||||
// Stage represents the Gate's lifecycle and what is the expected state of it.
type Stage int8

const (
	// StageAlpha is used when creating a new feature and the Gate must be explicitly enabled
	// by the operator.
	//
	// The Gate will be disabled by default.
	StageAlpha Stage = iota
	// StageBeta is used when the feature gate is well tested and is enabled by default,
	// but can be disabled by a Gate.
	//
	// The Gate will be enabled by default.
	StageBeta
	// StageStable is used when feature is permanently enabled and can not be disabled by a Gate.
	// This value is used to provide feedback to the user that the gate will be removed in the next versions.
	//
	// The Gate will be enabled by default and will return an error if disabled.
	StageStable
	// StageDeprecated is used when feature is permanently disabled and can not be enabled by a Gate.
	// This value is used to provide feedback to the user that the gate will be removed in the next versions.
	//
	// The Gate will be disabled by default and will return an error if modified.
	StageDeprecated
)
|
||||
|
||||
func (s Stage) String() string {
|
||||
switch s {
|
||||
case StageAlpha:
|
||||
return "Alpha"
|
||||
case StageBeta:
|
||||
return "Beta"
|
||||
case StageStable:
|
||||
return "Stable"
|
||||
case StageDeprecated:
|
||||
return "Deprecated"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
generated
vendored
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
generated
vendored
@@ -7,6 +7,9 @@
|
||||
package pcommon
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"slices"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
)
|
||||
|
||||
@@ -55,6 +58,17 @@ func (ms ByteSlice) At(i int) byte {
|
||||
return (*ms.getOrig())[i]
|
||||
}
|
||||
|
||||
// All returns an iterator over index-value pairs in the slice.
|
||||
func (ms ByteSlice) All() iter.Seq2[int, byte] {
|
||||
return func(yield func(int, byte) bool) {
|
||||
for i := 0; i < ms.Len(); i++ {
|
||||
if !yield(i, ms.At(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAt sets byte item at particular index.
|
||||
// Equivalent of byteSlice[i] = val
|
||||
func (ms ByteSlice) SetAt(i int, val byte) {
|
||||
@@ -102,6 +116,11 @@ func (ms ByteSlice) CopyTo(dest ByteSlice) {
|
||||
*dest.getOrig() = copyByteSlice(*dest.getOrig(), *ms.getOrig())
|
||||
}
|
||||
|
||||
// Equal checks equality with another ByteSlice
|
||||
func (ms ByteSlice) Equal(val ByteSlice) bool {
|
||||
return slices.Equal(*ms.getOrig(), *val.getOrig())
|
||||
}
|
||||
|
||||
func copyByteSlice(dst, src []byte) []byte {
|
||||
dst = dst[:0]
|
||||
return append(dst, src...)
|
||||
|
||||
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
generated
vendored
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
generated
vendored
@@ -7,6 +7,9 @@
|
||||
package pcommon
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"slices"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
)
|
||||
|
||||
@@ -55,6 +58,17 @@ func (ms Float64Slice) At(i int) float64 {
|
||||
return (*ms.getOrig())[i]
|
||||
}
|
||||
|
||||
// All returns an iterator over index-value pairs in the slice.
|
||||
func (ms Float64Slice) All() iter.Seq2[int, float64] {
|
||||
return func(yield func(int, float64) bool) {
|
||||
for i := 0; i < ms.Len(); i++ {
|
||||
if !yield(i, ms.At(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAt sets float64 item at particular index.
|
||||
// Equivalent of float64Slice[i] = val
|
||||
func (ms Float64Slice) SetAt(i int, val float64) {
|
||||
@@ -102,6 +116,11 @@ func (ms Float64Slice) CopyTo(dest Float64Slice) {
|
||||
*dest.getOrig() = copyFloat64Slice(*dest.getOrig(), *ms.getOrig())
|
||||
}
|
||||
|
||||
// Equal checks equality with another Float64Slice
|
||||
func (ms Float64Slice) Equal(val Float64Slice) bool {
|
||||
return slices.Equal(*ms.getOrig(), *val.getOrig())
|
||||
}
|
||||
|
||||
func copyFloat64Slice(dst, src []float64) []float64 {
|
||||
dst = dst[:0]
|
||||
return append(dst, src...)
|
||||
|
||||
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
generated
vendored
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
generated
vendored
@@ -7,6 +7,9 @@
|
||||
package pcommon
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"slices"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
)
|
||||
|
||||
@@ -55,6 +58,17 @@ func (ms Int32Slice) At(i int) int32 {
|
||||
return (*ms.getOrig())[i]
|
||||
}
|
||||
|
||||
// All returns an iterator over index-value pairs in the slice.
|
||||
func (ms Int32Slice) All() iter.Seq2[int, int32] {
|
||||
return func(yield func(int, int32) bool) {
|
||||
for i := 0; i < ms.Len(); i++ {
|
||||
if !yield(i, ms.At(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAt sets int32 item at particular index.
|
||||
// Equivalent of int32Slice[i] = val
|
||||
func (ms Int32Slice) SetAt(i int, val int32) {
|
||||
@@ -102,6 +116,11 @@ func (ms Int32Slice) CopyTo(dest Int32Slice) {
|
||||
*dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig())
|
||||
}
|
||||
|
||||
// Equal checks equality with another Int32Slice
|
||||
func (ms Int32Slice) Equal(val Int32Slice) bool {
|
||||
return slices.Equal(*ms.getOrig(), *val.getOrig())
|
||||
}
|
||||
|
||||
func copyInt32Slice(dst, src []int32) []int32 {
|
||||
dst = dst[:0]
|
||||
return append(dst, src...)
|
||||
|
||||
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
generated
vendored
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
generated
vendored
@@ -7,6 +7,9 @@
|
||||
package pcommon
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"slices"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
)
|
||||
|
||||
@@ -55,6 +58,17 @@ func (ms Int64Slice) At(i int) int64 {
|
||||
return (*ms.getOrig())[i]
|
||||
}
|
||||
|
||||
// All returns an iterator over index-value pairs in the slice.
|
||||
func (ms Int64Slice) All() iter.Seq2[int, int64] {
|
||||
return func(yield func(int, int64) bool) {
|
||||
for i := 0; i < ms.Len(); i++ {
|
||||
if !yield(i, ms.At(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAt sets int64 item at particular index.
|
||||
// Equivalent of int64Slice[i] = val
|
||||
func (ms Int64Slice) SetAt(i int, val int64) {
|
||||
@@ -102,6 +116,11 @@ func (ms Int64Slice) CopyTo(dest Int64Slice) {
|
||||
*dest.getOrig() = copyInt64Slice(*dest.getOrig(), *ms.getOrig())
|
||||
}
|
||||
|
||||
// Equal checks equality with another Int64Slice
|
||||
func (ms Int64Slice) Equal(val Int64Slice) bool {
|
||||
return slices.Equal(*ms.getOrig(), *val.getOrig())
|
||||
}
|
||||
|
||||
func copyInt64Slice(dst, src []int64) []int64 {
|
||||
dst = dst[:0]
|
||||
return append(dst, src...)
|
||||
|
||||
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
generated
vendored
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
generated
vendored
@@ -7,6 +7,9 @@
|
||||
package pcommon
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"slices"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
)
|
||||
|
||||
@@ -55,6 +58,17 @@ func (ms StringSlice) At(i int) string {
|
||||
return (*ms.getOrig())[i]
|
||||
}
|
||||
|
||||
// All returns an iterator over index-value pairs in the slice.
|
||||
func (ms StringSlice) All() iter.Seq2[int, string] {
|
||||
return func(yield func(int, string) bool) {
|
||||
for i := 0; i < ms.Len(); i++ {
|
||||
if !yield(i, ms.At(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAt sets string item at particular index.
|
||||
// Equivalent of stringSlice[i] = val
|
||||
func (ms StringSlice) SetAt(i int, val string) {
|
||||
@@ -102,6 +116,11 @@ func (ms StringSlice) CopyTo(dest StringSlice) {
|
||||
*dest.getOrig() = copyStringSlice(*dest.getOrig(), *ms.getOrig())
|
||||
}
|
||||
|
||||
// Equal checks equality with another StringSlice
|
||||
func (ms StringSlice) Equal(val StringSlice) bool {
|
||||
return slices.Equal(*ms.getOrig(), *val.getOrig())
|
||||
}
|
||||
|
||||
func copyStringSlice(dst, src []string) []string {
|
||||
dst = dst[:0]
|
||||
return append(dst, src...)
|
||||
|
||||
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
generated
vendored
19
vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
generated
vendored
@@ -7,6 +7,9 @@
|
||||
package pcommon
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"slices"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
)
|
||||
|
||||
@@ -55,6 +58,17 @@ func (ms UInt64Slice) At(i int) uint64 {
|
||||
return (*ms.getOrig())[i]
|
||||
}
|
||||
|
||||
// All returns an iterator over index-value pairs in the slice.
|
||||
func (ms UInt64Slice) All() iter.Seq2[int, uint64] {
|
||||
return func(yield func(int, uint64) bool) {
|
||||
for i := 0; i < ms.Len(); i++ {
|
||||
if !yield(i, ms.At(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAt sets uint64 item at particular index.
|
||||
// Equivalent of uInt64Slice[i] = val
|
||||
func (ms UInt64Slice) SetAt(i int, val uint64) {
|
||||
@@ -102,6 +116,11 @@ func (ms UInt64Slice) CopyTo(dest UInt64Slice) {
|
||||
*dest.getOrig() = copyUInt64Slice(*dest.getOrig(), *ms.getOrig())
|
||||
}
|
||||
|
||||
// Equal checks equality with another UInt64Slice
|
||||
func (ms UInt64Slice) Equal(val UInt64Slice) bool {
|
||||
return slices.Equal(*ms.getOrig(), *val.getOrig())
|
||||
}
|
||||
|
||||
func copyUInt64Slice(dst, src []uint64) []uint64 {
|
||||
dst = dst[:0]
|
||||
return append(dst, src...)
|
||||
|
||||
41
vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
generated
vendored
41
vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
generated
vendored
@@ -4,6 +4,8 @@
|
||||
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
|
||||
|
||||
import (
|
||||
"iter"
|
||||
|
||||
"go.uber.org/multierr"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
@@ -225,6 +227,22 @@ func (m Map) Range(f func(k string, v Value) bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// All returns an iterator over key-value pairs in the Map.
|
||||
//
|
||||
// for k, v := range es.All() {
|
||||
// ... // Do something with key-value pair
|
||||
// }
|
||||
func (m Map) All() iter.Seq2[string, Value] {
|
||||
return func(yield func(string, Value) bool) {
|
||||
for i := range *m.getOrig() {
|
||||
kv := &(*m.getOrig())[i]
|
||||
if !yield(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MoveTo moves all key/values from the current map overriding the destination and
|
||||
// resetting the current instance to its zero value
|
||||
func (m Map) MoveTo(dest Map) {
|
||||
@@ -290,3 +308,26 @@ func (m Map) FromRaw(rawMap map[string]any) error {
|
||||
*m.getOrig() = origs
|
||||
return errs
|
||||
}
|
||||
|
||||
// Equal checks equality with another Map
|
||||
func (m Map) Equal(val Map) bool {
|
||||
if m.Len() != val.Len() {
|
||||
return false
|
||||
}
|
||||
|
||||
fullEqual := true
|
||||
|
||||
m.Range(func(k string, v Value) bool {
|
||||
vv, ok := val.Get(k)
|
||||
if !ok {
|
||||
fullEqual = false
|
||||
return fullEqual
|
||||
}
|
||||
|
||||
if !v.Equal(vv) {
|
||||
fullEqual = false
|
||||
}
|
||||
return fullEqual
|
||||
})
|
||||
return fullEqual
|
||||
}
|
||||
|
||||
31
vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
generated
vendored
31
vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
generated
vendored
@@ -4,6 +4,8 @@
|
||||
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
|
||||
|
||||
import (
|
||||
"iter"
|
||||
|
||||
"go.uber.org/multierr"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/internal"
|
||||
@@ -58,6 +60,21 @@ func (es Slice) At(ix int) Value {
|
||||
return newValue(&(*es.getOrig())[ix], es.getState())
|
||||
}
|
||||
|
||||
// All returns an iterator over index-value pairs in the slice.
|
||||
//
|
||||
// for i, v := range es.All() {
|
||||
// ... // Do something with index-value pair
|
||||
// }
|
||||
func (es Slice) All() iter.Seq2[int, Value] {
|
||||
return func(yield func(int, Value) bool) {
|
||||
for i := 0; i < es.Len(); i++ {
|
||||
if !yield(i, es.At(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CopyTo copies all elements from the current slice overriding the destination.
|
||||
func (es Slice) CopyTo(dest Slice) {
|
||||
dest.getState().AssertMutable()
|
||||
@@ -164,3 +181,17 @@ func (es Slice) FromRaw(rawSlice []any) error {
|
||||
*es.getOrig() = origs
|
||||
return errs
|
||||
}
|
||||
|
||||
// Equal checks equality with another Slice
|
||||
func (es Slice) Equal(val Slice) bool {
|
||||
if es.Len() != val.Len() {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := 0; i < es.Len(); i++ {
|
||||
if !es.At(i).Equal(val.At(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user