Compare commits

...

2 Commits

Author SHA1 Message Date
Andrii Chubatiuk
d0fd4e0da4 apply review suggestions 2025-11-17 14:30:54 +02:00
Sylvain Rabot
cbf896b0da app/vmbackup: support custom SSE KMS key id and ACL
Signed-off-by: Sylvain Rabot <sylvain@abstraction.fr>
2025-11-17 14:11:50 +02:00
6 changed files with 84 additions and 15 deletions

View File

@@ -26,6 +26,8 @@ See also [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-rel
## tip
* FEATURE: [vmbackup](https://docs.victoriametrics.com/victoriametrics/vmbackup/), [vmrestore](https://docs.victoriametrics.com/victoriametrics/vmrestore/), [vmbackupmanager](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/): add support for SSE KMS Key ID and ACL for use with S3-compatible storages.
## [v1.130.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.130.0)
Released at 2025-11-14

View File

@@ -489,10 +489,14 @@ Run `vmbackup -help` in order to see all the available options:
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-s3ACL string
ACL to be set for uploaded objects to S3. Supported values are: private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
-s3ForcePathStyle
Prefixing endpoint with bucket name when set false, true by default. (default true)
-s3ObjectTags string
S3 tags to be set for uploaded objects. Must be set in JSON format: {"param1":"value1",...,"paramN":"valueN"}.
-s3SSEKMSKeyId string
SSE KMS Key ID for use with S3-compatible storages.
-s3StorageClass string
The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html

View File

@@ -651,10 +651,14 @@ command-line flags:
Disable validation of source backup presence and completeness when creating a restore mark.
-runOnStart
Upload backups immediately after start of the service. Otherwise the backup starts on new hour
-s3ACL string
ACL to be set for uploaded objects to S3. Supported values are: private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
-s3ForcePathStyle
Prefixing endpoint with bucket name when set false, true by default. (default true)
-s3ObjectTags string
S3 tags to be set for uploaded objects. Must be set in JSON format: {"param1":"value1",...,"paramN":"valueN"}.
-s3SSEKMSKeyId string
SSE KMS Key ID for use with S3-compatible storages.
-s3StorageClass string
The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html

View File

@@ -189,6 +189,8 @@ Run `vmrestore -help` in order to see all the available options:
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-s3ForcePathStyle
Prefixing endpoint with bucket name when set false, true by default. (default true)
-s3SSEKMSKeyId string
SSE KMS Key ID for use with S3-compatible storages.
-s3StorageClass string
The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html

View File

@@ -27,12 +27,15 @@ var (
configProfile = flag.String("configProfile", "", "Profile name for S3 configs. If no set, the value of the environment variable will be loaded (AWS_PROFILE or AWS_DEFAULT_PROFILE), "+
"or if both not set, DefaultSharedConfigProfile is used")
customS3Endpoint = flag.String("customS3Endpoint", "", "Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set")
s3ACL = flag.String("s3ACL", "bucket-owner-full-control", "ACL to be set for uploaded objects to S3.")
s3ForcePathStyle = flag.Bool("s3ForcePathStyle", true, "Prefixing endpoint with bucket name when set false, true by default.")
s3StorageClass = flag.String("s3StorageClass", "", "The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, "+
"DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.\n"+
"See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html")
s3ChecksumAlgorithm = flag.String("s3ChecksumAlgorithm", "", "Objects integrity checksum algorithm which is applied while uploading objects to AWS S3. "+
"Supported values are: SHA256, SHA1, CRC32C, CRC32")
s3SSEKMSKeyId = flag.String("s3SSEKMSKeyId", "", "SSE KMS Key ID for use with S3-compatible storages.")
s3SSEAlgorithm = flag.String("s3SSEAlgorithm", "aws:kms", "SSE KMS Key Algorithm for use with S3-compatible storages.")
s3TLSInsecureSkipVerify = flag.Bool("s3TLSInsecureSkipVerify", false, "Whether to skip TLS verification when connecting to the S3 endpoint.")
s3Tags = flag.String("s3ObjectTags", "", `S3 tags to be set for uploaded objects. Must be set in JSON format: {"param1":"value1",...,"paramN":"valueN"}.`)
)
@@ -267,6 +270,9 @@ func NewRemoteFS(ctx context.Context, path string) (common.RemoteFS, error) {
StorageClass: s3remote.StringToStorageClass(*s3StorageClass),
ChecksumAlgorithm: s3remote.StringToChecksumAlgorithm(*s3ChecksumAlgorithm),
S3ForcePathStyle: *s3ForcePathStyle,
ACL: s3remote.StringToObjectACL(*s3ACL),
SSEKMSKeyId: *s3SSEKMSKeyId,
SSEAlgorithm: s3remote.StringToEncryptionAlgorithm(*s3SSEAlgorithm),
ProfileName: *configProfile,
Bucket: bucket,
Dir: dir,

View File

@@ -8,6 +8,7 @@ import (
"io"
"net/http"
"path"
"slices"
"sort"
"strings"
"time"
@@ -26,34 +27,54 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
)
var (
supportedStorageClasses = []s3types.StorageClass{s3types.StorageClassGlacier, s3types.StorageClassDeepArchive, s3types.StorageClassGlacierIr, s3types.StorageClassIntelligentTiering, s3types.StorageClassOnezoneIa, s3types.StorageClassOutposts, s3types.StorageClassReducedRedundancy, s3types.StorageClassStandard, s3types.StorageClassStandardIa}
)
func validateStorageClass(storageClass s3types.StorageClass) error {
func validateStorageClass(v s3types.StorageClass) error {
// if no storageClass set, no need to validate against supported values
// backwards compatibility
if len(storageClass) == 0 {
if len(v) == 0 || slices.Contains(v.Values(), v) {
return nil
}
return fmt.Errorf("unsupported S3 storage class %q. Supported values: %v", v, v.Values())
}
for _, supported := range supportedStorageClasses {
if supported == storageClass {
return nil
}
// validateObjectACL reports whether v is a valid canned object ACL.
// An empty value is accepted for backwards compatibility (no ACL configured);
// any non-empty value must be one of the ACLs enumerated by the AWS SDK.
func validateObjectACL(v s3types.ObjectCannedACL) error {
	if len(v) > 0 && !slices.Contains(v.Values(), v) {
		return fmt.Errorf("unsupported S3 object ACL %q. Supported values: %v", v, v.Values())
	}
	return nil
}
return fmt.Errorf("unsupported S3 storage class: %s. Supported values: %v", storageClass, supportedStorageClasses)
// validateChecksumAlgorithm reports whether v is a valid checksum algorithm.
// An empty value is accepted (no checksum algorithm configured); any non-empty
// value must be one of the algorithms enumerated by the AWS SDK.
func validateChecksumAlgorithm(v s3types.ChecksumAlgorithm) error {
	if len(v) > 0 && !slices.Contains(v.Values(), v) {
		return fmt.Errorf("unsupported S3 checksum algorithm %q. Supported values: %v", v, v.Values())
	}
	return nil
}
// validateSSEAlgorithm reports whether v is a valid server-side encryption
// algorithm. An empty value is accepted (no SSE configured); any non-empty
// value must be one of the algorithms enumerated by the AWS SDK.
func validateSSEAlgorithm(v s3types.ServerSideEncryption) error {
	if len(v) > 0 && !slices.Contains(v.Values(), v) {
		return fmt.Errorf("unsupported S3 server-side algorithm %q. Supported values: %v", v, v.Values())
	}
	return nil
}
// StringToStorageClass converts string types to AWS S3 StorageClass type for value comparison
func StringToStorageClass(sc string) s3types.StorageClass {
return s3types.StorageClass(sc)
func StringToStorageClass(s string) s3types.StorageClass {
return s3types.StorageClass(s)
}
// StringToChecksumAlgorithm converts string types to AWS S3 ChecksumAlgorithm type for value comparison
func StringToChecksumAlgorithm(alg string) s3types.ChecksumAlgorithm {
return s3types.ChecksumAlgorithm(alg)
func StringToChecksumAlgorithm(s string) s3types.ChecksumAlgorithm {
return s3types.ChecksumAlgorithm(s)
}
// StringToObjectACL converts string types to AWS S3 ACL type for value comparison.
// The conversion itself never fails; validity is checked separately via validateObjectACL.
func StringToObjectACL(acl string) s3types.ObjectCannedACL {
	return s3types.ObjectCannedACL(acl)
}
// StringToEncryptionAlgorithm converts string types to AWS S3 server-side encryption type for value comparison.
// The conversion itself never fails; validity is checked separately via validateSSEAlgorithm.
func StringToEncryptionAlgorithm(alg string) s3types.ServerSideEncryption {
	return s3types.ServerSideEncryption(alg)
}
// FS represents filesystem for backups in S3.
@@ -90,6 +111,11 @@ type FS struct {
// Whether to use HTTP client with tls.InsecureSkipVerify setting
TLSInsecureSkipVerify bool
// SSEKMSKeyId
SSEKMSKeyId string
SSEAlgorithm s3types.ServerSideEncryption
ACL s3types.ObjectCannedACL
s3 *s3.Client
uploader *manager.Uploader
@@ -169,6 +195,15 @@ func (fs *FS) Init(ctx context.Context) error {
if err = validateStorageClass(fs.StorageClass); err != nil {
return err
}
if err = validateChecksumAlgorithm(fs.ChecksumAlgorithm); err != nil {
return err
}
if err = validateObjectACL(fs.ACL); err != nil {
return err
}
if err = validateSSEAlgorithm(fs.SSEAlgorithm); err != nil {
return err
}
// Use AWS client in order to allow SDK to override transport configuration
// based on additional configuration from environment variables.
@@ -322,6 +357,11 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
Metadata: fs.Metadata,
MetadataDirective: s3types.MetadataDirectiveReplace,
Tagging: fs.tags,
ACL: fs.ACL,
}
if len(fs.SSEKMSKeyId) > 0 {
input.SSEKMSKeyId = aws.String(fs.SSEKMSKeyId)
input.ServerSideEncryption = fs.SSEAlgorithm
}
_, err := fs.s3.CopyObject(fs.ctx, input)
@@ -370,6 +410,11 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
Metadata: fs.Metadata,
ChecksumAlgorithm: fs.ChecksumAlgorithm,
Tagging: fs.tags,
ACL: fs.ACL,
}
if len(fs.SSEKMSKeyId) > 0 {
input.SSEKMSKeyId = aws.String(fs.SSEKMSKeyId)
input.ServerSideEncryption = fs.SSEAlgorithm
}
_, err := fs.uploader.Upload(fs.ctx, input)
@@ -463,7 +508,13 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
Metadata: fs.Metadata,
ChecksumAlgorithm: fs.ChecksumAlgorithm,
Tagging: fs.tags,
ACL: fs.ACL,
}
if len(fs.SSEKMSKeyId) > 0 {
input.SSEKMSKeyId = aws.String(fs.SSEKMSKeyId)
input.ServerSideEncryption = fs.SSEAlgorithm
}
_, err := fs.uploader.Upload(fs.ctx, input)
if err != nil {
return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", filePath, fs, path, err)