feat: v2.0 Sprint 2 - Cloud Storage Support (Part 1)
- Add AWS SDK v2 for S3 integration
- Implement cloud.Backend interface for multi-provider support
- Add full S3 backend with upload/download/list/delete
- Support MinIO and Backblaze B2 (S3-compatible)
- Implement progress tracking for uploads/downloads
- Add cloud commands: upload, download, list, delete

New commands:
- dbbackup cloud upload [files]            - Upload backups to cloud
- dbbackup cloud download [remote] [local] - Download from cloud
- dbbackup cloud list [prefix]             - List cloud backups
- dbbackup cloud delete [remote]           - Delete from cloud

Configuration via flags or environment:
- --cloud-provider, --cloud-bucket, --cloud-region
- --cloud-endpoint (for MinIO/B2)
- --cloud-access-key, --cloud-secret-key

New packages:
- internal/cloud - Cloud storage abstraction layer
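For reference, a minimal sketch (not part of this commit; bucket, endpoint, and credentials below are placeholders) of driving the new internal/cloud abstraction programmatically, mirroring what the new "dbbackup cloud upload" command does:

package main

import (
	"context"
	"fmt"
	"log"

	"dbbackup/internal/cloud"
)

func main() {
	// Placeholder values; in the CLI these come from --cloud-* flags or DBBACKUP_CLOUD_* env vars.
	cfg := &cloud.Config{
		Provider:  "minio",
		Bucket:    "db-backups",
		Endpoint:  "http://localhost:9000",
		AccessKey: "minioadmin",
		SecretKey: "minioadmin",
		PathStyle: true,
	}

	backend, err := cloud.NewBackend(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Progress callback receives bytes transferred and total bytes.
	progress := func(done, total int64) {
		fmt.Printf("\r%s / %s", cloud.FormatSize(done), cloud.FormatSize(total))
	}

	if err := backend.Upload(context.Background(), "/backups/mydb.dump", "mydb.dump", progress); err != nil {
		log.Fatal(err)
	}
	fmt.Println("\nuploaded")
}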
cmd/cloud.go (new file, +394 lines)
@@ -0,0 +1,394 @@
package cmd

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"dbbackup/internal/cloud"
	"github.com/spf13/cobra"
)

var cloudCmd = &cobra.Command{
	Use:   "cloud",
	Short: "Cloud storage operations",
	Long: `Manage backups in cloud storage (S3, MinIO, Backblaze B2).

Supports:
  - AWS S3
  - MinIO (S3-compatible)
  - Backblaze B2 (S3-compatible)
  - Any S3-compatible storage

Configuration via flags or environment variables:
  --cloud-provider     DBBACKUP_CLOUD_PROVIDER
  --cloud-bucket       DBBACKUP_CLOUD_BUCKET
  --cloud-region       DBBACKUP_CLOUD_REGION
  --cloud-endpoint     DBBACKUP_CLOUD_ENDPOINT
  --cloud-access-key   DBBACKUP_CLOUD_ACCESS_KEY (or AWS_ACCESS_KEY_ID)
  --cloud-secret-key   DBBACKUP_CLOUD_SECRET_KEY (or AWS_SECRET_ACCESS_KEY)`,
}

var cloudUploadCmd = &cobra.Command{
	Use:   "upload [backup-file]",
	Short: "Upload backup to cloud storage",
	Long: `Upload one or more backup files to cloud storage.

Examples:
  # Upload single backup
  dbbackup cloud upload /backups/mydb.dump

  # Upload with progress
  dbbackup cloud upload /backups/mydb.dump --verbose

  # Upload multiple files
  dbbackup cloud upload /backups/*.dump`,
	Args: cobra.MinimumNArgs(1),
	RunE: runCloudUpload,
}

var cloudDownloadCmd = &cobra.Command{
	Use:   "download [remote-file] [local-path]",
	Short: "Download backup from cloud storage",
	Long: `Download a backup file from cloud storage.

Examples:
  # Download to current directory
  dbbackup cloud download mydb.dump .

  # Download to specific path
  dbbackup cloud download mydb.dump /backups/mydb.dump

  # Download with progress
  dbbackup cloud download mydb.dump . --verbose`,
	Args: cobra.ExactArgs(2),
	RunE: runCloudDownload,
}

var cloudListCmd = &cobra.Command{
	Use:   "list [prefix]",
	Short: "List backups in cloud storage",
	Long: `List all backup files in cloud storage.

Examples:
  # List all backups
  dbbackup cloud list

  # List backups with prefix
  dbbackup cloud list mydb_

  # List with detailed information
  dbbackup cloud list --verbose`,
	Args: cobra.MaximumNArgs(1),
	RunE: runCloudList,
}

var cloudDeleteCmd = &cobra.Command{
	Use:   "delete [remote-file]",
	Short: "Delete backup from cloud storage",
	Long: `Delete a backup file from cloud storage.

Examples:
  # Delete single backup (prompts for confirmation)
  dbbackup cloud delete mydb_20251125.dump

  # Delete without confirmation prompt
  dbbackup cloud delete mydb.dump --confirm`,
	Args: cobra.ExactArgs(1),
	RunE: runCloudDelete,
}

var (
	cloudProvider  string
	cloudBucket    string
	cloudRegion    string
	cloudEndpoint  string
	cloudAccessKey string
	cloudSecretKey string
	cloudPrefix    string
	cloudVerbose   bool
	cloudConfirm   bool
)

func init() {
	rootCmd.AddCommand(cloudCmd)
	cloudCmd.AddCommand(cloudUploadCmd, cloudDownloadCmd, cloudListCmd, cloudDeleteCmd)

	// Cloud configuration flags
	for _, cmd := range []*cobra.Command{cloudUploadCmd, cloudDownloadCmd, cloudListCmd, cloudDeleteCmd} {
		cmd.Flags().StringVar(&cloudProvider, "cloud-provider", getEnv("DBBACKUP_CLOUD_PROVIDER", "s3"), "Cloud provider (s3, minio, b2)")
		cmd.Flags().StringVar(&cloudBucket, "cloud-bucket", getEnv("DBBACKUP_CLOUD_BUCKET", ""), "Bucket name")
		cmd.Flags().StringVar(&cloudRegion, "cloud-region", getEnv("DBBACKUP_CLOUD_REGION", "us-east-1"), "Region")
		cmd.Flags().StringVar(&cloudEndpoint, "cloud-endpoint", getEnv("DBBACKUP_CLOUD_ENDPOINT", ""), "Custom endpoint (for MinIO/B2)")
		cmd.Flags().StringVar(&cloudAccessKey, "cloud-access-key", getEnv("DBBACKUP_CLOUD_ACCESS_KEY", getEnv("AWS_ACCESS_KEY_ID", "")), "Access key")
		cmd.Flags().StringVar(&cloudSecretKey, "cloud-secret-key", getEnv("DBBACKUP_CLOUD_SECRET_KEY", getEnv("AWS_SECRET_ACCESS_KEY", "")), "Secret key")
		cmd.Flags().StringVar(&cloudPrefix, "cloud-prefix", getEnv("DBBACKUP_CLOUD_PREFIX", ""), "Key prefix")
		cmd.Flags().BoolVarP(&cloudVerbose, "verbose", "v", false, "Verbose output")
	}

	cloudDeleteCmd.Flags().BoolVar(&cloudConfirm, "confirm", false, "Skip confirmation prompt")
}

func getEnv(key, defaultValue string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return defaultValue
}

func getCloudBackend() (cloud.Backend, error) {
	cfg := &cloud.Config{
		Provider:   cloudProvider,
		Bucket:     cloudBucket,
		Region:     cloudRegion,
		Endpoint:   cloudEndpoint,
		AccessKey:  cloudAccessKey,
		SecretKey:  cloudSecretKey,
		Prefix:     cloudPrefix,
		UseSSL:     true,
		PathStyle:  cloudProvider == "minio",
		Timeout:    300,
		MaxRetries: 3,
	}

	if cfg.Bucket == "" {
		return nil, fmt.Errorf("bucket name is required (use --cloud-bucket or DBBACKUP_CLOUD_BUCKET)")
	}

	backend, err := cloud.NewBackend(cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create cloud backend: %w", err)
	}

	return backend, nil
}

func runCloudUpload(cmd *cobra.Command, args []string) error {
	backend, err := getCloudBackend()
	if err != nil {
		return err
	}

	ctx := context.Background()

	// Expand glob patterns
	var files []string
	for _, pattern := range args {
		matches, err := filepath.Glob(pattern)
		if err != nil {
			return fmt.Errorf("invalid pattern %s: %w", pattern, err)
		}
		if len(matches) == 0 {
			files = append(files, pattern)
		} else {
			files = append(files, matches...)
		}
	}

	fmt.Printf("☁️ Uploading %d file(s) to %s...\n\n", len(files), backend.Name())

	successCount := 0
	for _, localPath := range files {
		filename := filepath.Base(localPath)
		fmt.Printf("📤 %s\n", filename)

		// Progress callback
		var lastPercent int
		progress := func(transferred, total int64) {
			if !cloudVerbose {
				return
			}
			percent := int(float64(transferred) / float64(total) * 100)
			if percent != lastPercent && percent%10 == 0 {
				fmt.Printf(" Progress: %d%% (%s / %s)\n",
					percent,
					cloud.FormatSize(transferred),
					cloud.FormatSize(total))
				lastPercent = percent
			}
		}

		err := backend.Upload(ctx, localPath, filename, progress)
		if err != nil {
			fmt.Printf(" ❌ Failed: %v\n\n", err)
			continue
		}

		// Get file size
		if info, err := os.Stat(localPath); err == nil {
			fmt.Printf(" ✅ Uploaded (%s)\n\n", cloud.FormatSize(info.Size()))
		} else {
			fmt.Printf(" ✅ Uploaded\n\n")
		}
		successCount++
	}

	fmt.Println(strings.Repeat("─", 50))
	fmt.Printf("✅ Successfully uploaded %d/%d file(s)\n", successCount, len(files))

	return nil
}

func runCloudDownload(cmd *cobra.Command, args []string) error {
	backend, err := getCloudBackend()
	if err != nil {
		return err
	}

	ctx := context.Background()
	remotePath := args[0]
	localPath := args[1]

	// If localPath is a directory, use the remote filename
	if info, err := os.Stat(localPath); err == nil && info.IsDir() {
		localPath = filepath.Join(localPath, filepath.Base(remotePath))
	}

	fmt.Printf("☁️ Downloading from %s...\n\n", backend.Name())
	fmt.Printf("📥 %s → %s\n", remotePath, localPath)

	// Progress callback
	var lastPercent int
	progress := func(transferred, total int64) {
		if !cloudVerbose {
			return
		}
		percent := int(float64(transferred) / float64(total) * 100)
		if percent != lastPercent && percent%10 == 0 {
			fmt.Printf(" Progress: %d%% (%s / %s)\n",
				percent,
				cloud.FormatSize(transferred),
				cloud.FormatSize(total))
			lastPercent = percent
		}
	}

	err = backend.Download(ctx, remotePath, localPath, progress)
	if err != nil {
		return fmt.Errorf("download failed: %w", err)
	}

	// Get file size
	if info, err := os.Stat(localPath); err == nil {
		fmt.Printf(" ✅ Downloaded (%s)\n", cloud.FormatSize(info.Size()))
	} else {
		fmt.Printf(" ✅ Downloaded\n")
	}

	return nil
}

func runCloudList(cmd *cobra.Command, args []string) error {
	backend, err := getCloudBackend()
	if err != nil {
		return err
	}

	ctx := context.Background()
	prefix := ""
	if len(args) > 0 {
		prefix = args[0]
	}

	fmt.Printf("☁️ Listing backups in %s/%s...\n\n", backend.Name(), cloudBucket)

	backups, err := backend.List(ctx, prefix)
	if err != nil {
		return fmt.Errorf("failed to list backups: %w", err)
	}

	if len(backups) == 0 {
		fmt.Println("No backups found")
		return nil
	}

	var totalSize int64
	for _, backup := range backups {
		totalSize += backup.Size

		if cloudVerbose {
			fmt.Printf("📦 %s\n", backup.Name)
			fmt.Printf(" Size: %s\n", cloud.FormatSize(backup.Size))
			fmt.Printf(" Modified: %s\n", backup.LastModified.Format(time.RFC3339))
			if backup.StorageClass != "" {
				fmt.Printf(" Storage: %s\n", backup.StorageClass)
			}
			fmt.Println()
		} else {
			age := time.Since(backup.LastModified)
			ageStr := formatAge(age)
			fmt.Printf("%-50s %12s %s\n",
				backup.Name,
				cloud.FormatSize(backup.Size),
				ageStr)
		}
	}

	fmt.Println(strings.Repeat("─", 50))
	fmt.Printf("Total: %d backup(s), %s\n", len(backups), cloud.FormatSize(totalSize))

	return nil
}

func runCloudDelete(cmd *cobra.Command, args []string) error {
	backend, err := getCloudBackend()
	if err != nil {
		return err
	}

	ctx := context.Background()
	remotePath := args[0]

	// Check if file exists
	exists, err := backend.Exists(ctx, remotePath)
	if err != nil {
		return fmt.Errorf("failed to check file: %w", err)
	}
	if !exists {
		return fmt.Errorf("file not found: %s", remotePath)
	}

	// Get file info
	size, err := backend.GetSize(ctx, remotePath)
	if err != nil {
		return fmt.Errorf("failed to get file info: %w", err)
	}

	// Confirmation prompt
	if !cloudConfirm {
		fmt.Printf("⚠️ Delete %s (%s) from cloud storage?\n", remotePath, cloud.FormatSize(size))
		fmt.Print("Type 'yes' to confirm: ")
		var response string
		fmt.Scanln(&response)
		if response != "yes" {
			fmt.Println("Cancelled")
			return nil
		}
	}

	fmt.Printf("🗑️ Deleting %s...\n", remotePath)

	err = backend.Delete(ctx, remotePath)
	if err != nil {
		return fmt.Errorf("delete failed: %w", err)
	}

	fmt.Printf("✅ Deleted %s (%s)\n", remotePath, cloud.FormatSize(size))

	return nil
}

func formatAge(d time.Duration) string {
	if d < time.Minute {
		return "just now"
	} else if d < time.Hour {
		return fmt.Sprintf("%d min ago", int(d.Minutes()))
	} else if d < 24*time.Hour {
		return fmt.Sprintf("%d hours ago", int(d.Hours()))
	} else {
		return fmt.Sprintf("%d days ago", int(d.Hours()/24))
	}
}
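A note on the flag defaults wired in init() above: every --cloud-* flag falls back to a DBBACKUP_CLOUD_* environment variable, and the key flags additionally fall back to the standard AWS variables. A tiny sketch (getEnv duplicated from the file above, env values arbitrary) showing the resulting precedence:

package main

import (
	"fmt"
	"os"
)

// getEnv mirrors the helper in cmd/cloud.go: the env value wins only if non-empty.
func getEnv(key, defaultValue string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return defaultValue
}

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "from-aws-env")
	os.Setenv("DBBACKUP_CLOUD_ACCESS_KEY", "from-dbbackup-env")

	// Same nesting as the --cloud-access-key default above:
	// DBBACKUP_CLOUD_ACCESS_KEY beats AWS_ACCESS_KEY_ID, and an explicit
	// --cloud-access-key value on the command line would override both.
	fmt.Println(getEnv("DBBACKUP_CLOUD_ACCESS_KEY", getEnv("AWS_ACCESS_KEY_ID", "")))
	// Output: from-dbbackup-env
}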
go.mod (+19 lines)
@@ -18,6 +18,25 @@ require (
require (
	filippo.io/edwards25519 v1.1.0 // indirect
	github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.32.1 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.19.1 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.14 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14 // indirect
	github.com/aws/aws-sdk-go-v2/service/s3 v1.92.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/signin v1.0.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.30.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.9 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.41.1 // indirect
	github.com/aws/smithy-go v1.23.2 // indirect
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
	github.com/charmbracelet/x/ansi v0.10.1 // indirect
go.sum (+38 lines)
@@ -2,6 +2,44 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y=
github.com/aws/aws-sdk-go-v2/config v1.32.1 h1:iODUDLgk3q8/flEC7ymhmxjfoAnBDwEEYEVyKZ9mzjU=
github.com/aws/aws-sdk-go-v2/config v1.32.1/go.mod h1:xoAgo17AGrPpJBSLg81W+ikM0cpOZG8ad04T2r+d5P0=
github.com/aws/aws-sdk-go-v2/credentials v1.19.1 h1:JeW+EwmtTE0yXFK8SmklrFh/cGTTXsQJumgMZNlbxfM=
github.com/aws/aws-sdk-go-v2/credentials v1.19.1/go.mod h1:BOoXiStwTF+fT2XufhO0Efssbi1CNIO/ZXpZu87N0pw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.14 h1:ITi7qiDSv/mSGDSWNpZ4k4Ve0DQR6Ug2SJQ8zEHoDXg=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.14/go.mod h1:k1xtME53H1b6YpZt74YmwlONMWf4ecM+lut1WQLAF/U=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.5 h1:Hjkh7kE6D81PgrHlE/m9gx+4TyyeLHuY8xJs7yXN5C4=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.5/go.mod h1:nPRXgyCfAurhyaTMoBMwRBYBhaHI4lNPAnJmjM0Tslc=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14 h1:FzQE21lNtUor0Fb7QNgnEyiRCBlolLTX/Z1j65S7teM=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14/go.mod h1:s1ydyWG9pm3ZwmmYN21HKyG9WzAZhYVW85wMHs5FV6w=
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.0 h1:8FshVvnV2sr9kOSAbOnc/vwVmmAwMjOedKH6JW2ddPM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.0/go.mod h1:wYNqY3L02Z3IgRYxOBPH9I1zD9Cjh9hI5QOy/eOjQvw=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.1 h1:BDgIUYGEo5TkayOWv/oBLPphWwNm/A91AebUjAu5L5g=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.1/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.4 h1:U//SlnkE1wOQiIImxzdY5PXat4Wq+8rlfVEw4Y7J8as=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.4/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.9 h1:LU8S9W/mPDAU9q0FjCLi0TrCheLMGwzbRpvUMwYspcA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.9/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.1 h1:GdGmKtG+/Krag7VfyOXV17xjTCz0i9NT+JnqLTOI5nA=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.1/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
internal/cloud/interface.go (new file, +167 lines)
@@ -0,0 +1,167 @@
package cloud

import (
	"context"
	"fmt"
	"io"
	"time"
)

// Backend defines the interface for cloud storage providers
type Backend interface {
	// Upload uploads a file to cloud storage
	Upload(ctx context.Context, localPath, remotePath string, progress ProgressCallback) error

	// Download downloads a file from cloud storage
	Download(ctx context.Context, remotePath, localPath string, progress ProgressCallback) error

	// List lists all backup files in cloud storage
	List(ctx context.Context, prefix string) ([]BackupInfo, error)

	// Delete deletes a file from cloud storage
	Delete(ctx context.Context, remotePath string) error

	// Exists checks if a file exists in cloud storage
	Exists(ctx context.Context, remotePath string) (bool, error)

	// GetSize returns the size of a remote file
	GetSize(ctx context.Context, remotePath string) (int64, error)

	// Name returns the backend name (e.g., "s3", "azure", "gcs")
	Name() string
}

// BackupInfo contains information about a backup in cloud storage
type BackupInfo struct {
	Key          string    // Full path/key in cloud storage
	Name         string    // Base filename
	Size         int64     // Size in bytes
	LastModified time.Time // Last modification time
	ETag         string    // Entity tag (version identifier)
	StorageClass string    // Storage class (e.g., STANDARD, GLACIER)
}

// ProgressCallback is called during upload/download to report progress
type ProgressCallback func(bytesTransferred, totalBytes int64)

// Config contains common configuration for cloud backends
type Config struct {
	Provider    string // "s3", "minio", "azure", "gcs", "b2"
	Bucket      string // Bucket or container name
	Region      string // Region (for S3)
	Endpoint    string // Custom endpoint (for MinIO, S3-compatible)
	AccessKey   string // Access key or account ID
	SecretKey   string // Secret key or access token
	UseSSL      bool   // Use SSL/TLS (default: true)
	PathStyle   bool   // Use path-style addressing (for MinIO)
	Prefix      string // Prefix for all operations (e.g., "backups/")
	Timeout     int    // Timeout in seconds (default: 300)
	MaxRetries  int    // Maximum retry attempts (default: 3)
	Concurrency int    // Upload/download concurrency (default: 5)
}

// NewBackend creates a new cloud storage backend based on the provider
func NewBackend(cfg *Config) (Backend, error) {
	switch cfg.Provider {
	case "s3", "aws":
		return NewS3Backend(cfg)
	case "minio":
		// MinIO uses S3 backend with custom endpoint
		cfg.PathStyle = true
		if cfg.Endpoint == "" {
			return nil, fmt.Errorf("endpoint required for MinIO")
		}
		return NewS3Backend(cfg)
	case "b2", "backblaze":
		// Backblaze B2 uses S3-compatible API
		cfg.PathStyle = false
		if cfg.Endpoint == "" {
			return nil, fmt.Errorf("endpoint required for Backblaze B2")
		}
		return NewS3Backend(cfg)
	default:
		return nil, fmt.Errorf("unsupported cloud provider: %s (supported: s3, minio, b2)", cfg.Provider)
	}
}

// FormatSize returns human-readable size
func FormatSize(bytes int64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	div, exp := int64(unit), 0
	for n := bytes / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %ciB", float64(bytes)/float64(div), "KMGTPE"[exp])
}

// DefaultConfig returns a config with sensible defaults
func DefaultConfig() *Config {
	return &Config{
		Provider:    "s3",
		UseSSL:      true,
		PathStyle:   false,
		Timeout:     300,
		MaxRetries:  3,
		Concurrency: 5,
	}
}

// Validate checks if the configuration is valid
func (c *Config) Validate() error {
	if c.Provider == "" {
		return fmt.Errorf("provider is required")
	}
	if c.Bucket == "" {
		return fmt.Errorf("bucket name is required")
	}
	if c.Provider == "s3" || c.Provider == "aws" {
		if c.Region == "" && c.Endpoint == "" {
			return fmt.Errorf("region or endpoint is required for S3")
		}
	}
	if c.Provider == "minio" || c.Provider == "b2" {
		if c.Endpoint == "" {
			return fmt.Errorf("endpoint is required for %s", c.Provider)
		}
	}
	return nil
}

// ProgressReader wraps an io.Reader to track progress
type ProgressReader struct {
	reader     io.Reader
	total      int64
	read       int64
	callback   ProgressCallback
	lastReport time.Time
}

// NewProgressReader creates a progress tracking reader
func NewProgressReader(r io.Reader, total int64, callback ProgressCallback) *ProgressReader {
	return &ProgressReader{
		reader:     r,
		total:      total,
		callback:   callback,
		lastReport: time.Now(),
	}
}

func (pr *ProgressReader) Read(p []byte) (int, error) {
	n, err := pr.reader.Read(p)
	pr.read += int64(n)

	// Report progress every 100ms or when complete
	now := time.Now()
	if now.Sub(pr.lastReport) > 100*time.Millisecond || err == io.EOF {
		if pr.callback != nil {
			pr.callback(pr.read, pr.total)
		}
		pr.lastReport = now
	}

	return n, err
}
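ProgressReader is what the S3 backend wraps around local files and response bodies; a small sketch (data size and callback body are arbitrary) of how it drives a ProgressCallback when pumped through io.Copy, reporting at most every 100ms and once more at EOF:

package main

import (
	"bytes"
	"fmt"
	"io"

	"dbbackup/internal/cloud"
)

func main() {
	payload := bytes.Repeat([]byte("x"), 4<<20) // 4 MiB of dummy data

	// Wrap any io.Reader; the callback fires when >100ms have passed since the
	// last report, and again at EOF with the final byte count.
	pr := cloud.NewProgressReader(bytes.NewReader(payload), int64(len(payload)),
		func(done, total int64) {
			fmt.Printf("\r%s / %s", cloud.FormatSize(done), cloud.FormatSize(total))
		})

	if _, err := io.Copy(io.Discard, pr); err != nil {
		fmt.Println("copy failed:", err)
	}
	fmt.Println()
}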
internal/cloud/s3.go (new file, +324 lines)
@@ -0,0 +1,324 @@
package cloud

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// S3Backend implements the Backend interface for AWS S3 and compatible services
type S3Backend struct {
	client *s3.Client
	bucket string
	prefix string
	config *Config
}

// NewS3Backend creates a new S3 backend
func NewS3Backend(cfg *Config) (*S3Backend, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("invalid config: %w", err)
	}

	ctx := context.Background()

	// Build AWS config
	var awsCfg aws.Config
	var err error

	if cfg.AccessKey != "" && cfg.SecretKey != "" {
		// Use explicit credentials
		credsProvider := credentials.NewStaticCredentialsProvider(
			cfg.AccessKey,
			cfg.SecretKey,
			"",
		)

		awsCfg, err = config.LoadDefaultConfig(ctx,
			config.WithCredentialsProvider(credsProvider),
			config.WithRegion(cfg.Region),
		)
	} else {
		// Use default credential chain (environment, IAM role, etc.)
		awsCfg, err = config.LoadDefaultConfig(ctx,
			config.WithRegion(cfg.Region),
		)
	}

	if err != nil {
		return nil, fmt.Errorf("failed to load AWS config: %w", err)
	}

	// Create S3 client with custom options
	clientOptions := []func(*s3.Options){
		func(o *s3.Options) {
			if cfg.Endpoint != "" {
				o.BaseEndpoint = aws.String(cfg.Endpoint)
			}
			if cfg.PathStyle {
				o.UsePathStyle = true
			}
		},
	}

	client := s3.NewFromConfig(awsCfg, clientOptions...)

	return &S3Backend{
		client: client,
		bucket: cfg.Bucket,
		prefix: cfg.Prefix,
		config: cfg,
	}, nil
}

// Name returns the backend name
func (s *S3Backend) Name() string {
	return "s3"
}

// buildKey creates the full S3 key from filename
func (s *S3Backend) buildKey(filename string) string {
	if s.prefix == "" {
		return filename
	}
	return filepath.Join(s.prefix, filename)
}

// Upload uploads a file to S3
func (s *S3Backend) Upload(ctx context.Context, localPath, remotePath string, progress ProgressCallback) error {
	// Open local file
	file, err := os.Open(localPath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()

	// Get file size
	stat, err := file.Stat()
	if err != nil {
		return fmt.Errorf("failed to stat file: %w", err)
	}
	fileSize := stat.Size()

	// Create progress reader
	var reader io.Reader = file
	if progress != nil {
		reader = NewProgressReader(file, fileSize, progress)
	}

	// Build S3 key
	key := s.buildKey(remotePath)

	// Upload to S3
	_, err = s.client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
		Body:   reader,
	})

	if err != nil {
		return fmt.Errorf("failed to upload to S3: %w", err)
	}

	return nil
}

// Download downloads a file from S3
func (s *S3Backend) Download(ctx context.Context, remotePath, localPath string, progress ProgressCallback) error {
	// Build S3 key
	key := s.buildKey(remotePath)

	// Get object size first
	size, err := s.GetSize(ctx, remotePath)
	if err != nil {
		return fmt.Errorf("failed to get object size: %w", err)
	}

	// Download from S3
	result, err := s.client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return fmt.Errorf("failed to download from S3: %w", err)
	}
	defer result.Body.Close()

	// Create local file
	if err := os.MkdirAll(filepath.Dir(localPath), 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}

	outFile, err := os.Create(localPath)
	if err != nil {
		return fmt.Errorf("failed to create local file: %w", err)
	}
	defer outFile.Close()

	// Copy with progress tracking
	var reader io.Reader = result.Body
	if progress != nil {
		reader = NewProgressReader(result.Body, size, progress)
	}

	_, err = io.Copy(outFile, reader)
	if err != nil {
		return fmt.Errorf("failed to write file: %w", err)
	}

	return nil
}

// List lists all backup files in S3
func (s *S3Backend) List(ctx context.Context, prefix string) ([]BackupInfo, error) {
	// Build full prefix
	fullPrefix := s.buildKey(prefix)

	// List objects
	result, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket: aws.String(s.bucket),
		Prefix: aws.String(fullPrefix),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to list objects: %w", err)
	}

	// Convert to BackupInfo
	var backups []BackupInfo
	for _, obj := range result.Contents {
		if obj.Key == nil {
			continue
		}

		key := *obj.Key
		name := filepath.Base(key)

		// Skip if it's just a directory marker
		if strings.HasSuffix(key, "/") {
			continue
		}

		info := BackupInfo{
			Key:          key,
			Name:         name,
			Size:         *obj.Size,
			LastModified: *obj.LastModified,
		}

		if obj.ETag != nil {
			info.ETag = *obj.ETag
		}

		if obj.StorageClass != "" {
			info.StorageClass = string(obj.StorageClass)
		} else {
			info.StorageClass = "STANDARD"
		}

		backups = append(backups, info)
	}

	return backups, nil
}

// Delete deletes a file from S3
func (s *S3Backend) Delete(ctx context.Context, remotePath string) error {
	key := s.buildKey(remotePath)

	_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	})

	if err != nil {
		return fmt.Errorf("failed to delete object: %w", err)
	}

	return nil
}

// Exists checks if a file exists in S3
func (s *S3Backend) Exists(ctx context.Context, remotePath string) (bool, error) {
	key := s.buildKey(remotePath)

	_, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	})

	if err != nil {
		// Check if it's a "not found" error
		if strings.Contains(err.Error(), "NotFound") || strings.Contains(err.Error(), "404") {
			return false, nil
		}
		return false, fmt.Errorf("failed to check object existence: %w", err)
	}

	return true, nil
}

// GetSize returns the size of a remote file
func (s *S3Backend) GetSize(ctx context.Context, remotePath string) (int64, error) {
	key := s.buildKey(remotePath)

	result, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	})

	if err != nil {
		return 0, fmt.Errorf("failed to get object metadata: %w", err)
	}

	if result.ContentLength == nil {
		return 0, fmt.Errorf("content length not available")
	}

	return *result.ContentLength, nil
}

// BucketExists checks if the bucket exists and is accessible
func (s *S3Backend) BucketExists(ctx context.Context) (bool, error) {
	_, err := s.client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: aws.String(s.bucket),
	})

	if err != nil {
		if strings.Contains(err.Error(), "NotFound") || strings.Contains(err.Error(), "404") {
			return false, nil
		}
		return false, fmt.Errorf("failed to check bucket: %w", err)
	}

	return true, nil
}

// CreateBucket creates the bucket if it doesn't exist
func (s *S3Backend) CreateBucket(ctx context.Context) error {
	exists, err := s.BucketExists(ctx)
	if err != nil {
		return err
	}

	if exists {
		return nil
	}

	_, err = s.client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String(s.bucket),
	})

	if err != nil {
		return fmt.Errorf("failed to create bucket: %w", err)
	}

	return nil
}
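BucketExists and CreateBucket are defined here but not yet wired into the new cloud commands; a hedged sketch of calling them directly to bootstrap a bucket before the first upload (bucket name and region are placeholders; empty keys fall back to the AWS default credential chain):

package main

import (
	"context"
	"log"

	"dbbackup/internal/cloud"
)

func main() {
	ctx := context.Background()

	cfg := &cloud.Config{
		Provider: "s3",
		Bucket:   "db-backups", // placeholder
		Region:   "us-east-1",
		// AccessKey/SecretKey left empty: NewS3Backend then uses the default
		// AWS credential chain (environment, shared config, IAM role).
	}

	backend, err := cloud.NewS3Backend(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Create the bucket on first use; CreateBucket returns nil if it already exists.
	if err := backend.CreateBucket(ctx); err != nil {
		log.Fatal(err)
	}

	exists, err := backend.Exists(ctx, "mydb.dump")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("mydb.dump present: %v", exists)
}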