feat: v2.0 Sprint 2 - Auto-Upload to Cloud (Part 2)

- Add cloud configuration to Config struct
- Integrate automatic upload into backup flow
- Add --cloud-auto-upload flag to all backup commands
- Support environment variables for cloud credentials
- Upload both backup file and metadata to cloud
- Non-blocking: backup succeeds even if cloud upload fails

Usage:
  dbbackup backup single mydb --cloud-auto-upload \
    --cloud-bucket my-backups \
    --cloud-provider s3

Or via environment:
  export CLOUD_ENABLED=true
  export CLOUD_AUTO_UPLOAD=true
  export CLOUD_BUCKET=my-backups
  export AWS_ACCESS_KEY_ID=...
  export AWS_SECRET_ACCESS_KEY=...
  dbbackup backup single mydb

Credentials fall back to AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when CLOUD_ACCESS_KEY / CLOUD_SECRET_KEY are not set.
This commit is contained in:
2025-11-25 19:44:52 +00:00
parent ae3ed1fea1
commit 20b7f1ec04
3 changed files with 150 additions and 0 deletions

View File

@@ -17,6 +17,7 @@ import (
"time"
"dbbackup/internal/checks"
"dbbackup/internal/cloud"
"dbbackup/internal/config"
"dbbackup/internal/database"
"dbbackup/internal/security"
@@ -234,6 +235,14 @@ func (e *Engine) BackupSingle(ctx context.Context, databaseName string) error {
metrics.GlobalMetrics.RecordOperation("backup_single", databaseName, time.Now().Add(-time.Minute), info.Size(), true, 0)
}
// Cloud upload if enabled
if e.cfg.CloudEnabled && e.cfg.CloudAutoUpload {
if err := e.uploadToCloud(ctx, outputFile, tracker); err != nil {
e.log.Warn("Cloud upload failed", "error", err)
// Don't fail the backup if cloud upload fails
}
}
// Complete operation
tracker.UpdateProgress(100, "Backup operation completed successfully")
tracker.Complete(fmt.Sprintf("Single database backup completed: %s", filepath.Base(outputFile)))
@@ -1080,6 +1089,74 @@ func (e *Engine) createClusterMetadata(backupFile string, databases []string, su
return nil
}
// uploadToCloud uploads a backup file (and, if present, its .meta.json
// sidecar) to the configured cloud storage backend.
//
// The upload is tracked as a step on the supplied OperationTracker.
// A failure to upload the metadata sidecar is logged but deliberately
// does not fail the operation; only errors creating the backend,
// statting the backup file, or uploading the backup itself are returned.
func (e *Engine) uploadToCloud(ctx context.Context, backupFile string, tracker *progress.OperationTracker) error {
	uploadStep := tracker.AddStep("cloud_upload", "Uploading to cloud storage")

	// Build the backend config from engine configuration. MinIO requires
	// path-style bucket addressing, hence the provider check.
	cloudCfg := &cloud.Config{
		Provider:   e.cfg.CloudProvider,
		Bucket:     e.cfg.CloudBucket,
		Region:     e.cfg.CloudRegion,
		Endpoint:   e.cfg.CloudEndpoint,
		AccessKey:  e.cfg.CloudAccessKey,
		SecretKey:  e.cfg.CloudSecretKey,
		Prefix:     e.cfg.CloudPrefix,
		UseSSL:     true,
		PathStyle:  e.cfg.CloudProvider == "minio",
		Timeout:    300, // seconds; NOTE(review): consider making this configurable
		MaxRetries: 3,
	}

	backend, err := cloud.NewBackend(cloudCfg)
	if err != nil {
		uploadStep.Fail(fmt.Errorf("failed to create cloud backend: %w", err))
		return err
	}

	// Stat the file up front so we can log its size before transfer.
	info, err := os.Stat(backupFile)
	if err != nil {
		uploadStep.Fail(fmt.Errorf("failed to stat backup file: %w", err))
		return err
	}

	filename := filepath.Base(backupFile)
	e.log.Info("Uploading backup to cloud", "file", filename, "size", cloud.FormatSize(info.Size()))

	// Log progress at 10% increments. Guard against total <= 0: the
	// original divided unconditionally, producing NaN (and an undefined
	// int conversion) for a zero total.
	var lastPercent int
	progressCallback := func(transferred, total int64) {
		if total <= 0 {
			return
		}
		percent := int(float64(transferred) / float64(total) * 100)
		if percent != lastPercent && percent%10 == 0 {
			e.log.Debug("Upload progress", "percent", percent, "transferred", cloud.FormatSize(transferred), "total", cloud.FormatSize(total))
			lastPercent = percent
		}
	}

	// Upload the backup itself; this is the only transfer that can fail
	// the step.
	if err := backend.Upload(ctx, backupFile, filename, progressCallback); err != nil {
		uploadStep.Fail(fmt.Errorf("cloud upload failed: %w", err))
		return err
	}

	// Best-effort upload of the metadata sidecar, if one exists.
	metaFile := backupFile + ".meta.json"
	if _, err := os.Stat(metaFile); err == nil {
		if err := backend.Upload(ctx, metaFile, filepath.Base(metaFile), nil); err != nil {
			e.log.Warn("Failed to upload metadata file", "error", err)
			// Don't fail if metadata upload fails
		}
	}

	uploadStep.Complete(fmt.Sprintf("Uploaded to %s/%s/%s", backend.Name(), e.cfg.CloudBucket, filename))
	e.log.Info("Backup uploaded to cloud", "provider", backend.Name(), "bucket", e.cfg.CloudBucket, "file", filename)
	return nil
}
// executeCommand executes a backup command (optimized for huge databases)
func (e *Engine) executeCommand(ctx context.Context, cmdArgs []string, outputFile string) error {
if len(cmdArgs) == 0 {

View File

@@ -85,6 +85,17 @@ type Config struct {
TUIDryRun bool // TUI dry-run mode (simulate without execution)
TUIVerbose bool // Verbose TUI logging
TUILogFile string // TUI event log file path
// Cloud storage options (v2.0)
CloudEnabled bool // Enable cloud storage integration
CloudProvider string // "s3", "minio", "b2"
CloudBucket string // Bucket name
CloudRegion string // Region (for S3)
CloudEndpoint string // Custom endpoint (for MinIO, B2)
CloudAccessKey string // Access key
CloudSecretKey string // Secret key
CloudPrefix string // Key prefix
CloudAutoUpload bool // Automatically upload after backup
}
// New creates a new configuration with default values
@@ -192,6 +203,17 @@ func New() *Config {
TUIDryRun: getEnvBool("TUI_DRY_RUN", false), // Execute by default
TUIVerbose: getEnvBool("TUI_VERBOSE", false), // Quiet by default
TUILogFile: getEnvString("TUI_LOG_FILE", ""), // No log file by default
// Cloud storage defaults (v2.0)
CloudEnabled: getEnvBool("CLOUD_ENABLED", false),
CloudProvider: getEnvString("CLOUD_PROVIDER", "s3"),
CloudBucket: getEnvString("CLOUD_BUCKET", ""),
CloudRegion: getEnvString("CLOUD_REGION", "us-east-1"),
CloudEndpoint: getEnvString("CLOUD_ENDPOINT", ""),
CloudAccessKey: getEnvString("CLOUD_ACCESS_KEY", getEnvString("AWS_ACCESS_KEY_ID", "")),
CloudSecretKey: getEnvString("CLOUD_SECRET_KEY", getEnvString("AWS_SECRET_ACCESS_KEY", "")),
CloudPrefix: getEnvString("CLOUD_PREFIX", ""),
CloudAutoUpload: getEnvBool("CLOUD_AUTO_UPLOAD", false),
}
// Ensure canonical defaults are enforced