diff --git a/cmd/backup.go b/cmd/backup.go
index 8d1bcbd..0525498 100755
--- a/cmd/backup.go
+++ b/cmd/backup.go
@@ -3,6 +3,7 @@ package cmd
 import (
 	"fmt"
 
+	"dbbackup/internal/cloud"
 	"github.com/spf13/cobra"
 )
 
@@ -92,6 +93,7 @@ func init() {
 
 	// Cloud storage flags for all backup commands
 	for _, cmd := range []*cobra.Command{clusterCmd, singleCmd, sampleCmd} {
+		cmd.Flags().String("cloud", "", "Cloud storage URI (e.g., s3://bucket/path) - takes precedence over individual flags")
 		cmd.Flags().Bool("cloud-auto-upload", false, "Automatically upload backup to cloud after completion")
 		cmd.Flags().String("cloud-provider", "", "Cloud provider (s3, minio, b2)")
 		cmd.Flags().String("cloud-bucket", "", "Cloud bucket name")
@@ -109,32 +111,39 @@ func init() {
 			}
 		}
 
-		// Update cloud config from flags
-		if c.Flags().Changed("cloud-auto-upload") {
-			if autoUpload, _ := c.Flags().GetBool("cloud-auto-upload"); autoUpload {
-				cfg.CloudEnabled = true
-				cfg.CloudAutoUpload = true
-			}
-		}
-
-		if c.Flags().Changed("cloud-provider") {
-			cfg.CloudProvider, _ = c.Flags().GetString("cloud-provider")
-		}
-
-		if c.Flags().Changed("cloud-bucket") {
-			cfg.CloudBucket, _ = c.Flags().GetString("cloud-bucket")
-		}
-
-		if c.Flags().Changed("cloud-region") {
-			cfg.CloudRegion, _ = c.Flags().GetString("cloud-region")
-		}
-
-		if c.Flags().Changed("cloud-endpoint") {
-			cfg.CloudEndpoint, _ = c.Flags().GetString("cloud-endpoint")
-		}
-
-		if c.Flags().Changed("cloud-prefix") {
-			cfg.CloudPrefix, _ = c.Flags().GetString("cloud-prefix")
-		}
+		// Check if --cloud URI flag is provided (takes precedence)
+		if c.Flags().Changed("cloud") {
+			if err := parseCloudURIFlag(c); err != nil {
+				return err
+			}
+		} else {
+			// Update cloud config from individual flags
+			if c.Flags().Changed("cloud-auto-upload") {
+				if autoUpload, _ := c.Flags().GetBool("cloud-auto-upload"); autoUpload {
+					cfg.CloudEnabled = true
+					cfg.CloudAutoUpload = true
+				}
+			}
+
+			if c.Flags().Changed("cloud-provider") {
+				cfg.CloudProvider, _ = c.Flags().GetString("cloud-provider")
+			}
+
+			if c.Flags().Changed("cloud-bucket") {
+				cfg.CloudBucket, _ = c.Flags().GetString("cloud-bucket")
+			}
+
+			if c.Flags().Changed("cloud-region") {
+				cfg.CloudRegion, _ = c.Flags().GetString("cloud-region")
+			}
+
+			if c.Flags().Changed("cloud-endpoint") {
+				cfg.CloudEndpoint, _ = c.Flags().GetString("cloud-endpoint")
+			}
+
+			if c.Flags().Changed("cloud-prefix") {
+				cfg.CloudPrefix, _ = c.Flags().GetString("cloud-prefix")
+			}
+		}
 
 		return nil
@@ -177,4 +186,40 @@ func init() {
 
 	// Mark the strategy flags as mutually exclusive
 	sampleCmd.MarkFlagsMutuallyExclusive("sample-ratio", "sample-percent", "sample-count")
+}
+
+// parseCloudURIFlag parses the --cloud URI flag and updates config
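+// It enables cloud upload automatically, copies the provider and bucket
+// from the URI, and fills in region, endpoint, and prefix when the URI
+// carries them.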
+func parseCloudURIFlag(cmd *cobra.Command) error {
+	cloudURI, _ := cmd.Flags().GetString("cloud")
+	if cloudURI == "" {
+		return nil
+	}
+
+	// Parse cloud URI
+	uri, err := cloud.ParseCloudURI(cloudURI)
+	if err != nil {
+		return fmt.Errorf("invalid cloud URI: %w", err)
+	}
+
+	// Enable cloud and auto-upload
+	cfg.CloudEnabled = true
+	cfg.CloudAutoUpload = true
+
+	// Update config from URI
+	cfg.CloudProvider = uri.Provider
+	cfg.CloudBucket = uri.Bucket
+
+	if uri.Region != "" {
+		cfg.CloudRegion = uri.Region
+	}
+
+	if uri.Endpoint != "" {
+		cfg.CloudEndpoint = uri.Endpoint
+	}
+
+	if uri.Path != "" {
+		cfg.CloudPrefix = uri.Dir()
+	}
+
+	return nil
+}
\ No newline at end of file
diff --git a/cmd/cleanup.go b/cmd/cleanup.go
index 1d2dc35..55c9a55 100644
--- a/cmd/cleanup.go
+++ b/cmd/cleanup.go
@@ -1,11 +1,14 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 
+	"dbbackup/internal/cloud"
 	"dbbackup/internal/metadata"
 	"dbbackup/internal/retention"
 	"github.com/spf13/cobra"
@@ -53,7 +56,15 @@ func init() {
 }
 
 func runCleanup(cmd *cobra.Command, args []string) error {
-	backupDir := args[0]
+	backupPath := args[0]
+
+	// Check if this is a cloud URI
+	if isCloudURIPath(backupPath) {
+		return runCloudCleanup(cmd.Context(), backupPath)
+	}
+
+	// Local cleanup
+	backupDir := backupPath
 
 	// Validate directory exists
 	if !dirExists(backupDir) {
@@ -150,3 +161,174 @@ func dirExists(path string) bool {
 	}
 	return info.IsDir()
 }
+
+// isCloudURIPath checks if a path is a cloud URI
+func isCloudURIPath(s string) bool {
+	return cloud.IsCloudURI(s)
+}
+
+// runCloudCleanup applies retention policy to cloud storage
+func runCloudCleanup(ctx context.Context, uri string) error {
+	// Parse cloud URI
+	cloudURI, err := cloud.ParseCloudURI(uri)
+	if err != nil {
+		return fmt.Errorf("invalid cloud URI: %w", err)
+	}
+
+	fmt.Printf("ā˜ļø Cloud Cleanup Policy:\n")
+	fmt.Printf(" URI: %s\n", uri)
+	fmt.Printf(" Provider: %s\n", cloudURI.Provider)
+	fmt.Printf(" Bucket: %s\n", cloudURI.Bucket)
+	if cloudURI.Path != "" {
+		fmt.Printf(" Prefix: %s\n", cloudURI.Path)
+	}
+	fmt.Printf(" Retention: %d days\n", retentionDays)
+	fmt.Printf(" Min backups: %d\n", minBackups)
+	if dryRun {
+		fmt.Printf(" Mode: DRY RUN (no files will be deleted)\n")
+	}
+	fmt.Println()
+
+	// Create cloud backend from the URI-derived config (named cloudCfg so
+	// it does not shadow the package-level cfg)
+	cloudCfg := cloudURI.ToConfig()
+	backend, err := cloud.NewBackend(cloudCfg)
+	if err != nil {
+		return fmt.Errorf("failed to create cloud backend: %w", err)
+	}
+
+	// List all backups
+	backups, err := backend.List(ctx, cloudURI.Path)
+	if err != nil {
+		return fmt.Errorf("failed to list cloud backups: %w", err)
+	}
+
+	if len(backups) == 0 {
+		fmt.Println("No backups found in cloud storage")
+		return nil
+	}
+
+	fmt.Printf("Found %d backup(s) in cloud storage\n\n", len(backups))
+
+	// Filter backups based on pattern if specified
+	var filteredBackups []cloud.BackupInfo
+	if cleanupPattern != "" {
+		for _, backup := range backups {
+			matched, _ := filepath.Match(cleanupPattern, backup.Name)
+			if matched {
+				filteredBackups = append(filteredBackups, backup)
+			}
+		}
+		fmt.Printf("Pattern matched %d backup(s)\n\n", len(filteredBackups))
+	} else {
+		filteredBackups = backups
+	}
+
+	// backend.List returns backups sorted by modification time (oldest
+	// first), so no extra sort is needed here.
+
+	// Calculate retention date
+	cutoffDate := time.Now().AddDate(0, 0, -retentionDays)
+
+	// Determine which backups to delete
+	var toDelete []cloud.BackupInfo
+	var toKeep []cloud.BackupInfo
+
+	for _, backup := range filteredBackups {
+		if backup.LastModified.Before(cutoffDate) {
+			toDelete = append(toDelete, backup)
+		} else {
+			toKeep = append(toKeep, backup)
+		}
+	}
+
+	// Ensure we keep the minimum number of backups
+	totalBackups := len(filteredBackups)
+	if totalBackups-len(toDelete) < minBackups {
+		// Need to keep more backups
+		keepCount := minBackups - len(toKeep)
+		if keepCount > len(toDelete) {
+			keepCount = len(toDelete)
+		}
+
+		// Move the newest entries from toDelete back to toKeep; the loop
+		// bound is fixed up front because toDelete shrinks on every pass.
+		for j := 0; j < keepCount; j++ {
+			last := len(toDelete) - 1
+			toKeep = append(toKeep, toDelete[last])
+			toDelete = toDelete[:last]
+		}
+	}
+
+	// Display results
+	fmt.Printf("šŸ“Š Results:\n")
+	fmt.Printf(" Total backups: %d\n", totalBackups)
+	fmt.Printf(" Eligible for deletion: %d\n", len(toDelete))
+	fmt.Printf(" Will keep: %d\n", len(toKeep))
+	fmt.Println()
+
+	if len(toDelete) > 0 {
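+		// The listing below is printed for dry runs and real runs alike;
+		// only the actual Delete calls are skipped when dry-run is set.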
+		if dryRun {
+			fmt.Printf("šŸ” Would delete %d backup(s):\n", len(toDelete))
+		} else {
+			fmt.Printf("šŸ—‘ļø Deleting %d backup(s):\n", len(toDelete))
+		}
+
+		var totalSize int64
+		var deletedCount int
+
+		for _, backup := range toDelete {
+			fmt.Printf(" - %s (%s, %s old)\n",
+				backup.Name,
+				cloud.FormatSize(backup.Size),
+				formatBackupAge(backup.LastModified))
+
+			totalSize += backup.Size
+
+			if !dryRun {
+				if err := backend.Delete(ctx, backup.Key); err != nil {
+					fmt.Printf(" āŒ Error: %v\n", err)
+				} else {
+					deletedCount++
+					// Also delete the metadata file (best-effort)
+					_ = backend.Delete(ctx, backup.Key+".meta.json")
+				}
+			}
+		}
+
+		fmt.Printf("\nšŸ’¾ Space %s: %s\n",
+			map[bool]string{true: "would be freed", false: "freed"}[dryRun],
+			cloud.FormatSize(totalSize))
+
+		if !dryRun && deletedCount > 0 {
+			fmt.Printf("āœ… Successfully deleted %d backup(s)\n", deletedCount)
+		}
+	} else {
+		fmt.Println("No backups eligible for deletion")
+	}
+
+	return nil
+}
+
+// formatBackupAge returns a human-readable age string from a time.Time
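+// Ages are approximate: months count as 30 days and years as 365.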
+func formatBackupAge(t time.Time) string {
+	d := time.Since(t)
+	days := int(d.Hours() / 24)
+
+	if days == 0 {
+		return "today"
+	} else if days == 1 {
+		return "1 day"
+	} else if days < 30 {
+		return fmt.Sprintf("%d days", days)
+	} else if days < 365 {
+		months := days / 30
+		if months == 1 {
+			return "1 month"
+		}
+		return fmt.Sprintf("%d months", months)
+	} else {
+		years := days / 365
+		if years == 1 {
+			return "1 year"
+		}
+		return fmt.Sprintf("%d years", years)
+	}
+}
diff --git a/cmd/restore.go b/cmd/restore.go
index 66f1f1e..2cdb7da 100755
--- a/cmd/restore.go
+++ b/cmd/restore.go
@@ -10,6 +10,7 @@ import (
 	"syscall"
 	"time"
 
+	"dbbackup/internal/cloud"
 	"dbbackup/internal/database"
 	"dbbackup/internal/restore"
 	"dbbackup/internal/security"
@@ -168,19 +169,49 @@ func init() {
 // runRestoreSingle restores a single database
 func runRestoreSingle(cmd *cobra.Command, args []string) error {
 	archivePath := args[0]
-
-	// Convert to absolute path
-	if !filepath.IsAbs(archivePath) {
-		absPath, err := filepath.Abs(archivePath)
+
+	// Check if this is a cloud URI
+	var cleanupFunc func() error
+
+	if cloud.IsCloudURI(archivePath) {
+		log.Info("Detected cloud URI, downloading backup...", "uri", archivePath)
+
+		// Download from cloud
+		result, err := restore.DownloadFromCloudURI(cmd.Context(), archivePath, restore.DownloadOptions{
+			VerifyChecksum: true,
+			KeepLocal:      false, // Delete after restore
+		})
 		if err != nil {
-			return fmt.Errorf("invalid archive path: %w", err)
+			return fmt.Errorf("failed to download from cloud: %w", err)
+		}
+
+		archivePath = result.LocalPath
+		cleanupFunc = result.Cleanup
+
+		// Ensure cleanup happens on exit
+		defer func() {
+			if cleanupFunc != nil {
+				if err := cleanupFunc(); err != nil {
+					log.Warn("Failed to cleanup temp files", "error", err)
+				}
+			}
+		}()
+
+		log.Info("Download completed", "local_path", archivePath)
+	} else {
+		// Convert to absolute path for local files
+		if !filepath.IsAbs(archivePath) {
+			absPath, err := filepath.Abs(archivePath)
+			if err != nil {
+				return fmt.Errorf("invalid archive path: %w", err)
+			}
+			archivePath = absPath
 		}
-		archivePath = absPath
-	}
 
-	// Check if file exists
-	if _, err := os.Stat(archivePath); err != nil {
-		return fmt.Errorf("archive not found: %s", archivePath)
+		// Check if file exists
+		if _, err := os.Stat(archivePath); err != nil {
+			return fmt.Errorf("archive not found: %s", archivePath)
+		}
 	}
 
 	// Detect format
diff --git a/cmd/verify.go b/cmd/verify.go
index b6743ae..eef9e50 100644
--- a/cmd/verify.go
+++ b/cmd/verify.go
@@ -1,13 +1,16 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
 	"time"
 
+	"dbbackup/internal/cloud"
 	"dbbackup/internal/metadata"
+	"dbbackup/internal/restore"
 	"dbbackup/internal/verification"
 	"github.com/spf13/cobra"
 )
@@ -46,7 +49,21 @@ func init() {
 }
 
 func runVerifyBackup(cmd *cobra.Command, args []string) error {
-	// Expand glob patterns
+	// Check if any argument is a cloud URI
+	hasCloudURI := false
+	for _, arg := range args {
+		if isCloudURI(arg) {
+			hasCloudURI = true
+			break
+		}
+	}
+
+	// If cloud URIs detected, handle separately
+	if hasCloudURI {
+		return runVerifyCloudBackup(cmd, args)
+	}
+
+	// Expand glob patterns for local files
 	var backupFiles []string
 	for _, pattern := range args {
 		matches, err := filepath.Glob(pattern)
@@ -139,3 +156,80 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
 
 	return nil
 }
+
+// isCloudURI checks if a string is a cloud URI
+func isCloudURI(s string) bool {
+	return cloud.IsCloudURI(s)
+}
+
+// verifyCloudBackup downloads and verifies a backup from cloud storage
+func verifyCloudBackup(ctx context.Context, uri string, quick, verbose bool) (*restore.DownloadResult, error) {
+	// Download from cloud with checksum verification
+	result, err := restore.DownloadFromCloudURI(ctx, uri, restore.DownloadOptions{
+		VerifyChecksum: !quick, // Skip checksum in quick mode
+		KeepLocal:      false,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// If not in quick mode, also run full verification
+	if !quick {
+		_, err := verification.Verify(result.LocalPath)
+		if err != nil {
+			_ = result.Cleanup()
+			return nil, err
+		}
+	}
+
+	return result, nil
+}
+
+// runVerifyCloudBackup verifies backups from cloud storage
+func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
+	fmt.Printf("Verifying cloud backup(s)...\n\n")
+
+	successCount := 0
+	failureCount := 0
+
+	for _, uri := range args {
+		if !isCloudURI(uri) {
+			fmt.Printf("āš ļø Skipping non-cloud URI: %s\n", uri)
+			continue
+		}
+
+		fmt.Printf("ā˜ļø %s\n", uri)
+
+		// Download and verify
+		result, err := verifyCloudBackup(cmd.Context(), uri, quickVerify, verboseVerify)
+		if err != nil {
+			fmt.Printf(" āŒ FAILED: %v\n\n", err)
+			failureCount++
+			continue
+		}
+
+		// Cleanup is deferred, so temp files are removed together once
+		// every URI has been processed.
+		defer result.Cleanup()
+
+		fmt.Printf(" āœ… VALID\n")
+		if verboseVerify && result.MetadataPath != "" {
+			meta, _ := metadata.Load(result.MetadataPath)
+			if meta != nil {
+				fmt.Printf(" Size: %s\n", metadata.FormatSize(meta.SizeBytes))
+				fmt.Printf(" SHA-256: %s\n", meta.SHA256)
+				fmt.Printf(" Database: %s (%s)\n", meta.Database, meta.DatabaseType)
+				fmt.Printf(" Created: %s\n", meta.Timestamp.Format(time.RFC3339))
+			}
+		}
+		fmt.Println()
+		successCount++
+	}
+
+	fmt.Printf("\nSummary: %d valid, %d failed\n", successCount, failureCount)
+
+	if failureCount > 0 {
+		// Return an error rather than calling os.Exit directly, so the
+		// deferred cleanups above run before the process exits non-zero.
+		return fmt.Errorf("%d cloud backup(s) failed verification", failureCount)
+	}
+
+	return nil
+}
diff --git a/internal/cloud/uri.go b/internal/cloud/uri.go
new file mode 100644
index 0000000..0c1c043
--- /dev/null
+++ b/internal/cloud/uri.go
@@ -0,0 +1,198 @@
+package cloud
+
+import (
+	"fmt"
+	"net/url"
+	"path"
+	"strings"
+)
+
+// CloudURI represents a parsed cloud storage URI
+type CloudURI struct {
+	Provider string // "s3", "minio", "azure", "gs", "b2"
+	Bucket   string // Bucket or container name
+	Path     string // Path within bucket (without leading /)
+	Region   string // Region (optional, extracted from host)
+	Endpoint string // Custom endpoint (for MinIO, etc.)
+	FullURI  string // Original URI string
+}
+
+// ParseCloudURI parses a cloud storage URI like s3://bucket/path/file.dump
+// Supported formats:
+// - s3://bucket/path/file.dump
+// - s3://bucket.s3.region.amazonaws.com/path/file.dump
+// - minio://bucket/path/file.dump
+// - azure://container/path/file.dump
+// - gs://bucket/path/file.dump (Google Cloud Storage)
+// - b2://bucket/path/file.dump (Backblaze B2)
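+//
+// For example, "s3://prod-backups/nightly/db.dump" parses to Provider "s3",
+// Bucket "prod-backups", and Path "nightly/db.dump".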
+func ParseCloudURI(uri string) (*CloudURI, error) {
+	if uri == "" {
+		return nil, fmt.Errorf("URI cannot be empty")
+	}
+
+	// Parse URL
+	parsed, err := url.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("invalid URI: %w", err)
+	}
+
+	// Extract provider from scheme
+	provider := strings.ToLower(parsed.Scheme)
+	if provider == "" {
+		return nil, fmt.Errorf("URI must have a scheme (e.g., s3://)")
+	}
+
+	// Validate provider
+	validProviders := map[string]bool{
+		"s3":    true,
+		"minio": true,
+		"azure": true,
+		"gs":    true,
+		"gcs":   true,
+		"b2":    true,
+	}
+	if !validProviders[provider] {
+		return nil, fmt.Errorf("unsupported provider: %s (supported: s3, minio, azure, gs, gcs, b2)", provider)
+	}
+
+	// Normalize provider names: "gcs" is an alias for "gs"
+	if provider == "gcs" {
+		provider = "gs"
+	}
+
+	// Extract bucket and path
+	bucket := parsed.Host
+	if bucket == "" {
+		return nil, fmt.Errorf("URI must specify a bucket (e.g., s3://bucket/path)")
+	}
+
+	// Extract region from AWS S3 hostname if present.
+	// Format: bucket.s3.region.amazonaws.com or bucket.s3-region.amazonaws.com
+	var region string
+	var endpoint string
+
+	if strings.Contains(bucket, ".amazonaws.com") {
+		parts := strings.Split(bucket, ".")
+		if len(parts) >= 3 {
+			// Extract bucket name (first part)
+			bucket = parts[0]
+
+			// Extract region if present:
+			// bucket.s3.us-west-2.amazonaws.com -> us-west-2
+			// bucket.s3-us-west-2.amazonaws.com -> us-west-2
+			for i, part := range parts {
+				if part == "s3" && i+1 < len(parts) && parts[i+1] != "amazonaws" {
+					region = parts[i+1]
+					break
+				}
+				if strings.HasPrefix(part, "s3-") {
+					region = strings.TrimPrefix(part, "s3-")
+					break
+				}
+			}
+		}
+	}
+
+	// For MinIO and custom endpoints, preserve the host as endpoint
+	if provider == "minio" || (provider == "s3" && !strings.Contains(bucket, "amazonaws.com")) {
+		// If it looks like a custom endpoint (has dots), preserve it
+		if strings.Contains(bucket, ".") && !strings.Contains(bucket, "amazonaws.com") {
+			endpoint = bucket
+			// The first path element is then the bucket name
+			trimmedPath := strings.TrimPrefix(parsed.Path, "/")
+			pathParts := strings.SplitN(trimmedPath, "/", 2)
+			if len(pathParts) > 0 && pathParts[0] != "" {
+				bucket = pathParts[0]
+				if len(pathParts) > 1 {
+					parsed.Path = "/" + pathParts[1]
+				} else {
+					parsed.Path = "/"
+				}
+			}
+		}
+	}
+
+	// Clean up path (remove leading slash); objectPath avoids shadowing
+	// the standard library's filepath package name
+	objectPath := strings.TrimPrefix(parsed.Path, "/")
+
+	return &CloudURI{
+		Provider: provider,
+		Bucket:   bucket,
+		Path:     objectPath,
+		Region:   region,
+		Endpoint: endpoint,
+		FullURI:  uri,
+	}, nil
+}
+
+// IsCloudURI checks if a string looks like a cloud storage URI
+func IsCloudURI(s string) bool {
+	s = strings.ToLower(s)
+	return strings.HasPrefix(s, "s3://") ||
+		strings.HasPrefix(s, "minio://") ||
+		strings.HasPrefix(s, "azure://") ||
+		strings.HasPrefix(s, "gs://") ||
+		strings.HasPrefix(s, "gcs://") ||
+		strings.HasPrefix(s, "b2://")
+}
+
+// String returns the string representation of the URI
+func (u *CloudURI) String() string {
+	return u.FullURI
+}
+
+// BaseName returns the filename without path
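+// For a Path of "nightly/db.dump" this returns "db.dump", while Dir
+// returns "nightly".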
+func (u *CloudURI) BaseName() string {
+	return path.Base(u.Path)
+}
+
+// Dir returns the directory path without the filename
+func (u *CloudURI) Dir() string {
+	return path.Dir(u.Path)
+}
+
+// Join appends path elements to the URI path
+func (u *CloudURI) Join(elem ...string) string {
+	newPath := u.Path
+	for _, e := range elem {
+		newPath = path.Join(newPath, e)
+	}
+	return fmt.Sprintf("%s://%s/%s", u.Provider, u.Bucket, newPath)
+}
+
+// ToConfig converts a CloudURI to a cloud.Config
+func (u *CloudURI) ToConfig() *Config {
+	// Use the directory part of the path as the prefix; path.Dir returns
+	// "." for an empty or single-element path, which would make a bogus
+	// prefix, so normalize it away.
+	prefix := u.Dir()
+	if prefix == "." {
+		prefix = ""
+	}
+
+	cfg := &Config{
+		Provider: u.Provider,
+		Bucket:   u.Bucket,
+		Prefix:   prefix,
+	}
+
+	// Set region if available
+	if u.Region != "" {
+		cfg.Region = u.Region
+	}
+
+	// Set endpoint if available (for MinIO, etc.)
+	if u.Endpoint != "" {
+		cfg.Endpoint = u.Endpoint
+	}
+
+	// Provider-specific settings: MinIO and B2 need path-style addressing
+	switch u.Provider {
+	case "minio", "b2":
+		cfg.PathStyle = true
+	}
+
+	return cfg
+}
+
+// BuildRemotePath constructs the full remote path for a file
+func (u *CloudURI) BuildRemotePath(filename string) string {
+	if u.Path == "" || u.Path == "." {
+		return filename
+	}
+	return path.Join(u.Path, filename)
+}
diff --git a/internal/restore/cloud_download.go b/internal/restore/cloud_download.go
new file mode 100644
index 0000000..6a97388
--- /dev/null
+++ b/internal/restore/cloud_download.go
@@ -0,0 +1,211 @@
+package restore
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"dbbackup/internal/cloud"
+	"dbbackup/internal/logger"
+	"dbbackup/internal/metadata"
+)
+
+// CloudDownloader handles downloading backups from cloud storage
+type CloudDownloader struct {
+	backend cloud.Backend
+	log     logger.Logger
+}
+
+// NewCloudDownloader creates a new cloud downloader
+func NewCloudDownloader(backend cloud.Backend, log logger.Logger) *CloudDownloader {
+	return &CloudDownloader{
+		backend: backend,
+		log:     log,
+	}
+}
+
+// DownloadOptions contains options for downloading from cloud
+type DownloadOptions struct {
+	VerifyChecksum bool   // Verify SHA-256 checksum after download
+	KeepLocal      bool   // Keep downloaded file (don't delete temp)
+	TempDir        string // Temp directory (default: os.TempDir())
+}
+
+// DownloadResult contains information about a downloaded backup
+type DownloadResult struct {
+	LocalPath    string // Path to downloaded file
+	RemotePath   string // Original remote path
+	Size         int64  // File size in bytes
+	SHA256       string // SHA-256 checksum (if verified)
+	MetadataPath string // Path to downloaded metadata (if it exists)
+	IsTempFile   bool   // Whether the file is in a temp directory
+}
+
+// Download downloads a backup from cloud storage
+func (d *CloudDownloader) Download(ctx context.Context, remotePath string, opts DownloadOptions) (*DownloadResult, error) {
+	// Create a unique temp subdirectory. MkdirTemp falls back to
+	// os.TempDir() when opts.TempDir is empty, and a fresh directory per
+	// call keeps concurrent downloads from clobbering each other's files
+	// when Cleanup removes the directory.
+	tempSubDir, err := os.MkdirTemp(opts.TempDir, "dbbackup-download-")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create temp directory: %w", err)
+	}
+
+	// Extract filename from remote path
+	filename := filepath.Base(remotePath)
+	localPath := filepath.Join(tempSubDir, filename)
+
+	d.log.Info("Downloading backup from cloud", "remote", remotePath, "local", localPath)
+
+	// Get file size for progress tracking
+	size, err := d.backend.GetSize(ctx, remotePath)
+	if err != nil {
+		d.log.Warn("Could not get remote file size", "error", err)
+		size = 0 // Continue anyway
+	}
+
+	// Progress callback
+	var lastPercent int
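+	// Log only when a new 10% boundary is crossed so large downloads
+	// don't flood the output.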
+	progressCallback := func(transferred, total int64) {
+		if total > 0 {
+			percent := int(float64(transferred) / float64(total) * 100)
+			if percent != lastPercent && percent%10 == 0 {
+				d.log.Info("Download progress", "percent", percent, "transferred", cloud.FormatSize(transferred), "total", cloud.FormatSize(total))
+				lastPercent = percent
+			}
+		}
+	}
+
+	// Download file
+	if err := d.backend.Download(ctx, remotePath, localPath, progressCallback); err != nil {
+		// Cleanup on failure
+		os.RemoveAll(tempSubDir)
+		return nil, fmt.Errorf("download failed: %w", err)
+	}
+
+	// Prefer the actual on-disk size in case GetSize failed earlier
+	if info, statErr := os.Stat(localPath); statErr == nil {
+		size = info.Size()
+	}
+
+	result := &DownloadResult{
+		LocalPath:  localPath,
+		RemotePath: remotePath,
+		Size:       size,
+		IsTempFile: !opts.KeepLocal,
+	}
+
+	// Try to download the companion metadata file
+	metaRemotePath := remotePath + ".meta.json"
+	exists, err := d.backend.Exists(ctx, metaRemotePath)
+	if err == nil && exists {
+		metaLocalPath := localPath + ".meta.json"
+		if err := d.backend.Download(ctx, metaRemotePath, metaLocalPath, nil); err != nil {
+			d.log.Warn("Failed to download metadata", "error", err)
+		} else {
+			result.MetadataPath = metaLocalPath
+			d.log.Debug("Downloaded metadata", "path", metaLocalPath)
+		}
+	}
+
+	// Verify checksum if requested
+	if opts.VerifyChecksum {
+		d.log.Info("Verifying checksum...")
+		checksum, err := calculateSHA256(localPath)
+		if err != nil {
+			// Cleanup on verification failure
+			os.RemoveAll(tempSubDir)
+			return nil, fmt.Errorf("checksum calculation failed: %w", err)
+		}
+		result.SHA256 = checksum
+
+		// Check against metadata if available
+		if result.MetadataPath != "" {
+			meta, err := metadata.Load(result.MetadataPath)
+			if err != nil {
+				d.log.Warn("Failed to load metadata for verification", "error", err)
+			} else if meta.SHA256 != "" && meta.SHA256 != checksum {
+				// Cleanup on verification failure
+				os.RemoveAll(tempSubDir)
+				return nil, fmt.Errorf("checksum mismatch: expected %s, got %s", meta.SHA256, checksum)
+			} else if meta.SHA256 == checksum {
+				d.log.Info("Checksum verified successfully", "sha256", checksum)
+			}
+		}
+	}
+
+	d.log.Info("Download completed", "path", localPath, "size", cloud.FormatSize(result.Size))
+
+	return result, nil
+}
+
+// DownloadFromURI downloads a backup using a cloud URI. The receiver's
+// backend must already point at the bucket named in the URI; only the
+// path component is used here.
+func (d *CloudDownloader) DownloadFromURI(ctx context.Context, uri string, opts DownloadOptions) (*DownloadResult, error) {
+	// Parse URI
+	cloudURI, err := cloud.ParseCloudURI(uri)
+	if err != nil {
+		return nil, fmt.Errorf("invalid cloud URI: %w", err)
+	}
+
+	// Download using the path from the URI
+	return d.Download(ctx, cloudURI.Path, opts)
+}
+
+// Cleanup removes downloaded temp files
+func (r *DownloadResult) Cleanup() error {
+	if !r.IsTempFile {
+		return nil // Don't delete non-temp files
+	}
+
+	// Remove the entire temp directory
+	tempDir := filepath.Dir(r.LocalPath)
+	if err := os.RemoveAll(tempDir); err != nil {
+		return fmt.Errorf("failed to cleanup temp files: %w", err)
+	}
+
+	return nil
+}
+
+// calculateSHA256 calculates the SHA-256 checksum of a file
+func calculateSHA256(filePath string) (string, error) {
+	file, err := os.Open(filePath)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	hash := sha256.New()
+	if _, err := io.Copy(hash, file); err != nil {
+		return "", err
+	}
+
+	return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+// DownloadFromCloudURI is a convenience function to download from a cloud URI.
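+// It parses the URI, builds a matching backend, and downloads the object,
+// so callers don't need to construct a CloudDownloader themselves.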
+func DownloadFromCloudURI(ctx context.Context, uri string, opts DownloadOptions) (*DownloadResult, error) {
+	// Parse URI
+	cloudURI, err := cloud.ParseCloudURI(uri)
+	if err != nil {
+		return nil, fmt.Errorf("invalid cloud URI: %w", err)
+	}
+
+	// Create config from URI
+	cfg := cloudURI.ToConfig()
+
+	// Create backend
+	backend, err := cloud.NewBackend(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create cloud backend: %w", err)
+	}
+
+	// Create downloader
+	log := logger.New("info", "text")
+	downloader := NewCloudDownloader(backend, log)
+
+	// Download
+	return downloader.Download(ctx, cloudURI.Path, opts)
+}