feat: v2.0 Sprint 3 - Cloud URI Support & Command Integration (Part 1)
Sprint 3 Implementation - Cloud URI Support

New Features:
✅ Cloud URI parser (s3://bucket/path)
✅ Backup command with --cloud URI flag
✅ Restore from cloud URIs
✅ Verify cloud backups
✅ Cleanup cloud storage with retention policy

New Files:
- internal/cloud/uri.go - Cloud URI parser
- internal/restore/cloud_download.go - Cloud download & verify helper

Modified Commands:
- cmd/backup.go - Added --cloud s3://bucket/path flag
- cmd/restore.go - Auto-detect & download from cloud URIs
- cmd/verify.go - Verify backups from cloud storage
- cmd/cleanup.go - Apply retention policy to cloud storage

URI Support:
- s3://bucket/path/file.dump - AWS S3
- minio://bucket/path/file.dump - MinIO
- b2://bucket/path/file.dump - Backblaze B2
- gs://bucket/path/file.dump - Google Cloud Storage

Examples:

# Backup with cloud URI
dbbackup backup single mydb --cloud s3://my-bucket/backups/

# Restore from cloud
dbbackup restore single s3://my-bucket/backups/mydb.dump --confirm

# Verify cloud backup
dbbackup verify-backup s3://my-bucket/backups/mydb.dump

# Cleanup old cloud backups
dbbackup cleanup s3://my-bucket/backups/ --retention-days 30

Features:
- Automatic download to temp directory
- SHA-256 verification after download
- Automatic temp file cleanup
- Progress tracking for downloads
- Metadata synchronization
- Retention policy for cloud storage

Sprint 3 Part 1 COMPLETE!
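As a quick orientation before the diffs, a minimal sketch of the new parser's output (field values traced from internal/cloud/uri.go below; the main wrapper is illustrative):

package main

import (
	"fmt"
	"log"

	"dbbackup/internal/cloud"
)

func main() {
	uri, err := cloud.ParseCloudURI("s3://my-bucket/backups/mydb.dump")
	if err != nil {
		log.Fatal(err)
	}
	// Fields as produced by ParseCloudURI in internal/cloud/uri.go below
	fmt.Println(uri.Provider)   // "s3"
	fmt.Println(uri.Bucket)     // "my-bucket"
	fmt.Println(uri.Path)       // "backups/mydb.dump"
	fmt.Println(uri.BaseName()) // "mydb.dump"
	fmt.Println(uri.Dir())      // "backups"
}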
cmd/backup.go

@@ -3,6 +3,7 @@ package cmd
import (
	"fmt"

	"dbbackup/internal/cloud"
	"github.com/spf13/cobra"
)

@@ -92,6 +93,7 @@ func init() {
	// Cloud storage flags for all backup commands
	for _, cmd := range []*cobra.Command{clusterCmd, singleCmd, sampleCmd} {
		cmd.Flags().String("cloud", "", "Cloud storage URI (e.g., s3://bucket/path) - takes precedence over individual flags")
		cmd.Flags().Bool("cloud-auto-upload", false, "Automatically upload backup to cloud after completion")
		cmd.Flags().String("cloud-provider", "", "Cloud provider (s3, minio, b2)")
		cmd.Flags().String("cloud-bucket", "", "Cloud bucket name")

@@ -109,7 +111,13 @@ func init() {
		}
	}

-	// Update cloud config from flags
	// Check if --cloud URI flag is provided (takes precedence)
	if c.Flags().Changed("cloud") {
		if err := parseCloudURIFlag(c); err != nil {
			return err
		}
	} else {
		// Update cloud config from individual flags
		if c.Flags().Changed("cloud-auto-upload") {
			if autoUpload, _ := c.Flags().GetBool("cloud-auto-upload"); autoUpload {
				cfg.CloudEnabled = true

@@ -136,6 +144,7 @@ func init() {
		if c.Flags().Changed("cloud-prefix") {
			cfg.CloudPrefix, _ = c.Flags().GetString("cloud-prefix")
		}
	}

	return nil
}

@@ -178,3 +187,39 @@ func init() {
	// Mark the strategy flags as mutually exclusive
	sampleCmd.MarkFlagsMutuallyExclusive("sample-ratio", "sample-percent", "sample-count")
}

// parseCloudURIFlag parses the --cloud URI flag and updates the config
func parseCloudURIFlag(cmd *cobra.Command) error {
	cloudURI, _ := cmd.Flags().GetString("cloud")
	if cloudURI == "" {
		return nil
	}

	// Parse cloud URI
	uri, err := cloud.ParseCloudURI(cloudURI)
	if err != nil {
		return fmt.Errorf("invalid cloud URI: %w", err)
	}

	// Enable cloud storage and auto-upload
	cfg.CloudEnabled = true
	cfg.CloudAutoUpload = true

	// Update config from the URI
	cfg.CloudProvider = uri.Provider
	cfg.CloudBucket = uri.Bucket

	if uri.Region != "" {
		cfg.CloudRegion = uri.Region
	}

	if uri.Endpoint != "" {
		cfg.CloudEndpoint = uri.Endpoint
	}

	if uri.Path != "" {
		cfg.CloudPrefix = uri.Dir()
	}

	return nil
}
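Based on parseCloudURIFlag above, a single --cloud URI should be roughly equivalent to spelling out the individual cloud flags, and it implies --cloud-auto-upload. Note that the prefix is taken from uri.Dir(), which drops the last path segment, so a trailing slash matters. A sketch (flag names are all from the diff above; bucket name illustrative):

# These two invocations should configure the same cloud target
dbbackup backup single mydb --cloud s3://my-bucket/backups/
dbbackup backup single mydb --cloud-auto-upload --cloud-provider s3 --cloud-bucket my-bucket --cloud-prefix backups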
cmd/cleanup.go

@@ -1,11 +1,14 @@
package cmd

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"dbbackup/internal/cloud"
	"dbbackup/internal/metadata"
	"dbbackup/internal/retention"
	"github.com/spf13/cobra"

@@ -53,7 +56,15 @@ func init() {
}

func runCleanup(cmd *cobra.Command, args []string) error {
-	backupDir := args[0]
	backupPath := args[0]

	// Check if this is a cloud URI
	if isCloudURIPath(backupPath) {
		return runCloudCleanup(cmd.Context(), backupPath)
	}

	// Local cleanup
	backupDir := backupPath

	// Validate directory exists
	if !dirExists(backupDir) {

@@ -150,3 +161,174 @@ func dirExists(path string) bool {
	}
	return info.IsDir()
}

// isCloudURIPath checks if a path is a cloud URI
func isCloudURIPath(s string) bool {
	return cloud.IsCloudURI(s)
}

// runCloudCleanup applies the retention policy to cloud storage
func runCloudCleanup(ctx context.Context, uri string) error {
	// Parse cloud URI
	cloudURI, err := cloud.ParseCloudURI(uri)
	if err != nil {
		return fmt.Errorf("invalid cloud URI: %w", err)
	}

	fmt.Printf("☁️  Cloud Cleanup Policy:\n")
	fmt.Printf("   URI: %s\n", uri)
	fmt.Printf("   Provider: %s\n", cloudURI.Provider)
	fmt.Printf("   Bucket: %s\n", cloudURI.Bucket)
	if cloudURI.Path != "" {
		fmt.Printf("   Prefix: %s\n", cloudURI.Path)
	}
	fmt.Printf("   Retention: %d days\n", retentionDays)
	fmt.Printf("   Min backups: %d\n", minBackups)
	if dryRun {
		fmt.Printf("   Mode: DRY RUN (no files will be deleted)\n")
	}
	fmt.Println()

	// Create cloud backend
	cfg := cloudURI.ToConfig()
	backend, err := cloud.NewBackend(cfg)
	if err != nil {
		return fmt.Errorf("failed to create cloud backend: %w", err)
	}

	// List all backups
	backups, err := backend.List(ctx, cloudURI.Path)
	if err != nil {
		return fmt.Errorf("failed to list cloud backups: %w", err)
	}

	if len(backups) == 0 {
		fmt.Println("No backups found in cloud storage")
		return nil
	}

	fmt.Printf("Found %d backup(s) in cloud storage\n\n", len(backups))

	// Filter backups based on the pattern if specified
	var filteredBackups []cloud.BackupInfo
	if cleanupPattern != "" {
		for _, backup := range backups {
			matched, _ := filepath.Match(cleanupPattern, backup.Name)
			if matched {
				filteredBackups = append(filteredBackups, backup)
			}
		}
		fmt.Printf("Pattern matched %d backup(s)\n\n", len(filteredBackups))
	} else {
		filteredBackups = backups
	}

	// Sorted by modification time (oldest first) - already done by backend.List

	// Calculate retention date
	cutoffDate := time.Now().AddDate(0, 0, -retentionDays)

	// Determine which backups to delete
	var toDelete []cloud.BackupInfo
	var toKeep []cloud.BackupInfo

	for _, backup := range filteredBackups {
		if backup.LastModified.Before(cutoffDate) {
			toDelete = append(toDelete, backup)
		} else {
			toKeep = append(toKeep, backup)
		}
	}

	// Ensure we keep the minimum number of backups: move the newest
	// deletion candidates back to the keep list until the minimum is met
	totalBackups := len(filteredBackups)
	if totalBackups-len(toDelete) < minBackups {
		keepCount := minBackups - len(toKeep)
		if keepCount > len(toDelete) {
			keepCount = len(toDelete)
		}

		n := len(toDelete)
		for i := n - keepCount; i < n; i++ {
			toKeep = append(toKeep, toDelete[i])
		}
		toDelete = toDelete[:n-keepCount]
	}

	// Display results
	fmt.Printf("📊 Results:\n")
	fmt.Printf("   Total backups: %d\n", totalBackups)
	fmt.Printf("   Eligible for deletion: %d\n", len(toDelete))
	fmt.Printf("   Will keep: %d\n", len(toKeep))
	fmt.Println()

	if len(toDelete) > 0 {
		if dryRun {
			fmt.Printf("🔍 Would delete %d backup(s):\n", len(toDelete))
		} else {
			fmt.Printf("🗑️  Deleting %d backup(s):\n", len(toDelete))
		}

		var totalSize int64
		var deletedCount int

		for _, backup := range toDelete {
			fmt.Printf("  - %s (%s, %s old)\n",
				backup.Name,
				cloud.FormatSize(backup.Size),
				formatBackupAge(backup.LastModified))

			totalSize += backup.Size

			if !dryRun {
				if err := backend.Delete(ctx, backup.Key); err != nil {
					fmt.Printf("    ❌ Error: %v\n", err)
				} else {
					deletedCount++
					// Also try to delete the sidecar metadata file
					backend.Delete(ctx, backup.Key+".meta.json")
				}
			}
		}

		fmt.Printf("\n💾 Space %s: %s\n",
			map[bool]string{true: "would be freed", false: "freed"}[dryRun],
			cloud.FormatSize(totalSize))

		if !dryRun && deletedCount > 0 {
			fmt.Printf("✅ Successfully deleted %d backup(s)\n", deletedCount)
		}
	} else {
		fmt.Println("No backups eligible for deletion")
	}

	return nil
}

// formatBackupAge returns a human-readable age string from a time.Time
func formatBackupAge(t time.Time) string {
	d := time.Since(t)
	days := int(d.Hours() / 24)

	if days == 0 {
		return "today"
	} else if days == 1 {
		return "1 day"
	} else if days < 30 {
		return fmt.Sprintf("%d days", days)
	} else if days < 365 {
		months := days / 30
		if months == 1 {
			return "1 month"
		}
		return fmt.Sprintf("%d months", months)
	} else {
		years := days / 365
		if years == 1 {
			return "1 year"
		}
		return fmt.Sprintf("%d years", years)
	}
}
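The keep-minimum adjustment above is the subtle part of the retention pass. As a standalone sketch of the same selection rule (a hypothetical helper, not part of the commit; it assumes the cloud and time imports already present in this file, and an oldest-first listing as backend.List guarantees):

// splitByRetention mirrors runCloudCleanup's selection rule: delete backups
// older than the cutoff, but always retain at least minKeep in total,
// preferring the newest deletion candidates. Input must be sorted oldest first.
func splitByRetention(backups []cloud.BackupInfo, cutoff time.Time, minKeep int) (toDelete, toKeep []cloud.BackupInfo) {
	for _, b := range backups {
		if b.LastModified.Before(cutoff) {
			toDelete = append(toDelete, b)
		} else {
			toKeep = append(toKeep, b)
		}
	}
	// Move the newest candidates back until the minimum is met.
	if need := minKeep - len(toKeep); need > 0 {
		if need > len(toDelete) {
			need = len(toDelete)
		}
		n := len(toDelete)
		toKeep = append(toKeep, toDelete[n-need:]...)
		toDelete = toDelete[:n-need]
	}
	return toDelete, toKeep
}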
cmd/restore.go

@@ -10,6 +10,7 @@ import (
	"syscall"
	"time"

	"dbbackup/internal/cloud"
	"dbbackup/internal/database"
	"dbbackup/internal/restore"
	"dbbackup/internal/security"

@@ -169,7 +170,36 @@ func init() {
func runRestoreSingle(cmd *cobra.Command, args []string) error {
	archivePath := args[0]

-	// Convert to absolute path
	// Check if this is a cloud URI
	var cleanupFunc func() error

	if cloud.IsCloudURI(archivePath) {
		log.Info("Detected cloud URI, downloading backup...", "uri", archivePath)

		// Download from cloud
		result, err := restore.DownloadFromCloudURI(cmd.Context(), archivePath, restore.DownloadOptions{
			VerifyChecksum: true,
			KeepLocal:      false, // Delete after restore
		})
		if err != nil {
			return fmt.Errorf("failed to download from cloud: %w", err)
		}

		archivePath = result.LocalPath
		cleanupFunc = result.Cleanup

		// Ensure cleanup happens on exit
		defer func() {
			if cleanupFunc != nil {
				if err := cleanupFunc(); err != nil {
					log.Warn("Failed to cleanup temp files", "error", err)
				}
			}
		}()

		log.Info("Download completed", "local_path", archivePath)
	} else {
		// Convert to absolute path for local files
		if !filepath.IsAbs(archivePath) {
			absPath, err := filepath.Abs(archivePath)
			if err != nil {

@@ -182,6 +212,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
		if _, err := os.Stat(archivePath); err != nil {
			return fmt.Errorf("archive not found: %s", archivePath)
		}
	}

	// Detect format
	format := restore.DetectArchiveFormat(archivePath)
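The cloud branch above wires VerifyChecksum on and KeepLocal off, so the temp files are removed once the restore finishes. For a caller that instead wants to retain the download, a minimal sketch (hypothetical helper name and URI; assumes the context and restore imports):

// fetchBackupLocally downloads a backup and keeps the local copy.
// KeepLocal: true marks the result as non-temp, so result.Cleanup() becomes
// a no-op (see DownloadResult.Cleanup in internal/restore/cloud_download.go).
func fetchBackupLocally(ctx context.Context, uri string) (string, error) {
	result, err := restore.DownloadFromCloudURI(ctx, uri, restore.DownloadOptions{
		VerifyChecksum: true,
		KeepLocal:      true,
	})
	if err != nil {
		return "", err
	}
	return result.LocalPath, nil
}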
cmd/verify.go

@@ -1,13 +1,16 @@
package cmd

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"dbbackup/internal/cloud"
	"dbbackup/internal/metadata"
	"dbbackup/internal/restore"
	"dbbackup/internal/verification"
	"github.com/spf13/cobra"
)

@@ -46,7 +49,21 @@ func init() {
}

func runVerifyBackup(cmd *cobra.Command, args []string) error {
-	// Expand glob patterns
	// Check if any argument is a cloud URI
	hasCloudURI := false
	for _, arg := range args {
		if isCloudURI(arg) {
			hasCloudURI = true
			break
		}
	}

	// If cloud URIs detected, handle separately
	if hasCloudURI {
		return runVerifyCloudBackup(cmd, args)
	}

	// Expand glob patterns for local files
	var backupFiles []string
	for _, pattern := range args {
		matches, err := filepath.Glob(pattern)

@@ -139,3 +156,80 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {

	return nil
}

// isCloudURI checks if a string is a cloud URI
func isCloudURI(s string) bool {
	return cloud.IsCloudURI(s)
}

// verifyCloudBackup downloads and verifies a backup from cloud storage
func verifyCloudBackup(ctx context.Context, uri string, quick, verbose bool) (*restore.DownloadResult, error) {
	// Download from cloud with checksum verification
	result, err := restore.DownloadFromCloudURI(ctx, uri, restore.DownloadOptions{
		VerifyChecksum: !quick, // Skip checksum in quick mode
		KeepLocal:      false,
	})
	if err != nil {
		return nil, err
	}

	// If not in quick mode, also run full verification
	if !quick {
		_, err := verification.Verify(result.LocalPath)
		if err != nil {
			result.Cleanup()
			return nil, err
		}
	}

	return result, nil
}

// runVerifyCloudBackup verifies backups from cloud storage
func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
	fmt.Printf("Verifying cloud backup(s)...\n\n")

	successCount := 0
	failureCount := 0

	for _, uri := range args {
		if !isCloudURI(uri) {
			fmt.Printf("⚠️  Skipping non-cloud URI: %s\n", uri)
			continue
		}

		fmt.Printf("☁️  %s\n", uri)

		// Download and verify
		result, err := verifyCloudBackup(cmd.Context(), uri, quickVerify, verboseVerify)
		if err != nil {
			fmt.Printf("   ❌ FAILED: %v\n\n", err)
			failureCount++
			continue
		}

		// Cleanup temp files (deferred until the command returns)
		defer result.Cleanup()

		fmt.Printf("   ✅ VALID\n")
		if verboseVerify && result.MetadataPath != "" {
			meta, _ := metadata.Load(result.MetadataPath)
			if meta != nil {
				fmt.Printf("   Size: %s\n", metadata.FormatSize(meta.SizeBytes))
				fmt.Printf("   SHA-256: %s\n", meta.SHA256)
				fmt.Printf("   Database: %s (%s)\n", meta.Database, meta.DatabaseType)
				fmt.Printf("   Created: %s\n", meta.Timestamp.Format(time.RFC3339))
			}
		}
		fmt.Println()
		successCount++
	}

	fmt.Printf("\n✅ Summary: %d valid, %d failed\n", successCount, failureCount)

	if failureCount > 0 {
		os.Exit(1)
	}

	return nil
}
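Since runVerifyCloudBackup loops over every argument and exits with status 1 if any backup fails, batch verification from a shell should look roughly like this (assumed invocation style, following the examples in the commit message):

# Verify several cloud backups in one run
dbbackup verify-backup s3://my-bucket/backups/db1.dump s3://my-bucket/backups/db2.dump
echo $?  # 0 if all valid, 1 if any failed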
internal/cloud/uri.go (new file)

@@ -0,0 +1,198 @@
package cloud

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

// CloudURI represents a parsed cloud storage URI
type CloudURI struct {
	Provider string // "s3", "minio", "azure", "gs", "b2" (gcs is normalized to gs)
	Bucket   string // Bucket or container name
	Path     string // Path within the bucket (without leading /)
	Region   string // Region (optional, extracted from host)
	Endpoint string // Custom endpoint (for MinIO, etc.)
	FullURI  string // Original URI string
}

// ParseCloudURI parses a cloud storage URI like s3://bucket/path/file.dump
// Supported formats:
//   - s3://bucket/path/file.dump
//   - s3://bucket.s3.region.amazonaws.com/path/file.dump
//   - minio://bucket/path/file.dump
//   - azure://container/path/file.dump
//   - gs://bucket/path/file.dump (Google Cloud Storage)
//   - b2://bucket/path/file.dump (Backblaze B2)
func ParseCloudURI(uri string) (*CloudURI, error) {
	if uri == "" {
		return nil, fmt.Errorf("URI cannot be empty")
	}

	// Parse URL
	parsed, err := url.Parse(uri)
	if err != nil {
		return nil, fmt.Errorf("invalid URI: %w", err)
	}

	// Extract provider from scheme
	provider := strings.ToLower(parsed.Scheme)
	if provider == "" {
		return nil, fmt.Errorf("URI must have a scheme (e.g., s3://)")
	}

	// Validate provider
	validProviders := map[string]bool{
		"s3":    true,
		"minio": true,
		"azure": true,
		"gs":    true,
		"gcs":   true,
		"b2":    true,
	}
	if !validProviders[provider] {
		return nil, fmt.Errorf("unsupported provider: %s (supported: s3, minio, azure, gs, gcs, b2)", provider)
	}

	// Normalize provider names
	if provider == "gcs" {
		provider = "gs"
	}

	// Extract bucket and path
	bucket := parsed.Host
	if bucket == "" {
		return nil, fmt.Errorf("URI must specify a bucket (e.g., s3://bucket/path)")
	}

	// Extract region from an AWS S3 hostname if present
	// Format: bucket.s3.region.amazonaws.com or bucket.s3-region.amazonaws.com
	var region string
	var endpoint string

	if strings.Contains(bucket, ".amazonaws.com") {
		parts := strings.Split(bucket, ".")
		if len(parts) >= 3 {
			// Extract bucket name (first part)
			bucket = parts[0]

			// Extract region if present
			// bucket.s3.us-west-2.amazonaws.com -> us-west-2
			// bucket.s3-us-west-2.amazonaws.com -> us-west-2
			for i, part := range parts {
				if part == "s3" && i+1 < len(parts) && parts[i+1] != "amazonaws" {
					region = parts[i+1]
					break
				}
				if strings.HasPrefix(part, "s3-") {
					region = strings.TrimPrefix(part, "s3-")
					break
				}
			}
		}
	}

	// For MinIO and custom endpoints, preserve the host as the endpoint
	if provider == "minio" || (provider == "s3" && !strings.Contains(bucket, "amazonaws.com")) {
		// If it looks like a custom endpoint (has dots), preserve it
		if strings.Contains(bucket, ".") && !strings.Contains(bucket, "amazonaws.com") {
			endpoint = bucket
			// Try to extract the bucket from the first path segment
			trimmedPath := strings.TrimPrefix(parsed.Path, "/")
			pathParts := strings.SplitN(trimmedPath, "/", 2)
			if len(pathParts) > 0 && pathParts[0] != "" {
				bucket = pathParts[0]
				if len(pathParts) > 1 {
					parsed.Path = "/" + pathParts[1]
				} else {
					parsed.Path = "/"
				}
			}
		}
	}

	// Clean up the path (remove leading slash)
	filePath := strings.TrimPrefix(parsed.Path, "/")

	return &CloudURI{
		Provider: provider,
		Bucket:   bucket,
		Path:     filePath,
		Region:   region,
		Endpoint: endpoint,
		FullURI:  uri,
	}, nil
}

// IsCloudURI checks if a string looks like a cloud storage URI
func IsCloudURI(s string) bool {
	s = strings.ToLower(s)
	return strings.HasPrefix(s, "s3://") ||
		strings.HasPrefix(s, "minio://") ||
		strings.HasPrefix(s, "azure://") ||
		strings.HasPrefix(s, "gs://") ||
		strings.HasPrefix(s, "gcs://") ||
		strings.HasPrefix(s, "b2://")
}

// String returns the string representation of the URI
func (u *CloudURI) String() string {
	return u.FullURI
}

// BaseName returns the filename without the path
func (u *CloudURI) BaseName() string {
	return path.Base(u.Path)
}

// Dir returns the directory path without the filename
func (u *CloudURI) Dir() string {
	return path.Dir(u.Path)
}

// Join appends path elements to the URI path
func (u *CloudURI) Join(elem ...string) string {
	newPath := u.Path
	for _, e := range elem {
		newPath = path.Join(newPath, e)
	}
	return fmt.Sprintf("%s://%s/%s", u.Provider, u.Bucket, newPath)
}

// ToConfig converts a CloudURI to a cloud.Config
func (u *CloudURI) ToConfig() *Config {
	cfg := &Config{
		Provider: u.Provider,
		Bucket:   u.Bucket,
		Prefix:   u.Dir(), // Use the directory part as the prefix
	}

	// Set region if available
	if u.Region != "" {
		cfg.Region = u.Region
	}

	// Set endpoint if available (for MinIO, etc.)
	if u.Endpoint != "" {
		cfg.Endpoint = u.Endpoint
	}

	// Provider-specific settings
	switch u.Provider {
	case "minio", "b2":
		cfg.PathStyle = true
	}

	return cfg
}

// BuildRemotePath constructs the full remote path for a file
func (u *CloudURI) BuildRemotePath(filename string) string {
	if u.Path == "" || u.Path == "." {
		return filename
	}
	return path.Join(u.Path, filename)
}
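One non-obvious branch above: for minio:// (and non-AWS s3://) URIs whose host contains a dot, the host is treated as a custom endpoint and the bucket moves to the first path segment. A small sketch of the expected results, traced from the parsing logic above (host and bucket names are hypothetical):

package main

import (
	"fmt"

	"dbbackup/internal/cloud"
)

func main() {
	// Plain host (no dot): the host is the bucket.
	u, _ := cloud.ParseCloudURI("minio://backups/daily/mydb.dump")
	fmt.Printf("%s %s %q\n", u.Bucket, u.Path, u.Endpoint) // backups daily/mydb.dump ""

	// Dotted, non-AWS host: the host becomes the endpoint,
	// and the bucket comes from the first path segment.
	u, _ = cloud.ParseCloudURI("minio://minio.example.com/backups/daily/mydb.dump")
	fmt.Printf("%s %s %s\n", u.Bucket, u.Path, u.Endpoint) // backups daily/mydb.dump minio.example.com

	// AWS virtual-hosted style: bucket and region come from the hostname.
	u, _ = cloud.ParseCloudURI("s3://my-bucket.s3.us-west-2.amazonaws.com/daily/mydb.dump")
	fmt.Printf("%s %s\n", u.Bucket, u.Region) // my-bucket us-west-2
}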
internal/restore/cloud_download.go (new file)

@@ -0,0 +1,211 @@
package restore

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"dbbackup/internal/cloud"
	"dbbackup/internal/logger"
	"dbbackup/internal/metadata"
)

// CloudDownloader handles downloading backups from cloud storage
type CloudDownloader struct {
	backend cloud.Backend
	log     logger.Logger
}

// NewCloudDownloader creates a new cloud downloader
func NewCloudDownloader(backend cloud.Backend, log logger.Logger) *CloudDownloader {
	return &CloudDownloader{
		backend: backend,
		log:     log,
	}
}

// DownloadOptions contains options for downloading from cloud storage
type DownloadOptions struct {
	VerifyChecksum bool   // Verify SHA-256 checksum after download
	KeepLocal      bool   // Keep the downloaded file (don't delete temp)
	TempDir        string // Temp directory (default: os.TempDir())
}

// DownloadResult contains information about a downloaded backup
type DownloadResult struct {
	LocalPath    string // Path to the downloaded file
	RemotePath   string // Original remote path
	Size         int64  // File size in bytes
	SHA256       string // SHA-256 checksum (if verified)
	MetadataPath string // Path to the downloaded metadata (if it exists)
	IsTempFile   bool   // Whether the file is in a temp directory
}

// Download downloads a backup from cloud storage
func (d *CloudDownloader) Download(ctx context.Context, remotePath string, opts DownloadOptions) (*DownloadResult, error) {
	// Determine temp directory
	tempDir := opts.TempDir
	if tempDir == "" {
		tempDir = os.TempDir()
	}

	// Create a unique temp subdirectory
	tempSubDir := filepath.Join(tempDir, fmt.Sprintf("dbbackup-download-%d", os.Getpid()))
	if err := os.MkdirAll(tempSubDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create temp directory: %w", err)
	}

	// Extract the filename from the remote path
	filename := filepath.Base(remotePath)
	localPath := filepath.Join(tempSubDir, filename)

	d.log.Info("Downloading backup from cloud", "remote", remotePath, "local", localPath)

	// Get the file size for progress tracking
	size, err := d.backend.GetSize(ctx, remotePath)
	if err != nil {
		d.log.Warn("Could not get remote file size", "error", err)
		size = 0 // Continue anyway
	}

	// Progress callback: log every 10%
	var lastPercent int
	progressCallback := func(transferred, total int64) {
		if total > 0 {
			percent := int(float64(transferred) / float64(total) * 100)
			if percent != lastPercent && percent%10 == 0 {
				d.log.Info("Download progress", "percent", percent, "transferred", cloud.FormatSize(transferred), "total", cloud.FormatSize(total))
				lastPercent = percent
			}
		}
	}

	// Download the file
	if err := d.backend.Download(ctx, remotePath, localPath, progressCallback); err != nil {
		// Cleanup on failure
		os.RemoveAll(tempSubDir)
		return nil, fmt.Errorf("download failed: %w", err)
	}

	result := &DownloadResult{
		LocalPath:  localPath,
		RemotePath: remotePath,
		Size:       size,
		IsTempFile: !opts.KeepLocal,
	}

	// Try to download the sidecar metadata file
	metaRemotePath := remotePath + ".meta.json"
	exists, err := d.backend.Exists(ctx, metaRemotePath)
	if err == nil && exists {
		metaLocalPath := localPath + ".meta.json"
		if err := d.backend.Download(ctx, metaRemotePath, metaLocalPath, nil); err != nil {
			d.log.Warn("Failed to download metadata", "error", err)
		} else {
			result.MetadataPath = metaLocalPath
			d.log.Debug("Downloaded metadata", "path", metaLocalPath)
		}
	}

	// Verify checksum if requested
	if opts.VerifyChecksum {
		d.log.Info("Verifying checksum...")
		checksum, err := calculateSHA256(localPath)
		if err != nil {
			// Cleanup on verification failure
			os.RemoveAll(tempSubDir)
			return nil, fmt.Errorf("checksum calculation failed: %w", err)
		}
		result.SHA256 = checksum

		// Check against metadata if available
		if result.MetadataPath != "" {
			meta, err := metadata.Load(result.MetadataPath)
			if err != nil {
				d.log.Warn("Failed to load metadata for verification", "error", err)
			} else if meta.SHA256 != "" && meta.SHA256 != checksum {
				// Cleanup on verification failure
				os.RemoveAll(tempSubDir)
				return nil, fmt.Errorf("checksum mismatch: expected %s, got %s", meta.SHA256, checksum)
			} else if meta.SHA256 == checksum {
				d.log.Info("Checksum verified successfully", "sha256", checksum)
			}
		}
	}

	d.log.Info("Download completed", "path", localPath, "size", cloud.FormatSize(result.Size))

	return result, nil
}

// DownloadFromURI downloads a backup using a cloud URI
func (d *CloudDownloader) DownloadFromURI(ctx context.Context, uri string, opts DownloadOptions) (*DownloadResult, error) {
	// Parse URI
	cloudURI, err := cloud.ParseCloudURI(uri)
	if err != nil {
		return nil, fmt.Errorf("invalid cloud URI: %w", err)
	}

	// Download using the path from the URI
	return d.Download(ctx, cloudURI.Path, opts)
}

// Cleanup removes downloaded temp files
func (r *DownloadResult) Cleanup() error {
	if !r.IsTempFile {
		return nil // Don't delete non-temp files
	}

	// Remove the entire temp directory
	tempDir := filepath.Dir(r.LocalPath)
	if err := os.RemoveAll(tempDir); err != nil {
		return fmt.Errorf("failed to cleanup temp files: %w", err)
	}

	return nil
}

// calculateSHA256 calculates the SHA-256 checksum of a file
func calculateSHA256(filePath string) (string, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer file.Close()

	hash := sha256.New()
	if _, err := io.Copy(hash, file); err != nil {
		return "", err
	}

	return hex.EncodeToString(hash.Sum(nil)), nil
}

// DownloadFromCloudURI is a convenience function to download from a cloud URI
func DownloadFromCloudURI(ctx context.Context, uri string, opts DownloadOptions) (*DownloadResult, error) {
	// Parse URI
	cloudURI, err := cloud.ParseCloudURI(uri)
	if err != nil {
		return nil, fmt.Errorf("invalid cloud URI: %w", err)
	}

	// Create config from the URI
	cfg := cloudURI.ToConfig()

	// Create backend
	backend, err := cloud.NewBackend(cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create cloud backend: %w", err)
	}

	// Create downloader
	log := logger.New("info", "text")
	downloader := NewCloudDownloader(backend, log)

	// Download
	return downloader.Download(ctx, cloudURI.Path, opts)
}
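For orientation, this is the shape of cloud.Backend and cloud.BackupInfo that the downloader and the cleanup command appear to rely on, inferred from the call sites in this commit; the real declarations live in internal/cloud and may differ in detail:

package cloud

import (
	"context"
	"time"
)

// BackupInfo: assumed fields, based on their use in runCloudCleanup above.
type BackupInfo struct {
	Key          string    // Object key in the bucket (passed to Delete)
	Name         string    // Base file name (matched against the cleanup pattern)
	Size         int64     // Size in bytes
	LastModified time.Time // Upload time, used by the retention pass
}

// Backend: assumed method set, based on the calls in cloud_download.go
// and cmd/cleanup.go.
type Backend interface {
	Download(ctx context.Context, remotePath, localPath string, progress func(transferred, total int64)) error
	GetSize(ctx context.Context, remotePath string) (int64, error)
	Exists(ctx context.Context, remotePath string) (bool, error)
	Delete(ctx context.Context, remotePath string) error
	List(ctx context.Context, prefix string) ([]BackupInfo, error) // sorted oldest first
}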