ci: add golangci-lint config and fix formatting

- Add .golangci.yml with minimal linters (govet, ineffassign)
- Run gofmt -s and goimports on all files to fix formatting
- Disable fieldalignment and copylocks checks in govet (a sketch of the resulting config follows below)
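
The committed .golangci.yml itself is not rendered in this view. Below is a minimal sketch of a config matching the bullets above, assuming golangci-lint's standard schema — the committed file may differ:

# Hypothetical reconstruction of .golangci.yml; the committed file is not shown here.
linters:
  disable-all: true
  enable:
    - govet
    - ineffassign

linters-settings:
  govet:
    disable:
      - fieldalignment
      - copylocks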
2025-12-11 17:53:28 +01:00
parent 6b66ae5429
commit 914307ac8f
89 changed files with 1516 additions and 1618 deletions

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"dbbackup/internal/cloud"
"github.com/spf13/cobra"
)
@@ -42,11 +43,11 @@ var clusterCmd = &cobra.Command{
// Global variables for backup flags (to avoid initialization cycle)
var (
backupTypeFlag string
baseBackupFlag string
encryptBackupFlag bool
encryptionKeyFile string
encryptionKeyEnv string
)
var singleCmd = &cobra.Command{
@@ -74,7 +75,7 @@ Examples:
} else {
return fmt.Errorf("database name required (provide as argument or set SINGLE_DB_NAME)")
}
return runSingleBackup(cmd.Context(), dbName)
},
}
@@ -100,7 +101,7 @@ Warning: Sample backups may break referential integrity due to sampling!`,
} else {
return fmt.Errorf("database name required (provide as argument or set SAMPLE_DB_NAME)")
}
return runSampleBackup(cmd.Context(), dbName)
},
}
@@ -110,18 +111,18 @@ func init() {
backupCmd.AddCommand(clusterCmd)
backupCmd.AddCommand(singleCmd)
backupCmd.AddCommand(sampleCmd)
// Incremental backup flags (single backup only) - using global vars to avoid initialization cycle
singleCmd.Flags().StringVar(&backupTypeFlag, "backup-type", "full", "Backup type: full or incremental [incremental NOT IMPLEMENTED]")
singleCmd.Flags().StringVar(&baseBackupFlag, "base-backup", "", "Path to base backup (required for incremental)")
// Encryption flags for all backup commands
for _, cmd := range []*cobra.Command{clusterCmd, singleCmd, sampleCmd} {
cmd.Flags().BoolVar(&encryptBackupFlag, "encrypt", false, "Encrypt backup with AES-256-GCM")
cmd.Flags().StringVar(&encryptionKeyFile, "encryption-key-file", "", "Path to encryption key file (32 bytes)")
cmd.Flags().StringVar(&encryptionKeyEnv, "encryption-key-env", "DBBACKUP_ENCRYPTION_KEY", "Environment variable containing encryption key/passphrase")
}
// Cloud storage flags for all backup commands
for _, cmd := range []*cobra.Command{clusterCmd, singleCmd, sampleCmd} {
cmd.Flags().String("cloud", "", "Cloud storage URI (e.g., s3://bucket/path) - takes precedence over individual flags")
@@ -131,7 +132,7 @@ func init() {
cmd.Flags().String("cloud-region", "us-east-1", "Cloud region")
cmd.Flags().String("cloud-endpoint", "", "Cloud endpoint (for MinIO/B2)")
cmd.Flags().String("cloud-prefix", "", "Cloud key prefix")
// Add PreRunE to update config from flags
originalPreRun := cmd.PreRunE
cmd.PreRunE = func(c *cobra.Command, args []string) error {
@@ -141,7 +142,7 @@ func init() {
return err
}
}
// Check if --cloud URI flag is provided (takes precedence)
if c.Flags().Changed("cloud") {
if err := parseCloudURIFlag(c); err != nil {
@@ -155,45 +156,45 @@ func init() {
cfg.CloudAutoUpload = true
}
}
if c.Flags().Changed("cloud-provider") {
cfg.CloudProvider, _ = c.Flags().GetString("cloud-provider")
}
if c.Flags().Changed("cloud-bucket") {
cfg.CloudBucket, _ = c.Flags().GetString("cloud-bucket")
}
if c.Flags().Changed("cloud-region") {
cfg.CloudRegion, _ = c.Flags().GetString("cloud-region")
}
if c.Flags().Changed("cloud-endpoint") {
cfg.CloudEndpoint, _ = c.Flags().GetString("cloud-endpoint")
}
if c.Flags().Changed("cloud-prefix") {
cfg.CloudPrefix, _ = c.Flags().GetString("cloud-prefix")
}
}
return nil
}
}
// Sample backup flags - use local variables to avoid cfg access during init
var sampleStrategy string
var sampleValue int
var sampleRatio int
var samplePercent int
var sampleCount int
sampleCmd.Flags().StringVar(&sampleStrategy, "sample-strategy", "ratio", "Sampling strategy (ratio|percent|count)")
sampleCmd.Flags().IntVar(&sampleValue, "sample-value", 10, "Sampling value")
sampleCmd.Flags().IntVar(&sampleRatio, "sample-ratio", 0, "Take every Nth record")
sampleCmd.Flags().IntVar(&samplePercent, "sample-percent", 0, "Take N% of records")
sampleCmd.Flags().IntVar(&sampleCount, "sample-count", 0, "Take first N records")
// Set up pre-run hook to handle convenience flags and update cfg
sampleCmd.PreRunE = func(cmd *cobra.Command, args []string) error {
// Update cfg with flag values
@@ -214,7 +215,7 @@ func init() {
}
return nil
}
// Mark the strategy flags as mutually exclusive
sampleCmd.MarkFlagsMutuallyExclusive("sample-ratio", "sample-percent", "sample-count")
}
@@ -225,32 +226,32 @@ func parseCloudURIFlag(cmd *cobra.Command) error {
if cloudURI == "" {
return nil
}
// Parse cloud URI
uri, err := cloud.ParseCloudURI(cloudURI)
if err != nil {
return fmt.Errorf("invalid cloud URI: %w", err)
}
// Enable cloud and auto-upload
cfg.CloudEnabled = true
cfg.CloudAutoUpload = true
// Update config from URI
cfg.CloudProvider = uri.Provider
cfg.CloudBucket = uri.Bucket
if uri.Region != "" {
cfg.CloudRegion = uri.Region
}
if uri.Endpoint != "" {
cfg.CloudEndpoint = uri.Endpoint
}
if uri.Path != "" {
cfg.CloudPrefix = uri.Dir()
}
return nil
}
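
Taken together, these hooks let one --cloud URI stand in for the individual provider/bucket/region/endpoint flags. An assumed invocation, based only on the flags registered in this diff (binary name, database name, and paths are illustrative):

dbbackup backup single mydb \
  --cloud s3://my-bucket/prod/backups \
  --encrypt \
  --encryption-key-file /etc/dbbackup/backup.key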

View File

@@ -19,21 +19,21 @@ func runClusterBackup(ctx context.Context) error {
if !cfg.IsPostgreSQL() {
return fmt.Errorf("cluster backup requires PostgreSQL (detected: %s). Use 'backup single' for individual database backups", cfg.DisplayDatabaseType())
}
// Update config from environment
cfg.UpdateFromEnvironment()
// Validate configuration
if err := cfg.Validate(); err != nil {
return fmt.Errorf("configuration error: %w", err)
}
// Check privileges
privChecker := security.NewPrivilegeChecker(log)
if err := privChecker.CheckAndWarn(cfg.AllowRoot); err != nil {
return err
}
// Check resource limits
if cfg.CheckResources {
resChecker := security.NewResourceChecker(log)
@@ -41,23 +41,23 @@ func runClusterBackup(ctx context.Context) error {
log.Warn("Failed to check resource limits", "error", err)
}
}
log.Info("Starting cluster backup",
"host", cfg.Host,
log.Info("Starting cluster backup",
"host", cfg.Host,
"port", cfg.Port,
"backup_dir", cfg.BackupDir)
// Audit log: backup start
user := security.GetCurrentUser()
auditLogger.LogBackupStart(user, "all_databases", "cluster")
// Rate limit connection attempts
host := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
if err := rateLimiter.CheckAndWait(host); err != nil {
auditLogger.LogBackupFailed(user, "all_databases", err)
return fmt.Errorf("rate limit exceeded for %s. Too many connection attempts. Wait 60s or check credentials: %w", host, err)
}
// Create database instance
db, err := database.New(cfg, log)
if err != nil {
@@ -65,7 +65,7 @@ func runClusterBackup(ctx context.Context) error {
return fmt.Errorf("failed to create database instance: %w", err)
}
defer db.Close()
// Connect to database
if err := db.Connect(ctx); err != nil {
rateLimiter.RecordFailure(host)
@@ -73,16 +73,16 @@ func runClusterBackup(ctx context.Context) error {
return fmt.Errorf("failed to connect to %s@%s:%d. Check: 1) Database is running 2) Credentials are correct 3) pg_hba.conf allows connection: %w", cfg.User, cfg.Host, cfg.Port, err)
}
rateLimiter.RecordSuccess(host)
// Create backup engine
engine := backup.New(cfg, log, db)
// Perform cluster backup
if err := engine.BackupCluster(ctx); err != nil {
auditLogger.LogBackupFailed(user, "all_databases", err)
return err
}
// Apply encryption if requested
if isEncryptionEnabled() {
if err := encryptLatestClusterBackup(); err != nil {
@@ -91,10 +91,10 @@ func runClusterBackup(ctx context.Context) error {
}
log.Info("Cluster backup encrypted successfully")
}
// Audit log: backup success
auditLogger.LogBackupComplete(user, "all_databases", cfg.BackupDir, 0)
// Cleanup old backups if retention policy is enabled
if cfg.RetentionDays > 0 {
retentionPolicy := security.NewRetentionPolicy(cfg.RetentionDays, cfg.MinBackups, log)
@@ -104,7 +104,7 @@ func runClusterBackup(ctx context.Context) error {
log.Info("Cleaned up old backups", "deleted", deleted, "freed_mb", freed/1024/1024)
}
}
// Save configuration for future use (unless disabled)
if !cfg.NoSaveConfig {
localCfg := config.ConfigFromConfig(cfg)
@@ -115,7 +115,7 @@ func runClusterBackup(ctx context.Context) error {
auditLogger.LogConfigChange(user, "config_file", "", ".dbbackup.conf")
}
}
return nil
}
@@ -123,17 +123,17 @@ func runClusterBackup(ctx context.Context) error {
func runSingleBackup(ctx context.Context, databaseName string) error {
// Update config from environment
cfg.UpdateFromEnvironment()
// Get backup type and base backup from command line flags (set via global vars in PreRunE)
// These are populated by cobra flag binding in cmd/backup.go
backupType := "full" // Default to full backup if not specified
baseBackup := "" // Base backup path for incremental backups
backupType := "full" // Default to full backup if not specified
baseBackup := "" // Base backup path for incremental backups
// Validate backup type
if backupType != "full" && backupType != "incremental" {
return fmt.Errorf("invalid backup type: %s (must be 'full' or 'incremental')", backupType)
}
// Validate incremental backup requirements
if backupType == "incremental" {
if !cfg.IsPostgreSQL() && !cfg.IsMySQL() {
@@ -147,41 +147,41 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
return fmt.Errorf("base backup file not found at %s. Ensure path is correct and file exists", baseBackup)
}
}
// Validate configuration
if err := cfg.Validate(); err != nil {
return fmt.Errorf("configuration error: %w", err)
}
// Check privileges
privChecker := security.NewPrivilegeChecker(log)
if err := privChecker.CheckAndWarn(cfg.AllowRoot); err != nil {
return err
}
log.Info("Starting single database backup",
log.Info("Starting single database backup",
"database", databaseName,
"db_type", cfg.DatabaseType,
"backup_type", backupType,
"host", cfg.Host,
"host", cfg.Host,
"port", cfg.Port,
"backup_dir", cfg.BackupDir)
if backupType == "incremental" {
log.Info("Incremental backup", "base_backup", baseBackup)
}
// Audit log: backup start
user := security.GetCurrentUser()
auditLogger.LogBackupStart(user, databaseName, "single")
// Rate limit connection attempts
host := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
if err := rateLimiter.CheckAndWait(host); err != nil {
auditLogger.LogBackupFailed(user, databaseName, err)
return fmt.Errorf("rate limit exceeded: %w", err)
}
// Create database instance
db, err := database.New(cfg, log)
if err != nil {
@@ -189,7 +189,7 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
return fmt.Errorf("failed to create database instance: %w", err)
}
defer db.Close()
// Connect to database
if err := db.Connect(ctx); err != nil {
rateLimiter.RecordFailure(host)
@@ -197,7 +197,7 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
return fmt.Errorf("failed to connect to database: %w", err)
}
rateLimiter.RecordSuccess(host)
// Verify database exists
exists, err := db.DatabaseExists(ctx, databaseName)
if err != nil {
@@ -209,57 +209,57 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
auditLogger.LogBackupFailed(user, databaseName, err)
return err
}
// Create backup engine
engine := backup.New(cfg, log, db)
// Perform backup based on type
var backupErr error
if backupType == "incremental" {
// Incremental backup - supported for PostgreSQL and MySQL
log.Info("Creating incremental backup", "base_backup", baseBackup)
// Create appropriate incremental engine based on database type
var incrEngine interface {
FindChangedFiles(context.Context, *backup.IncrementalBackupConfig) ([]backup.ChangedFile, error)
CreateIncrementalBackup(context.Context, *backup.IncrementalBackupConfig, []backup.ChangedFile) error
}
if cfg.IsPostgreSQL() {
incrEngine = backup.NewPostgresIncrementalEngine(log)
} else {
incrEngine = backup.NewMySQLIncrementalEngine(log)
}
// Configure incremental backup
incrConfig := &backup.IncrementalBackupConfig{
BaseBackupPath: baseBackup,
DataDirectory: cfg.BackupDir, // Note: This should be the actual data directory
CompressionLevel: cfg.CompressionLevel,
}
// Find changed files
changedFiles, err := incrEngine.FindChangedFiles(ctx, incrConfig)
if err != nil {
return fmt.Errorf("failed to find changed files: %w", err)
}
// Create incremental backup
if err := incrEngine.CreateIncrementalBackup(ctx, incrConfig, changedFiles); err != nil {
return fmt.Errorf("failed to create incremental backup: %w", err)
}
log.Info("Incremental backup completed", "changed_files", len(changedFiles))
} else {
// Full backup
backupErr = engine.BackupSingle(ctx, databaseName)
}
if backupErr != nil {
auditLogger.LogBackupFailed(user, databaseName, backupErr)
return backupErr
}
// Apply encryption if requested
if isEncryptionEnabled() {
if err := encryptLatestBackup(databaseName); err != nil {
@@ -268,10 +268,10 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
}
log.Info("Backup encrypted successfully")
}
// Audit log: backup success
auditLogger.LogBackupComplete(user, databaseName, cfg.BackupDir, 0)
// Cleanup old backups if retention policy is enabled
if cfg.RetentionDays > 0 {
retentionPolicy := security.NewRetentionPolicy(cfg.RetentionDays, cfg.MinBackups, log)
@@ -281,7 +281,7 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
log.Info("Cleaned up old backups", "deleted", deleted, "freed_mb", freed/1024/1024)
}
}
// Save configuration for future use (unless disabled)
if !cfg.NoSaveConfig {
localCfg := config.ConfigFromConfig(cfg)
@@ -292,7 +292,7 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
auditLogger.LogConfigChange(user, "config_file", "", ".dbbackup.conf")
}
}
return nil
}
@@ -300,23 +300,23 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
func runSampleBackup(ctx context.Context, databaseName string) error {
// Update config from environment
cfg.UpdateFromEnvironment()
// Validate configuration
if err := cfg.Validate(); err != nil {
return fmt.Errorf("configuration error: %w", err)
}
// Check privileges
privChecker := security.NewPrivilegeChecker(log)
if err := privChecker.CheckAndWarn(cfg.AllowRoot); err != nil {
return err
}
// Validate sample parameters
if cfg.SampleValue <= 0 {
return fmt.Errorf("sample value must be greater than 0")
}
switch cfg.SampleStrategy {
case "percent":
if cfg.SampleValue > 100 {
@@ -331,27 +331,27 @@ func runSampleBackup(ctx context.Context, databaseName string) error {
default:
return fmt.Errorf("invalid sampling strategy: %s (must be ratio, percent, or count)", cfg.SampleStrategy)
}
log.Info("Starting sample database backup",
log.Info("Starting sample database backup",
"database", databaseName,
"db_type", cfg.DatabaseType,
"strategy", cfg.SampleStrategy,
"value", cfg.SampleValue,
"host", cfg.Host,
"host", cfg.Host,
"port", cfg.Port,
"backup_dir", cfg.BackupDir)
// Audit log: backup start
user := security.GetCurrentUser()
auditLogger.LogBackupStart(user, databaseName, "sample")
// Rate limit connection attempts
host := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
if err := rateLimiter.CheckAndWait(host); err != nil {
auditLogger.LogBackupFailed(user, databaseName, err)
return fmt.Errorf("rate limit exceeded: %w", err)
}
// Create database instance
db, err := database.New(cfg, log)
if err != nil {
@@ -359,7 +359,7 @@ func runSampleBackup(ctx context.Context, databaseName string) error {
return fmt.Errorf("failed to create database instance: %w", err)
}
defer db.Close()
// Connect to database
if err := db.Connect(ctx); err != nil {
rateLimiter.RecordFailure(host)
@@ -367,7 +367,7 @@ func runSampleBackup(ctx context.Context, databaseName string) error {
return fmt.Errorf("failed to connect to database: %w", err)
}
rateLimiter.RecordSuccess(host)
// Verify database exists
exists, err := db.DatabaseExists(ctx, databaseName)
if err != nil {
@@ -379,16 +379,16 @@ func runSampleBackup(ctx context.Context, databaseName string) error {
auditLogger.LogBackupFailed(user, databaseName, err)
return err
}
// Create backup engine
engine := backup.New(cfg, log, db)
// Perform sample backup
if err := engine.BackupSample(ctx, databaseName); err != nil {
auditLogger.LogBackupFailed(user, databaseName, err)
return err
}
// Apply encryption if requested
if isEncryptionEnabled() {
if err := encryptLatestBackup(databaseName); err != nil {
@@ -397,10 +397,10 @@ func runSampleBackup(ctx context.Context, databaseName string) error {
}
log.Info("Sample backup encrypted successfully")
}
// Audit log: backup success
auditLogger.LogBackupComplete(user, databaseName, cfg.BackupDir, 0)
// Save configuration for future use (unless disabled)
if !cfg.NoSaveConfig {
localCfg := config.ConfigFromConfig(cfg)
@@ -411,9 +411,10 @@ func runSampleBackup(ctx context.Context, databaseName string) error {
auditLogger.LogConfigChange(user, "config_file", "", ".dbbackup.conf")
}
}
return nil
}
// encryptLatestBackup finds and encrypts the most recent backup for a database
func encryptLatestBackup(databaseName string) error {
// Load encryption key
@@ -452,86 +453,86 @@ func encryptLatestClusterBackup() error {
// findLatestBackup finds the most recently created backup file for a database
func findLatestBackup(backupDir, databaseName string) (string, error) {
entries, err := os.ReadDir(backupDir)
if err != nil {
return "", fmt.Errorf("failed to read backup directory: %w", err)
}
var latestPath string
var latestTime time.Time
prefix := "db_" + databaseName + "_"
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
// Skip metadata files and already encrypted files
if strings.HasSuffix(name, ".meta.json") || strings.HasSuffix(name, ".encrypted") {
continue
}
// Match database backup files
if strings.HasPrefix(name, prefix) && (strings.HasSuffix(name, ".dump") ||
strings.HasSuffix(name, ".dump.gz") || strings.HasSuffix(name, ".sql.gz")) {
info, err := entry.Info()
if err != nil {
continue
}
if info.ModTime().After(latestTime) {
latestTime = info.ModTime()
latestPath = filepath.Join(backupDir, name)
}
}
}
if latestPath == "" {
return "", fmt.Errorf("no backup found for database: %s", databaseName)
}
return latestPath, nil
}
// findLatestClusterBackup finds the most recently created cluster backup
func findLatestClusterBackup(backupDir string) (string, error) {
entries, err := os.ReadDir(backupDir)
if err != nil {
return "", fmt.Errorf("failed to read backup directory: %w", err)
}
var latestPath string
var latestTime time.Time
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
// Skip metadata files and already encrypted files
if strings.HasSuffix(name, ".meta.json") || strings.HasSuffix(name, ".encrypted") {
continue
}
// Match cluster backup files
if strings.HasPrefix(name, "cluster_") && strings.HasSuffix(name, ".tar.gz") {
info, err := entry.Info()
if err != nil {
continue
}
if info.ModTime().After(latestTime) {
latestTime = info.ModTime()
latestPath = filepath.Join(backupDir, name)
}
}
}
if latestPath == "" {
return "", fmt.Errorf("no cluster backup found")
}
return latestPath, nil
}
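
Both helpers choose the newest matching file by modification time and skip .meta.json and .encrypted entries. A hypothetical call site (the real callers are encryptLatestBackup and encryptLatestClusterBackup above; the database name is illustrative):

path, err := findLatestBackup(cfg.BackupDir, "appdb")
if err != nil {
	return err // no matching backup, or the directory was unreadable
}
log.Info("Most recent backup selected", "path", path)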

View File

@@ -11,6 +11,7 @@ import (
"dbbackup/internal/cloud"
"dbbackup/internal/metadata"
"dbbackup/internal/retention"
"github.com/spf13/cobra"
)
@@ -41,9 +42,9 @@ Examples:
}
var (
retentionDays int
minBackups int
dryRun bool
cleanupPattern string
)
@@ -57,7 +58,7 @@ func init() {
func runCleanup(cmd *cobra.Command, args []string) error {
backupPath := args[0]
// Check if this is a cloud URI
if isCloudURIPath(backupPath) {
return runCloudCleanup(cmd.Context(), backupPath)
@@ -108,7 +109,7 @@ func runCleanup(cmd *cobra.Command, args []string) error {
fmt.Printf("📊 Results:\n")
fmt.Printf(" Total backups: %d\n", result.TotalBackups)
fmt.Printf(" Eligible for deletion: %d\n", result.EligibleForDeletion)
if len(result.Deleted) > 0 {
fmt.Printf("\n")
if dryRun {
@@ -142,7 +143,7 @@ func runCleanup(cmd *cobra.Command, args []string) error {
}
fmt.Println(strings.Repeat("─", 50))
if dryRun {
fmt.Println("✅ Dry run completed (no files were deleted)")
} else if len(result.Deleted) > 0 {
@@ -174,7 +175,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
if err != nil {
return fmt.Errorf("invalid cloud URI: %w", err)
}
fmt.Printf("☁️ Cloud Cleanup Policy:\n")
fmt.Printf(" URI: %s\n", uri)
fmt.Printf(" Provider: %s\n", cloudURI.Provider)
@@ -188,27 +189,27 @@ func runCloudCleanup(ctx context.Context, uri string) error {
fmt.Printf(" Mode: DRY RUN (no files will be deleted)\n")
}
fmt.Println()
// Create cloud backend
cfg := cloudURI.ToConfig()
backend, err := cloud.NewBackend(cfg)
if err != nil {
return fmt.Errorf("failed to create cloud backend: %w", err)
}
// List all backups
backups, err := backend.List(ctx, cloudURI.Path)
if err != nil {
return fmt.Errorf("failed to list cloud backups: %w", err)
}
if len(backups) == 0 {
fmt.Println("No backups found in cloud storage")
return nil
}
fmt.Printf("Found %d backup(s) in cloud storage\n\n", len(backups))
// Filter backups based on pattern if specified
var filteredBackups []cloud.BackupInfo
if cleanupPattern != "" {
@@ -222,17 +223,17 @@ func runCloudCleanup(ctx context.Context, uri string) error {
} else {
filteredBackups = backups
}
// Sort by modification time (oldest first)
// Already sorted by backend.List
// Calculate retention date
cutoffDate := time.Now().AddDate(0, 0, -retentionDays)
// Determine which backups to delete
var toDelete []cloud.BackupInfo
var toKeep []cloud.BackupInfo
for _, backup := range filteredBackups {
if backup.LastModified.Before(cutoffDate) {
toDelete = append(toDelete, backup)
@@ -240,7 +241,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
toKeep = append(toKeep, backup)
}
}
// Ensure we keep minimum backups
totalBackups := len(filteredBackups)
if totalBackups-len(toDelete) < minBackups {
@@ -249,39 +250,39 @@ func runCloudCleanup(ctx context.Context, uri string) error {
if keepCount > len(toDelete) {
keepCount = len(toDelete)
}
// Move oldest from toDelete to toKeep
for i := len(toDelete) - 1; i >= len(toDelete)-keepCount && i >= 0; i-- {
toKeep = append(toKeep, toDelete[i])
toDelete = toDelete[:i]
}
}
// Display results
fmt.Printf("📊 Results:\n")
fmt.Printf(" Total backups: %d\n", totalBackups)
fmt.Printf(" Eligible for deletion: %d\n", len(toDelete))
fmt.Printf(" Will keep: %d\n", len(toKeep))
fmt.Println()
if len(toDelete) > 0 {
if dryRun {
fmt.Printf("🔍 Would delete %d backup(s):\n", len(toDelete))
} else {
fmt.Printf("🗑️ Deleting %d backup(s):\n", len(toDelete))
}
var totalSize int64
var deletedCount int
for _, backup := range toDelete {
fmt.Printf(" - %s (%s, %s old)\n",
backup.Name,
fmt.Printf(" - %s (%s, %s old)\n",
backup.Name,
cloud.FormatSize(backup.Size),
formatBackupAge(backup.LastModified))
totalSize += backup.Size
if !dryRun {
if err := backend.Delete(ctx, backup.Key); err != nil {
fmt.Printf(" ❌ Error: %v\n", err)
@@ -292,18 +293,18 @@ func runCloudCleanup(ctx context.Context, uri string) error {
}
}
}
fmt.Printf("\n💾 Space %s: %s\n",
fmt.Printf("\n💾 Space %s: %s\n",
map[bool]string{true: "would be freed", false: "freed"}[dryRun],
cloud.FormatSize(totalSize))
if !dryRun && deletedCount > 0 {
fmt.Printf("✅ Successfully deleted %d backup(s)\n", deletedCount)
}
} else {
fmt.Println("No backups eligible for deletion")
}
return nil
}
@@ -311,7 +312,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
func formatBackupAge(t time.Time) string {
d := time.Since(t)
days := int(d.Hours() / 24)
if days == 0 {
return "today"
} else if days == 1 {
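
The hunk cuts off inside formatBackupAge. A plausible completion, assuming the remaining branches keep counting in whole days (nothing past the days == 1 branch appears in this diff):

func formatBackupAge(t time.Time) string {
	d := time.Since(t)
	days := int(d.Hours() / 24)

	if days == 0 {
		return "today"
	} else if days == 1 {
		return "1 day" // assumed wording; not shown in the diff
	}
	return fmt.Sprintf("%d days", days) // assumed fallback
}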

View File

@@ -9,6 +9,7 @@ import (
"time"
"dbbackup/internal/cloud"
"github.com/spf13/cobra"
)
@@ -203,9 +204,9 @@ func runCloudUpload(cmd *cobra.Command, args []string) error {
}
percent := int(float64(transferred) / float64(total) * 100)
if percent != lastPercent && percent%10 == 0 {
fmt.Printf(" Progress: %d%% (%s / %s)\n",
percent,
cloud.FormatSize(transferred),
fmt.Printf(" Progress: %d%% (%s / %s)\n",
percent,
cloud.FormatSize(transferred),
cloud.FormatSize(total))
lastPercent = percent
}
@@ -258,9 +259,9 @@ func runCloudDownload(cmd *cobra.Command, args []string) error {
}
percent := int(float64(transferred) / float64(total) * 100)
if percent != lastPercent && percent%10 == 0 {
fmt.Printf(" Progress: %d%% (%s / %s)\n",
percent,
cloud.FormatSize(transferred),
fmt.Printf(" Progress: %d%% (%s / %s)\n",
percent,
cloud.FormatSize(transferred),
cloud.FormatSize(total))
lastPercent = percent
}
@@ -308,7 +309,7 @@ func runCloudList(cmd *cobra.Command, args []string) error {
var totalSize int64
for _, backup := range backups {
totalSize += backup.Size
if cloudVerbose {
fmt.Printf("📦 %s\n", backup.Name)
fmt.Printf(" Size: %s\n", cloud.FormatSize(backup.Size))
@@ -320,8 +321,8 @@ func runCloudList(cmd *cobra.Command, args []string) error {
} else {
age := time.Since(backup.LastModified)
ageStr := formatAge(age)
fmt.Printf("%-50s %12s %s\n",
backup.Name,
fmt.Printf("%-50s %12s %s\n",
backup.Name,
cloud.FormatSize(backup.Size),
ageStr)
}
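
The upload and download paths share one throttling idea: print only when the integer percentage lands on a new multiple of ten. Isolated as a sketch — the callback signature is an assumption, since only the body appears in these hunks:

lastPercent := -1
progress := func(transferred, total int64) {
	if total <= 0 {
		return
	}
	percent := int(float64(transferred) / float64(total) * 100)
	if percent != lastPercent && percent%10 == 0 {
		fmt.Printf(" Progress: %d%% (%s / %s)\n",
			percent,
			cloud.FormatSize(transferred),
			cloud.FormatSize(total))
		lastPercent = percent
	}
}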

View File

@@ -18,30 +18,30 @@ var cpuCmd = &cobra.Command{
func runCPUInfo(ctx context.Context) error {
log.Info("Detecting CPU information...")
// Optimize CPU settings if auto-detect is enabled
if cfg.AutoDetectCores {
if err := cfg.OptimizeForCPU(); err != nil {
log.Warn("CPU optimization failed", "error", err)
}
}
// Get CPU information
cpuInfo, err := cfg.GetCPUInfo()
if err != nil {
return fmt.Errorf("failed to detect CPU: %w", err)
}
fmt.Println("=== CPU Information ===")
fmt.Print(cpuInfo.FormatCPUInfo())
fmt.Println("\n=== Current Configuration ===")
fmt.Printf("Auto-detect cores: %t\n", cfg.AutoDetectCores)
fmt.Printf("CPU workload type: %s\n", cfg.CPUWorkloadType)
fmt.Printf("Parallel jobs (restore): %d\n", cfg.Jobs)
fmt.Printf("Dump jobs (backup): %d\n", cfg.DumpJobs)
fmt.Printf("Maximum cores limit: %d\n", cfg.MaxCores)
// Show optimization recommendations
fmt.Println("\n=== Optimization Recommendations ===")
if cpuInfo.PhysicalCores > 1 {
@@ -58,7 +58,7 @@ func runCPUInfo(ctx context.Context) error {
fmt.Printf("Recommended jobs (CPU intensive): %d\n", optimal)
}
}
// Show current vs optimal
if cfg.AutoDetectCores {
fmt.Println("\n✅ CPU optimization is enabled")
@@ -67,10 +67,10 @@ func runCPUInfo(ctx context.Context) error {
fmt.Println("\n⚠ CPU optimization is disabled")
fmt.Println("Consider enabling --auto-detect-cores for better performance")
}
return nil
}
func init() {
rootCmd.AddCommand(cpuCmd)
}
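
The recommendations key off physical core count. A toy version of the idea — the actual formula lives in cfg.GetCPUInfo/OptimizeForCPU, which this diff does not show, so both branches below are assumptions:

// Hypothetical sketch; not the repository's recommendation logic.
func recommendJobs(physicalCores int, cpuIntensive bool) int {
	if physicalCores <= 1 {
		return 1
	}
	if cpuIntensive {
		return physicalCores - 1 // leave headroom for the database itself
	}
	return physicalCores
}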

View File

@@ -17,17 +17,17 @@ func loadEncryptionKey(keyFile, keyEnvVar string) ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("failed to read encryption key file: %w", err)
}
// Try to decode as base64 first
if decoded, err := base64.StdEncoding.DecodeString(strings.TrimSpace(string(keyData))); err == nil && len(decoded) == crypto.KeySize {
return decoded, nil
}
// Use raw bytes if exactly 32 bytes
if len(keyData) == crypto.KeySize {
return keyData, nil
}
// Otherwise treat as passphrase and derive key
salt, err := crypto.GenerateSalt()
if err != nil {
@@ -36,19 +36,19 @@ func loadEncryptionKey(keyFile, keyEnvVar string) ([]byte, error) {
key := crypto.DeriveKey([]byte(strings.TrimSpace(string(keyData))), salt)
return key, nil
}
// Priority 2: Environment variable
if keyEnvVar != "" {
keyData := os.Getenv(keyEnvVar)
if keyData == "" {
return nil, fmt.Errorf("encryption enabled but %s environment variable not set", keyEnvVar)
}
// Try to decode as base64 first
if decoded, err := base64.StdEncoding.DecodeString(strings.TrimSpace(keyData)); err == nil && len(decoded) == crypto.KeySize {
return decoded, nil
}
// Otherwise treat as passphrase and derive key
salt, err := crypto.GenerateSalt()
if err != nil {
@@ -57,7 +57,7 @@ func loadEncryptionKey(keyFile, keyEnvVar string) ([]byte, error) {
key := crypto.DeriveKey([]byte(strings.TrimSpace(keyData)), salt)
return key, nil
}
return nil, fmt.Errorf("encryption enabled but no key source specified (use --encryption-key-file or set %s)", keyEnvVar)
}
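
loadEncryptionKey accepts three key sources in priority order: a base64-encoded 32-byte key, a raw 32-byte key file, or a passphrase run through crypto.DeriveKey. (Worth noting: the passphrase path derives with a freshly generated salt, which only round-trips if that salt is persisted somewhere this diff doesn't show.) A standard-library sketch for producing a compatible base64 key, assuming crypto.KeySize is 32:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	key := make([]byte, 32) // assumed to match crypto.KeySize
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	// Save to a file for --encryption-key-file, or export as
	// DBBACKUP_ENCRYPTION_KEY for --encryption-key-env.
	fmt.Println(base64.StdEncoding.EncodeToString(key))
}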

View File

@@ -298,7 +298,7 @@ func runPITRStatus(cmd *cobra.Command, args []string) error {
fmt.Printf("WAL Level: %s\n", config.WALLevel)
fmt.Printf("Archive Mode: %s\n", config.ArchiveMode)
fmt.Printf("Archive Command: %s\n", config.ArchiveCommand)
if config.MaxWALSenders > 0 {
fmt.Printf("Max WAL Senders: %d\n", config.MaxWALSenders)
}
@@ -386,7 +386,7 @@ func runWALList(cmd *cobra.Command, args []string) error {
for _, archive := range archives {
size := formatWALSize(archive.ArchivedSize)
timeStr := archive.ArchivedAt.Format("2006-01-02 15:04")
flags := ""
if archive.Compressed {
flags += "C"

View File

@@ -14,6 +14,7 @@ import (
"dbbackup/internal/auth"
"dbbackup/internal/logger"
"dbbackup/internal/tui"
"github.com/spf13/cobra"
)
@@ -42,9 +43,9 @@ var listCmd = &cobra.Command{
}
var interactiveCmd = &cobra.Command{
Use: "interactive",
Short: "Start interactive menu mode",
Long: `Start the interactive menu system for guided backup operations.
Use: "interactive",
Short: "Start interactive menu mode",
Long: `Start the interactive menu system for guided backup operations.
TUI Automation Flags (for testing and CI/CD):
--auto-select <index> Automatically select menu option (0-13)
@@ -64,7 +65,7 @@ TUI Automation Flags (for testing and CI/CD):
cfg.TUIDryRun, _ = cmd.Flags().GetBool("dry-run")
cfg.TUIVerbose, _ = cmd.Flags().GetBool("verbose-tui")
cfg.TUILogFile, _ = cmd.Flags().GetString("tui-log-file")
// Check authentication before starting TUI
if cfg.IsPostgreSQL() {
if mismatch, msg := auth.CheckAuthenticationMismatch(cfg); mismatch {
@@ -72,7 +73,7 @@ TUI Automation Flags (for testing and CI/CD):
return fmt.Errorf("authentication configuration required")
}
}
// Use verbose logger if TUI verbose mode enabled
var interactiveLog logger.Logger
if cfg.TUIVerbose {
@@ -80,7 +81,7 @@ TUI Automation Flags (for testing and CI/CD):
} else {
interactiveLog = logger.NewSilent()
}
// Start the interactive TUI
return tui.RunInteractiveMenu(cfg, interactiveLog)
},
@@ -768,12 +769,12 @@ func containsSQLKeywords(content string) bool {
func mysqlRestoreCommand(archivePath string, compressed bool) string {
parts := []string{"mysql"}
// Only add -h flag if host is not localhost (to use Unix socket)
if cfg.Host != "localhost" && cfg.Host != "127.0.0.1" && cfg.Host != "" {
parts = append(parts, "-h", cfg.Host)
}
parts = append(parts,
"-P", fmt.Sprintf("%d", cfg.Port),
"-u", cfg.User,

View File

@@ -22,22 +22,22 @@ import (
)
var (
restoreConfirm bool
restoreDryRun bool
restoreForce bool
restoreClean bool
restoreCreate bool
restoreJobs int
restoreTarget string
restoreVerbose bool
restoreNoProgress bool
restoreWorkdir string
restoreCleanCluster bool
// Encryption flags
restoreEncryptionKeyFile string
restoreEncryptionKeyEnv string = "DBBACKUP_ENCRYPTION_KEY"
// PITR restore flags (additional to pitr.go)
pitrBaseBackup string
pitrWALArchive string
@@ -244,7 +244,7 @@ func init() {
restoreClusterCmd.Flags().BoolVar(&restoreNoProgress, "no-progress", false, "Disable progress indicators")
restoreClusterCmd.Flags().StringVar(&restoreEncryptionKeyFile, "encryption-key-file", "", "Path to encryption key file (required for encrypted backups)")
restoreClusterCmd.Flags().StringVar(&restoreEncryptionKeyEnv, "encryption-key-env", "DBBACKUP_ENCRYPTION_KEY", "Environment variable containing encryption key")
// PITR restore flags
restorePITRCmd.Flags().StringVar(&pitrBaseBackup, "base-backup", "", "Path to base backup file (.tar.gz) (required)")
restorePITRCmd.Flags().StringVar(&pitrWALArchive, "wal-archive", "", "Path to WAL archive directory (required)")
@@ -260,7 +260,7 @@ func init() {
restorePITRCmd.Flags().BoolVar(&pitrSkipExtract, "skip-extraction", false, "Skip base backup extraction (data dir exists)")
restorePITRCmd.Flags().BoolVar(&pitrAutoStart, "auto-start", false, "Automatically start PostgreSQL after setup")
restorePITRCmd.Flags().BoolVar(&pitrMonitor, "monitor", false, "Monitor recovery progress (requires --auto-start)")
restorePITRCmd.MarkFlagRequired("base-backup")
restorePITRCmd.MarkFlagRequired("wal-archive")
restorePITRCmd.MarkFlagRequired("target-dir")
@@ -269,13 +269,13 @@ func init() {
// runRestoreSingle restores a single database
func runRestoreSingle(cmd *cobra.Command, args []string) error {
archivePath := args[0]
// Check if this is a cloud URI
var cleanupFunc func() error
if cloud.IsCloudURI(archivePath) {
log.Info("Detected cloud URI, downloading backup...", "uri", archivePath)
// Download from cloud
result, err := restore.DownloadFromCloudURI(cmd.Context(), archivePath, restore.DownloadOptions{
VerifyChecksum: true,
@@ -284,10 +284,10 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("failed to download from cloud: %w", err)
}
archivePath = result.LocalPath
cleanupFunc = result.Cleanup
// Ensure cleanup happens on exit
defer func() {
if cleanupFunc != nil {
@@ -296,7 +296,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
}
}
}()
log.Info("Download completed", "local_path", archivePath)
} else {
// Convert to absolute path for local files
@@ -409,7 +409,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(sigChan) // Ensure signal cleanup on exit
go func() {
<-sigChan
log.Warn("Restore interrupted by user")
@@ -418,7 +418,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
// Execute restore
log.Info("Starting restore...", "database", targetDB)
// Audit log: restore start
user := security.GetCurrentUser()
startTime := time.Now()
@@ -428,7 +428,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
auditLogger.LogRestoreFailed(user, targetDB, err)
return fmt.Errorf("restore failed: %w", err)
}
// Audit log: restore success
auditLogger.LogRestoreComplete(user, targetDB, time.Since(startTime))
@@ -491,7 +491,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
checkDir := cfg.BackupDir
if restoreWorkdir != "" {
checkDir = restoreWorkdir
// Verify workdir exists or create it
if _, err := os.Stat(restoreWorkdir); os.IsNotExist(err) {
log.Warn("Working directory does not exist, will be created", "path", restoreWorkdir)
@@ -499,7 +499,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
return fmt.Errorf("cannot create working directory: %w", err)
}
}
log.Warn("⚠️ Using alternative working directory for extraction")
log.Warn(" This is recommended when system disk space is limited")
log.Warn(" Location: " + restoreWorkdir)
@@ -515,7 +515,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
if err := safety.VerifyTools("postgres"); err != nil {
return fmt.Errorf("tool verification failed: %w", err)
}
} // Create database instance for pre-checks
db, err := database.New(cfg, log)
if err != nil {
return fmt.Errorf("failed to create database instance: %w", err)
@@ -592,7 +592,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(sigChan) // Ensure signal cleanup on exit
go func() {
<-sigChan
log.Warn("Restore interrupted by user")
@@ -622,7 +622,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
// Execute cluster restore
log.Info("Starting cluster restore...")
// Audit log: restore start
user := security.GetCurrentUser()
startTime := time.Now()
@@ -632,7 +632,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
auditLogger.LogRestoreFailed(user, "all_databases", err)
return fmt.Errorf("cluster restore failed: %w", err)
}
// Audit log: restore success
auditLogger.LogRestoreComplete(user, "all_databases", time.Since(startTime))
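
Both restore paths install the same interrupt handler around the long-running restore. As a standalone sketch — the goroutine body is elided in the hunks, so the context cancellation is an assumption about what it does:

ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()

sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(sigChan) // ensure signal cleanup on exit

go func() {
	<-sigChan
	log.Warn("Restore interrupted by user")
	cancel() // assumed: abort the in-flight restore via context
}()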

View File

@@ -7,6 +7,7 @@ import (
"dbbackup/internal/config"
"dbbackup/internal/logger"
"dbbackup/internal/security"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -42,13 +43,13 @@ For help with specific commands, use: dbbackup [command] --help`,
if cfg == nil {
return nil
}
// Store which flags were explicitly set by user
flagsSet := make(map[string]bool)
cmd.Flags().Visit(func(f *pflag.Flag) {
flagsSet[f.Name] = true
})
// Load local config if not disabled
if !cfg.NoLoadConfig {
if localCfg, err := config.LoadLocalConfig(); err != nil {
@@ -65,11 +66,11 @@ For help with specific commands, use: dbbackup [command] --help`,
savedDumpJobs := cfg.DumpJobs
savedRetentionDays := cfg.RetentionDays
savedMinBackups := cfg.MinBackups
// Apply config from file
config.ApplyLocalConfig(cfg, localCfg)
log.Info("Loaded configuration from .dbbackup.conf")
// Restore explicitly set flag values (flags have priority)
if flagsSet["backup-dir"] {
cfg.BackupDir = savedBackupDir
@@ -103,7 +104,7 @@ For help with specific commands, use: dbbackup [command] --help`,
}
}
}
return cfg.SetDatabaseType(cfg.DatabaseType)
},
}
@@ -112,10 +113,10 @@ For help with specific commands, use: dbbackup [command] --help`,
func Execute(ctx context.Context, config *config.Config, logger logger.Logger) error {
cfg = config
log = logger
// Initialize audit logger
auditLogger = security.NewAuditLogger(logger, true)
// Initialize rate limiter
rateLimiter = security.NewRateLimiter(config.MaxRetries, logger)
@@ -143,7 +144,7 @@ func Execute(ctx context.Context, config *config.Config, logger logger.Logger) e
rootCmd.PersistentFlags().IntVar(&cfg.CompressionLevel, "compression", cfg.CompressionLevel, "Compression level (0-9)")
rootCmd.PersistentFlags().BoolVar(&cfg.NoSaveConfig, "no-save-config", false, "Don't save configuration after successful operations")
rootCmd.PersistentFlags().BoolVar(&cfg.NoLoadConfig, "no-config", false, "Don't load configuration from .dbbackup.conf")
// Security flags (MEDIUM priority)
rootCmd.PersistentFlags().IntVar(&cfg.RetentionDays, "retention-days", cfg.RetentionDays, "Backup retention period in days (0=disabled)")
rootCmd.PersistentFlags().IntVar(&cfg.MinBackups, "min-backups", cfg.MinBackups, "Minimum number of backups to keep")

View File

@@ -14,18 +14,18 @@ import (
func runStatus(ctx context.Context) error {
// Update config from environment
cfg.UpdateFromEnvironment()
// Validate configuration
if err := cfg.Validate(); err != nil {
return fmt.Errorf("configuration error: %w", err)
}
// Display header
displayHeader()
// Display configuration
displayConfiguration()
// Test database connection
return testConnection(ctx)
}
@@ -41,7 +41,7 @@ func displayHeader() {
fmt.Println("\033[1;37m Database Backup & Recovery Tool\033[0m")
fmt.Println("\033[1;34m==============================================================\033[0m")
}
fmt.Printf("Version: %s (built: %s, commit: %s)\n", cfg.Version, cfg.BuildTime, cfg.GitCommit)
fmt.Println()
}
@@ -53,32 +53,32 @@ func displayConfiguration() {
fmt.Printf(" Host: %s:%d\n", cfg.Host, cfg.Port)
fmt.Printf(" User: %s\n", cfg.User)
fmt.Printf(" Database: %s\n", cfg.Database)
if cfg.Password != "" {
fmt.Printf(" Password: ****** (set)\n")
} else {
fmt.Printf(" Password: (not set)\n")
}
fmt.Printf(" SSL Mode: %s\n", cfg.SSLMode)
if cfg.Insecure {
fmt.Printf(" SSL: disabled\n")
}
fmt.Printf(" Backup Dir: %s\n", cfg.BackupDir)
fmt.Printf(" Compression: %d\n", cfg.CompressionLevel)
fmt.Printf(" Jobs: %d\n", cfg.Jobs)
fmt.Printf(" Dump Jobs: %d\n", cfg.DumpJobs)
fmt.Printf(" Max Cores: %d\n", cfg.MaxCores)
fmt.Printf(" Auto Detect: %v\n", cfg.AutoDetectCores)
// System information
fmt.Println()
fmt.Println("System Information:")
fmt.Printf(" OS: %s/%s\n", runtime.GOOS, runtime.GOARCH)
fmt.Printf(" CPU Cores: %d\n", runtime.NumCPU())
fmt.Printf(" Go Version: %s\n", runtime.Version())
// Check if backup directory exists
if info, err := os.Stat(cfg.BackupDir); err != nil {
fmt.Printf(" Backup Dir: %s (does not exist - will be created)\n", cfg.BackupDir)
@@ -87,7 +87,7 @@ func displayConfiguration() {
} else {
fmt.Printf(" Backup Dir: %s (exists but not a directory!)\n", cfg.BackupDir)
}
fmt.Println()
}
@@ -95,7 +95,7 @@ func displayConfiguration() {
func testConnection(ctx context.Context) error {
// Create progress indicator
indicator := progress.NewIndicator(true, "spinner")
// Create database instance
db, err := database.New(cfg, log)
if err != nil {
@@ -103,7 +103,7 @@ func testConnection(ctx context.Context) error {
return err
}
defer db.Close()
// Test tool availability
indicator.Start("Checking required tools...")
if err := db.ValidateBackupTools(); err != nil {
@@ -111,7 +111,7 @@ func testConnection(ctx context.Context) error {
return err
}
indicator.Complete("Required tools available")
// Test connection
indicator.Start(fmt.Sprintf("Connecting to %s...", cfg.DatabaseType))
if err := db.Connect(ctx); err != nil {
@@ -119,32 +119,32 @@ func testConnection(ctx context.Context) error {
return err
}
indicator.Complete("Connected successfully")
// Test basic operations
indicator.Start("Testing database operations...")
// Get version
version, err := db.GetVersion(ctx)
if err != nil {
indicator.Fail(fmt.Sprintf("Failed to get database version: %v", err))
return err
}
// List databases
databases, err := db.ListDatabases(ctx)
if err != nil {
indicator.Fail(fmt.Sprintf("Failed to list databases: %v", err))
return err
}
indicator.Complete("Database operations successful")
// Display results
fmt.Println("Connection Test Results:")
fmt.Printf(" Status: Connected ✅\n")
fmt.Printf(" Version: %s\n", version)
fmt.Printf(" Databases: %d found\n", len(databases))
if len(databases) > 0 {
fmt.Printf(" Database List: ")
if len(databases) <= 5 {
@@ -165,9 +165,9 @@ func testConnection(ctx context.Context) error {
}
fmt.Println()
}
fmt.Println()
fmt.Println("✅ Status check completed successfully!")
return nil
}

View File

@@ -12,6 +12,7 @@ import (
"dbbackup/internal/metadata"
"dbbackup/internal/restore"
"dbbackup/internal/verification"
"github.com/spf13/cobra"
)
@@ -57,12 +58,12 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
break
}
}
// If cloud URIs detected, handle separately
if hasCloudURI {
return runVerifyCloudBackup(cmd, args)
}
// Expand glob patterns for local files
var backupFiles []string
for _, pattern := range args {
@@ -89,9 +90,9 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
for _, backupFile := range backupFiles {
// Skip metadata files
if strings.HasSuffix(backupFile, ".meta.json") ||
strings.HasSuffix(backupFile, ".sha256") ||
strings.HasSuffix(backupFile, ".info") {
continue
}
@@ -172,7 +173,7 @@ func verifyCloudBackup(ctx context.Context, uri string, quick, verbose bool) (*r
if err != nil {
return nil, err
}
// If not quick mode, also run full verification
if !quick {
_, err := verification.Verify(result.LocalPath)
@@ -181,25 +182,25 @@ func verifyCloudBackup(ctx context.Context, uri string, quick, verbose bool) (*r
return nil, err
}
}
return result, nil
}
// runVerifyCloudBackup verifies backups from cloud storage
func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
fmt.Printf("Verifying cloud backup(s)...\n\n")
successCount := 0
failureCount := 0
for _, uri := range args {
if !isCloudURI(uri) {
fmt.Printf("⚠️ Skipping non-cloud URI: %s\n", uri)
continue
}
fmt.Printf("☁️ %s\n", uri)
// Download and verify
result, err := verifyCloudBackup(cmd.Context(), uri, quickVerify, verboseVerify)
if err != nil {
@@ -207,10 +208,10 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
failureCount++
continue
}
// Cleanup temp file
defer result.Cleanup()
fmt.Printf(" ✅ VALID\n")
if verboseVerify && result.MetadataPath != "" {
meta, _ := metadata.Load(result.MetadataPath)
@@ -224,12 +225,12 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
fmt.Println()
successCount++
}
fmt.Printf("\n✅ Summary: %d valid, %d failed\n", successCount, failureCount)
if failureCount > 0 {
os.Exit(1)
}
return nil
}
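
One detail worth flagging in runVerifyCloudBackup: the deferred result.Cleanup() runs at function exit, not per iteration, so temp downloads for every URI accumulate on disk until the loop finishes. If that matters for large backups, scoping each iteration is a common fix (a sketch, not the committed code):

for _, uri := range args {
	func() {
		result, err := verifyCloudBackup(cmd.Context(), uri, quickVerify, verboseVerify)
		if err != nil {
			failureCount++
			return
		}
		defer result.Cleanup() // now runs at the end of each iteration
		// ... per-backup reporting as above ...
		successCount++
	}()
}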