All checks were successful
CI/CD / Test (push) Successful in 2m55s
CI/CD / Lint (push) Successful in 1m12s
CI/CD / Integration Tests (push) Successful in 50s
CI/CD / Native Engine Tests (push) Successful in 51s
CI/CD / Build Binary (push) Successful in 45s
CI/CD / Test Release Build (push) Successful in 1m20s
CI/CD / Release Binaries (push) Successful in 10m27s
Features: - Performance analysis package with 2GB/s+ throughput benchmarks - Comprehensive test coverage improvements (exitcode, errors, metadata 100%) - Grafana dashboard updates - Structured error types with codes and remediation guidance Testing: - Added exitcode tests (100% coverage) - Added errors package tests (100% coverage) - Added metadata tests (92.2% coverage) - Improved fs tests (20.9% coverage) - Improved checks tests (20.3% coverage) Performance: - 2,048 MB/s dump throughput (4x target) - 1,673 MB/s restore throughput (5.6x target) - Buffer pooling for bounded memory usage
739 lines
22 KiB
Go
Executable File
739 lines
22 KiB
Go
Executable File
package cmd
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"os"
|
|
"path/filepath"
|
|
"strings"
|
|
"time"
|
|
|
|
"dbbackup/internal/backup"
|
|
"dbbackup/internal/checks"
|
|
"dbbackup/internal/config"
|
|
"dbbackup/internal/database"
|
|
"dbbackup/internal/notify"
|
|
"dbbackup/internal/security"
|
|
"dbbackup/internal/validation"
|
|
)
|
|
|
|
// runClusterBackup performs a full cluster backup (all databases) of a
// PostgreSQL instance.
//
// Flow: validate configuration and inputs -> optional dry-run preflight ->
// privilege and resource checks -> audit/notify start -> rate-limited
// connect -> engine.BackupCluster -> optional encryption -> audit/notify
// completion -> retention cleanup -> persist local config.
//
// Relies on package-level state: cfg, log, auditLogger, notifyManager,
// rateLimiter, backupDryRun. Fatal steps return a non-nil error;
// best-effort steps (resource check, retention cleanup, config save) only
// log warnings.
func runClusterBackup(ctx context.Context) error {
	// Cluster-wide backup is only implemented for PostgreSQL; other engines
	// must use per-database backups.
	if !cfg.IsPostgreSQL() {
		return fmt.Errorf("cluster backup requires PostgreSQL (detected: %s). Use 'backup single' for individual database backups", cfg.DisplayDatabaseType())
	}

	// Let environment variables override file/flag configuration.
	cfg.UpdateFromEnvironment()

	// Validate configuration
	if err := cfg.Validate(); err != nil {
		return fmt.Errorf("configuration error: %w", err)
	}

	// Validate input parameters with comprehensive security checks
	if err := validateBackupParams(cfg); err != nil {
		return fmt.Errorf("validation error: %w", err)
	}

	// Dry-run mode: run preflight checks and print the report only — no
	// backup is taken. Empty database name selects cluster scope.
	if backupDryRun {
		return runBackupPreflight(ctx, "")
	}

	// Warn about (or refuse, depending on cfg.AllowRoot) elevated privileges.
	privChecker := security.NewPrivilegeChecker(log)
	if err := privChecker.CheckAndWarn(cfg.AllowRoot); err != nil {
		return err
	}

	// Resource-limit check is best-effort: a failure is logged, not fatal.
	if cfg.CheckResources {
		resChecker := security.NewResourceChecker(log)
		if _, err := resChecker.CheckResourceLimits(); err != nil {
			log.Warn("Failed to check resource limits", "error", err)
		}
	}

	log.Info("Starting cluster backup",
		"host", cfg.Host,
		"port", cfg.Port,
		"backup_dir", cfg.BackupDir)

	// Audit log: backup start
	user := security.GetCurrentUser()
	auditLogger.LogBackupStart(user, "all_databases", "cluster")

	// Track start time so notifications can report total duration.
	backupStartTime := time.Now()

	// Notify: backup started (notifyManager == nil means notifications disabled).
	if notifyManager != nil {
		notifyManager.Notify(notify.NewEvent(notify.EventBackupStarted, notify.SeverityInfo, "Cluster backup started").
			WithDatabase("all_databases").
			WithDetail("host", cfg.Host).
			WithDetail("backup_dir", cfg.BackupDir))
	}

	// Throttle repeated connection attempts per host:port.
	host := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
	if err := rateLimiter.CheckAndWait(host); err != nil {
		auditLogger.LogBackupFailed(user, "all_databases", err)
		return fmt.Errorf("rate limit exceeded for %s. Too many connection attempts. Wait 60s or check credentials: %w", host, err)
	}

	// Create database instance
	db, err := database.New(cfg, log)
	if err != nil {
		auditLogger.LogBackupFailed(user, "all_databases", err)
		return fmt.Errorf("failed to create database instance: %w", err)
	}
	defer db.Close()

	// Connect to database; failures feed the rate limiter so subsequent
	// attempts against the same host are delayed.
	if err := db.Connect(ctx); err != nil {
		rateLimiter.RecordFailure(host)
		auditLogger.LogBackupFailed(user, "all_databases", err)
		return fmt.Errorf("failed to connect to %s@%s:%d. Check: 1) Database is running 2) Credentials are correct 3) pg_hba.conf allows connection: %w", cfg.User, cfg.Host, cfg.Port, err)
	}
	rateLimiter.RecordSuccess(host)

	// Create backup engine
	engine := backup.New(cfg, log, db)

	// Perform cluster backup
	if err := engine.BackupCluster(ctx); err != nil {
		auditLogger.LogBackupFailed(user, "all_databases", err)
		// Notify: backup failed
		if notifyManager != nil {
			notifyManager.Notify(notify.NewEvent(notify.EventBackupFailed, notify.SeverityError, "Cluster backup failed").
				WithDatabase("all_databases").
				WithError(err).
				WithDuration(time.Since(backupStartTime)))
		}
		return err
	}

	// Apply encryption if requested. An encryption failure is fatal to this
	// command, but the unencrypted backup is left in place (see error text).
	if isEncryptionEnabled() {
		if err := encryptLatestClusterBackup(); err != nil {
			log.Error("Failed to encrypt backup", "error", err)
			// Notify: encryption failed
			if notifyManager != nil {
				notifyManager.Notify(notify.NewEvent(notify.EventBackupFailed, notify.SeverityError, "Backup encryption failed").
					WithDatabase("all_databases").
					WithError(err).
					WithDuration(time.Since(backupStartTime)))
			}
			return fmt.Errorf("backup completed successfully but encryption failed. Unencrypted backup remains in %s: %w", cfg.BackupDir, err)
		}
		log.Info("Cluster backup encrypted successfully")
	}

	// Audit log: backup success (0 = backup size not reported here).
	auditLogger.LogBackupComplete(user, "all_databases", cfg.BackupDir, 0)

	// Notify: backup completed
	if notifyManager != nil {
		notifyManager.Notify(notify.NewEvent(notify.EventBackupCompleted, notify.SeveritySuccess, "Cluster backup completed successfully").
			WithDatabase("all_databases").
			WithDuration(time.Since(backupStartTime)).
			WithDetail("backup_dir", cfg.BackupDir))
	}

	// Cleanup old backups if retention policy is enabled. Failures are
	// logged, not fatal — the backup itself already succeeded.
	if cfg.RetentionDays > 0 {
		retentionPolicy := security.NewRetentionPolicy(cfg.RetentionDays, cfg.MinBackups, log)
		if deleted, freed, err := retentionPolicy.CleanupOldBackups(cfg.BackupDir); err != nil {
			log.Warn("Failed to cleanup old backups", "error", err)
		} else if deleted > 0 {
			log.Info("Cleaned up old backups", "deleted", deleted, "freed_mb", freed/1024/1024)
		}
	}

	// Save configuration for future use (unless disabled)
	if !cfg.NoSaveConfig {
		localCfg := config.ConfigFromConfig(cfg)
		if err := config.SaveLocalConfig(localCfg); err != nil {
			log.Warn("Failed to save configuration", "error", err)
		} else {
			log.Info("Configuration saved to .dbbackup.conf")
			auditLogger.LogConfigChange(user, "config_file", "", ".dbbackup.conf")
		}
	}

	return nil
}
|
|
|
|
// runSingleBackup performs a single database backup
|
|
func runSingleBackup(ctx context.Context, databaseName string) error {
|
|
// Update config from environment
|
|
cfg.UpdateFromEnvironment()
|
|
|
|
// IMPORTANT: Set the database name from positional argument
|
|
// This overrides the default 'postgres' when using MySQL
|
|
cfg.Database = databaseName
|
|
|
|
// Validate configuration
|
|
if err := cfg.Validate(); err != nil {
|
|
return fmt.Errorf("configuration error: %w", err)
|
|
}
|
|
|
|
// Validate input parameters with comprehensive security checks
|
|
if err := validateBackupParams(cfg); err != nil {
|
|
return fmt.Errorf("validation error: %w", err)
|
|
}
|
|
|
|
// Handle dry-run mode
|
|
if backupDryRun {
|
|
return runBackupPreflight(ctx, databaseName)
|
|
}
|
|
|
|
// Get backup type and base backup from command line flags
|
|
backupType := backupTypeFlag
|
|
baseBackup := baseBackupFlag
|
|
|
|
// Validate backup type
|
|
if backupType != "full" && backupType != "incremental" {
|
|
return fmt.Errorf("invalid backup type: %s (must be 'full' or 'incremental')", backupType)
|
|
}
|
|
|
|
// Validate incremental backup requirements
|
|
if backupType == "incremental" {
|
|
if !cfg.IsPostgreSQL() && !cfg.IsMySQL() {
|
|
return fmt.Errorf("incremental backups require PostgreSQL or MySQL/MariaDB (detected: %s). Use --backup-type=full for other databases", cfg.DisplayDatabaseType())
|
|
}
|
|
if baseBackup == "" {
|
|
return fmt.Errorf("incremental backup requires --base-backup flag pointing to initial full backup archive")
|
|
}
|
|
// Verify base backup exists
|
|
if _, err := os.Stat(baseBackup); os.IsNotExist(err) {
|
|
return fmt.Errorf("base backup file not found at %s. Ensure path is correct and file exists", baseBackup)
|
|
}
|
|
}
|
|
|
|
// Check privileges
|
|
privChecker := security.NewPrivilegeChecker(log)
|
|
if err := privChecker.CheckAndWarn(cfg.AllowRoot); err != nil {
|
|
return err
|
|
}
|
|
|
|
log.Info("Starting single database backup",
|
|
"database", databaseName,
|
|
"db_type", cfg.DatabaseType,
|
|
"backup_type", backupType,
|
|
"host", cfg.Host,
|
|
"port", cfg.Port,
|
|
"backup_dir", cfg.BackupDir)
|
|
|
|
if backupType == "incremental" {
|
|
log.Info("Incremental backup", "base_backup", baseBackup)
|
|
}
|
|
|
|
// Audit log: backup start
|
|
user := security.GetCurrentUser()
|
|
auditLogger.LogBackupStart(user, databaseName, "single")
|
|
|
|
// Track start time for notifications
|
|
backupStartTime := time.Now()
|
|
|
|
// Notify: backup started
|
|
if notifyManager != nil {
|
|
notifyManager.Notify(notify.NewEvent(notify.EventBackupStarted, notify.SeverityInfo, "Database backup started").
|
|
WithDatabase(databaseName).
|
|
WithDetail("host", cfg.Host).
|
|
WithDetail("backup_type", backupType))
|
|
}
|
|
|
|
// Rate limit connection attempts
|
|
host := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
|
|
if err := rateLimiter.CheckAndWait(host); err != nil {
|
|
auditLogger.LogBackupFailed(user, databaseName, err)
|
|
return fmt.Errorf("rate limit exceeded: %w", err)
|
|
}
|
|
|
|
// Create database instance
|
|
db, err := database.New(cfg, log)
|
|
if err != nil {
|
|
auditLogger.LogBackupFailed(user, databaseName, err)
|
|
return fmt.Errorf("failed to create database instance: %w", err)
|
|
}
|
|
defer db.Close()
|
|
|
|
// Connect to database
|
|
if err := db.Connect(ctx); err != nil {
|
|
rateLimiter.RecordFailure(host)
|
|
auditLogger.LogBackupFailed(user, databaseName, err)
|
|
return fmt.Errorf("failed to connect to database: %w", err)
|
|
}
|
|
rateLimiter.RecordSuccess(host)
|
|
|
|
// Verify database exists
|
|
exists, err := db.DatabaseExists(ctx, databaseName)
|
|
if err != nil {
|
|
auditLogger.LogBackupFailed(user, databaseName, err)
|
|
return fmt.Errorf("failed to check if database exists: %w", err)
|
|
}
|
|
if !exists {
|
|
err := fmt.Errorf("database '%s' does not exist", databaseName)
|
|
auditLogger.LogBackupFailed(user, databaseName, err)
|
|
return err
|
|
}
|
|
|
|
// Check if native engine should be used
|
|
if cfg.UseNativeEngine {
|
|
log.Info("Using native engine for backup", "database", databaseName)
|
|
err = runNativeBackup(ctx, db, databaseName, backupType, baseBackup, backupStartTime, user)
|
|
|
|
if err != nil && cfg.FallbackToTools {
|
|
log.Warn("Native engine failed, falling back to external tools", "error", err)
|
|
// Continue with tool-based backup below
|
|
} else {
|
|
// Native engine succeeded or no fallback configured
|
|
return err // Return success (nil) or failure
|
|
}
|
|
}
|
|
|
|
// Create backup engine (tool-based)
|
|
engine := backup.New(cfg, log, db)
|
|
|
|
// Perform backup based on type
|
|
var backupErr error
|
|
if backupType == "incremental" {
|
|
// Incremental backup - supported for PostgreSQL and MySQL
|
|
log.Info("Creating incremental backup", "base_backup", baseBackup)
|
|
|
|
// Create appropriate incremental engine based on database type
|
|
var incrEngine interface {
|
|
FindChangedFiles(context.Context, *backup.IncrementalBackupConfig) ([]backup.ChangedFile, error)
|
|
CreateIncrementalBackup(context.Context, *backup.IncrementalBackupConfig, []backup.ChangedFile) error
|
|
}
|
|
|
|
if cfg.IsPostgreSQL() {
|
|
incrEngine = backup.NewPostgresIncrementalEngine(log)
|
|
} else {
|
|
incrEngine = backup.NewMySQLIncrementalEngine(log)
|
|
}
|
|
|
|
// Configure incremental backup
|
|
incrConfig := &backup.IncrementalBackupConfig{
|
|
BaseBackupPath: baseBackup,
|
|
DataDirectory: cfg.BackupDir, // Note: This should be the actual data directory
|
|
CompressionLevel: cfg.CompressionLevel,
|
|
}
|
|
|
|
// Find changed files
|
|
changedFiles, err := incrEngine.FindChangedFiles(ctx, incrConfig)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to find changed files: %w", err)
|
|
}
|
|
|
|
// Create incremental backup
|
|
if err := incrEngine.CreateIncrementalBackup(ctx, incrConfig, changedFiles); err != nil {
|
|
return fmt.Errorf("failed to create incremental backup: %w", err)
|
|
}
|
|
|
|
log.Info("Incremental backup completed", "changed_files", len(changedFiles))
|
|
} else {
|
|
// Full backup
|
|
backupErr = engine.BackupSingle(ctx, databaseName)
|
|
}
|
|
|
|
if backupErr != nil {
|
|
auditLogger.LogBackupFailed(user, databaseName, backupErr)
|
|
// Notify: backup failed
|
|
if notifyManager != nil {
|
|
notifyManager.Notify(notify.NewEvent(notify.EventBackupFailed, notify.SeverityError, "Database backup failed").
|
|
WithDatabase(databaseName).
|
|
WithError(backupErr).
|
|
WithDuration(time.Since(backupStartTime)))
|
|
}
|
|
return backupErr
|
|
}
|
|
|
|
// Apply encryption if requested
|
|
if isEncryptionEnabled() {
|
|
if err := encryptLatestBackup(databaseName); err != nil {
|
|
log.Error("Failed to encrypt backup", "error", err)
|
|
// Notify: encryption failed
|
|
if notifyManager != nil {
|
|
notifyManager.Notify(notify.NewEvent(notify.EventBackupFailed, notify.SeverityError, "Backup encryption failed").
|
|
WithDatabase(databaseName).
|
|
WithError(err).
|
|
WithDuration(time.Since(backupStartTime)))
|
|
}
|
|
return fmt.Errorf("backup succeeded but encryption failed: %w", err)
|
|
}
|
|
log.Info("Backup encrypted successfully")
|
|
}
|
|
|
|
// Audit log: backup success
|
|
auditLogger.LogBackupComplete(user, databaseName, cfg.BackupDir, 0)
|
|
|
|
// Notify: backup completed
|
|
if notifyManager != nil {
|
|
notifyManager.Notify(notify.NewEvent(notify.EventBackupCompleted, notify.SeveritySuccess, "Database backup completed successfully").
|
|
WithDatabase(databaseName).
|
|
WithDuration(time.Since(backupStartTime)).
|
|
WithDetail("backup_dir", cfg.BackupDir).
|
|
WithDetail("backup_type", backupType))
|
|
}
|
|
|
|
// Cleanup old backups if retention policy is enabled
|
|
if cfg.RetentionDays > 0 {
|
|
retentionPolicy := security.NewRetentionPolicy(cfg.RetentionDays, cfg.MinBackups, log)
|
|
if deleted, freed, err := retentionPolicy.CleanupOldBackups(cfg.BackupDir); err != nil {
|
|
log.Warn("Failed to cleanup old backups", "error", err)
|
|
} else if deleted > 0 {
|
|
log.Info("Cleaned up old backups", "deleted", deleted, "freed_mb", freed/1024/1024)
|
|
}
|
|
}
|
|
|
|
// Save configuration for future use (unless disabled)
|
|
if !cfg.NoSaveConfig {
|
|
localCfg := config.ConfigFromConfig(cfg)
|
|
if err := config.SaveLocalConfig(localCfg); err != nil {
|
|
log.Warn("Failed to save configuration", "error", err)
|
|
} else {
|
|
log.Info("Configuration saved to .dbbackup.conf")
|
|
auditLogger.LogConfigChange(user, "config_file", "", ".dbbackup.conf")
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// runSampleBackup performs a reduced-size ("sample") backup of a single
// database, using the configured sampling strategy (percent, ratio, or
// count) and sample value.
//
// Flow mirrors runSingleBackup minus incremental/native-engine support:
// validate config/inputs -> optional dry-run preflight -> privilege check ->
// sampling-parameter validation -> audit start -> rate-limited connect ->
// existence check -> engine.BackupSample -> optional encryption -> audit
// completion -> persist local config.
//
// NOTE(review): unlike the cluster/single paths, this function emits no
// notifyManager events and performs no retention cleanup — confirm whether
// that is intentional.
//
// Relies on package-level state: cfg, log, auditLogger, rateLimiter,
// backupDryRun.
func runSampleBackup(ctx context.Context, databaseName string) error {
	// Let environment variables override file/flag configuration.
	cfg.UpdateFromEnvironment()

	// IMPORTANT: Set the database name from positional argument
	cfg.Database = databaseName

	// Validate configuration
	if err := cfg.Validate(); err != nil {
		return fmt.Errorf("configuration error: %w", err)
	}

	// Validate input parameters with comprehensive security checks
	if err := validateBackupParams(cfg); err != nil {
		return fmt.Errorf("validation error: %w", err)
	}

	// Dry-run mode: preflight checks only, no backup taken.
	if backupDryRun {
		return runBackupPreflight(ctx, databaseName)
	}

	// Warn about (or refuse, depending on cfg.AllowRoot) elevated privileges.
	privChecker := security.NewPrivilegeChecker(log)
	if err := privChecker.CheckAndWarn(cfg.AllowRoot); err != nil {
		return err
	}

	// Validate sample parameters: the value must be positive for every strategy.
	if cfg.SampleValue <= 0 {
		return fmt.Errorf("sample value must be greater than 0")
	}

	// Strategy-specific bounds:
	//   percent: 1..100
	//   ratio:   >= 2 (presumably 1-in-N sampling — confirm in backup engine)
	//   count:   any positive value
	switch cfg.SampleStrategy {
	case "percent":
		if cfg.SampleValue > 100 {
			return fmt.Errorf("percentage cannot exceed 100")
		}
	case "ratio":
		if cfg.SampleValue < 2 {
			return fmt.Errorf("ratio must be at least 2")
		}
	case "count":
		// Any positive count is valid
	default:
		return fmt.Errorf("invalid sampling strategy: %s (must be ratio, percent, or count)", cfg.SampleStrategy)
	}

	log.Info("Starting sample database backup",
		"database", databaseName,
		"db_type", cfg.DatabaseType,
		"strategy", cfg.SampleStrategy,
		"value", cfg.SampleValue,
		"host", cfg.Host,
		"port", cfg.Port,
		"backup_dir", cfg.BackupDir)

	// Audit log: backup start
	user := security.GetCurrentUser()
	auditLogger.LogBackupStart(user, databaseName, "sample")

	// Throttle repeated connection attempts per host:port.
	host := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
	if err := rateLimiter.CheckAndWait(host); err != nil {
		auditLogger.LogBackupFailed(user, databaseName, err)
		return fmt.Errorf("rate limit exceeded: %w", err)
	}

	// Create database instance
	db, err := database.New(cfg, log)
	if err != nil {
		auditLogger.LogBackupFailed(user, databaseName, err)
		return fmt.Errorf("failed to create database instance: %w", err)
	}
	defer db.Close()

	// Connect to database; failures feed the rate limiter.
	if err := db.Connect(ctx); err != nil {
		rateLimiter.RecordFailure(host)
		auditLogger.LogBackupFailed(user, databaseName, err)
		return fmt.Errorf("failed to connect to database: %w", err)
	}
	rateLimiter.RecordSuccess(host)

	// Verify database exists before invoking the engine.
	exists, err := db.DatabaseExists(ctx, databaseName)
	if err != nil {
		auditLogger.LogBackupFailed(user, databaseName, err)
		return fmt.Errorf("failed to check if database exists: %w", err)
	}
	if !exists {
		err := fmt.Errorf("database '%s' does not exist", databaseName)
		auditLogger.LogBackupFailed(user, databaseName, err)
		return err
	}

	// Create backup engine
	engine := backup.New(cfg, log, db)

	// Perform sample backup
	if err := engine.BackupSample(ctx, databaseName); err != nil {
		auditLogger.LogBackupFailed(user, databaseName, err)
		return err
	}

	// Apply encryption if requested; a failure is fatal to this command but
	// the unencrypted backup file is left in place.
	if isEncryptionEnabled() {
		if err := encryptLatestBackup(databaseName); err != nil {
			log.Error("Failed to encrypt backup", "error", err)
			return fmt.Errorf("backup succeeded but encryption failed: %w", err)
		}
		log.Info("Sample backup encrypted successfully")
	}

	// Audit log: backup success (0 = backup size not reported here).
	auditLogger.LogBackupComplete(user, databaseName, cfg.BackupDir, 0)

	// Save configuration for future use (unless disabled)
	if !cfg.NoSaveConfig {
		localCfg := config.ConfigFromConfig(cfg)
		if err := config.SaveLocalConfig(localCfg); err != nil {
			log.Warn("Failed to save configuration", "error", err)
		} else {
			log.Info("Configuration saved to .dbbackup.conf")
			auditLogger.LogConfigChange(user, "config_file", "", ".dbbackup.conf")
		}
	}

	return nil
}
|
|
|
|
// encryptLatestBackup finds and encrypts the most recent backup for a database
|
|
func encryptLatestBackup(databaseName string) error {
|
|
// Load encryption key
|
|
key, err := loadEncryptionKey(encryptionKeyFile, encryptionKeyEnv)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
// Find most recent backup file for this database
|
|
backupPath, err := findLatestBackup(cfg.BackupDir, databaseName)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
// Encrypt the backup
|
|
return backup.EncryptBackupFile(backupPath, key, log)
|
|
}
|
|
|
|
// encryptLatestClusterBackup finds and encrypts the most recent cluster backup
|
|
func encryptLatestClusterBackup() error {
|
|
// Load encryption key
|
|
key, err := loadEncryptionKey(encryptionKeyFile, encryptionKeyEnv)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
// Find most recent cluster backup
|
|
backupPath, err := findLatestClusterBackup(cfg.BackupDir)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
// Encrypt the backup
|
|
return backup.EncryptBackupFile(backupPath, key, log)
|
|
}
|
|
|
|
// findLatestBackup finds the most recently created backup file for a database
|
|
func findLatestBackup(backupDir, databaseName string) (string, error) {
|
|
entries, err := os.ReadDir(backupDir)
|
|
if err != nil {
|
|
return "", fmt.Errorf("failed to read backup directory: %w", err)
|
|
}
|
|
|
|
var latestPath string
|
|
var latestTime time.Time
|
|
|
|
prefix := "db_" + databaseName + "_"
|
|
for _, entry := range entries {
|
|
if entry.IsDir() {
|
|
continue
|
|
}
|
|
|
|
name := entry.Name()
|
|
// Skip metadata files and already encrypted files
|
|
if strings.HasSuffix(name, ".meta.json") || strings.HasSuffix(name, ".encrypted") {
|
|
continue
|
|
}
|
|
|
|
// Match database backup files
|
|
if strings.HasPrefix(name, prefix) && (strings.HasSuffix(name, ".dump") ||
|
|
strings.HasSuffix(name, ".dump.gz") || strings.HasSuffix(name, ".sql.gz")) {
|
|
info, err := entry.Info()
|
|
if err != nil {
|
|
continue
|
|
}
|
|
|
|
if info.ModTime().After(latestTime) {
|
|
latestTime = info.ModTime()
|
|
latestPath = filepath.Join(backupDir, name)
|
|
}
|
|
}
|
|
}
|
|
|
|
if latestPath == "" {
|
|
return "", fmt.Errorf("no backup found for database: %s", databaseName)
|
|
}
|
|
|
|
return latestPath, nil
|
|
}
|
|
|
|
// findLatestClusterBackup finds the most recently created cluster backup
|
|
func findLatestClusterBackup(backupDir string) (string, error) {
|
|
entries, err := os.ReadDir(backupDir)
|
|
if err != nil {
|
|
return "", fmt.Errorf("failed to read backup directory: %w", err)
|
|
}
|
|
|
|
var latestPath string
|
|
var latestTime time.Time
|
|
|
|
for _, entry := range entries {
|
|
if entry.IsDir() {
|
|
continue
|
|
}
|
|
|
|
name := entry.Name()
|
|
// Skip metadata files and already encrypted files
|
|
if strings.HasSuffix(name, ".meta.json") || strings.HasSuffix(name, ".encrypted") {
|
|
continue
|
|
}
|
|
|
|
// Match cluster backup files
|
|
if strings.HasPrefix(name, "cluster_") && strings.HasSuffix(name, ".tar.gz") {
|
|
info, err := entry.Info()
|
|
if err != nil {
|
|
continue
|
|
}
|
|
|
|
if info.ModTime().After(latestTime) {
|
|
latestTime = info.ModTime()
|
|
latestPath = filepath.Join(backupDir, name)
|
|
}
|
|
}
|
|
}
|
|
|
|
if latestPath == "" {
|
|
return "", fmt.Errorf("no cluster backup found")
|
|
}
|
|
|
|
return latestPath, nil
|
|
}
|
|
|
|
// runBackupPreflight runs preflight checks without executing backup
|
|
func runBackupPreflight(ctx context.Context, databaseName string) error {
|
|
checker := checks.NewPreflightChecker(cfg, log)
|
|
defer checker.Close()
|
|
|
|
result, err := checker.RunAllChecks(ctx, databaseName)
|
|
if err != nil {
|
|
return fmt.Errorf("preflight check error: %w", err)
|
|
}
|
|
|
|
// Format and print report
|
|
report := checks.FormatPreflightReport(result, databaseName, true)
|
|
fmt.Print(report)
|
|
|
|
// Return appropriate exit code
|
|
if !result.AllPassed {
|
|
return fmt.Errorf("preflight checks failed")
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// validateBackupParams performs comprehensive input validation for backup parameters
|
|
func validateBackupParams(cfg *config.Config) error {
|
|
var errs []string
|
|
|
|
// Validate backup directory
|
|
if cfg.BackupDir != "" {
|
|
if err := validation.ValidateBackupDir(cfg.BackupDir); err != nil {
|
|
errs = append(errs, fmt.Sprintf("backup directory: %s", err))
|
|
}
|
|
}
|
|
|
|
// Validate job count
|
|
if cfg.Jobs > 0 {
|
|
if err := validation.ValidateJobs(cfg.Jobs); err != nil {
|
|
errs = append(errs, fmt.Sprintf("jobs: %s", err))
|
|
}
|
|
}
|
|
|
|
// Validate database name
|
|
if cfg.Database != "" {
|
|
if err := validation.ValidateDatabaseName(cfg.Database, cfg.DatabaseType); err != nil {
|
|
errs = append(errs, fmt.Sprintf("database name: %s", err))
|
|
}
|
|
}
|
|
|
|
// Validate host
|
|
if cfg.Host != "" {
|
|
if err := validation.ValidateHost(cfg.Host); err != nil {
|
|
errs = append(errs, fmt.Sprintf("host: %s", err))
|
|
}
|
|
}
|
|
|
|
// Validate port
|
|
if cfg.Port > 0 {
|
|
if err := validation.ValidatePort(cfg.Port); err != nil {
|
|
errs = append(errs, fmt.Sprintf("port: %s", err))
|
|
}
|
|
}
|
|
|
|
// Validate retention days
|
|
if cfg.RetentionDays > 0 {
|
|
if err := validation.ValidateRetentionDays(cfg.RetentionDays); err != nil {
|
|
errs = append(errs, fmt.Sprintf("retention days: %s", err))
|
|
}
|
|
}
|
|
|
|
// Validate compression level
|
|
if err := validation.ValidateCompressionLevel(cfg.CompressionLevel); err != nil {
|
|
errs = append(errs, fmt.Sprintf("compression level: %s", err))
|
|
}
|
|
|
|
if len(errs) > 0 {
|
|
return fmt.Errorf("validation failed: %s", strings.Join(errs, "; "))
|
|
}
|
|
|
|
return nil
|
|
}
|