diff --git a/cmd/backup_impl.go b/cmd/backup_impl.go
index 9abb287..e3aa5ef 100644
--- a/cmd/backup_impl.go
+++ b/cmd/backup_impl.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"dbbackup/internal/backup"
+	"dbbackup/internal/config"
 	"dbbackup/internal/database"
 )
 
@@ -43,7 +44,21 @@ func runClusterBackup(ctx context.Context) error {
 	engine := backup.New(cfg, log, db)
 
 	// Perform cluster backup
-	return engine.BackupCluster(ctx)
+	if err := engine.BackupCluster(ctx); err != nil {
+		return err
+	}
+
+	// Save configuration for future use (unless disabled)
+	if !cfg.NoSaveConfig {
+		localCfg := config.ConfigFromConfig(cfg)
+		if err := config.SaveLocalConfig(localCfg); err != nil {
+			log.Warn("Failed to save configuration", "error", err)
+		} else {
+			log.Info("Configuration saved to .dbbackup.conf")
+		}
+	}
+
+	return nil
 }
 
 // runSingleBackup performs a single database backup
@@ -88,7 +103,21 @@ func runSingleBackup(ctx context.Context, databaseName string) error {
 	engine := backup.New(cfg, log, db)
 
 	// Perform single database backup
-	return engine.BackupSingle(ctx, databaseName)
+	if err := engine.BackupSingle(ctx, databaseName); err != nil {
+		return err
+	}
+
+	// Save configuration for future use (unless disabled)
+	if !cfg.NoSaveConfig {
+		localCfg := config.ConfigFromConfig(cfg)
+		if err := config.SaveLocalConfig(localCfg); err != nil {
+			log.Warn("Failed to save configuration", "error", err)
+		} else {
+			log.Info("Configuration saved to .dbbackup.conf")
+		}
+	}
+
+	return nil
 }
 
 // runSampleBackup performs a sample database backup
@@ -154,6 +183,20 @@ func runSampleBackup(ctx context.Context, databaseName string) error {
 	// Create backup engine
 	engine := backup.New(cfg, log, db)
 
-	// Perform sample database backup
-	return engine.BackupSample(ctx, databaseName)
+	// Perform sample backup
+	if err := engine.BackupSample(ctx, databaseName); err != nil {
+		return err
+	}
+
+	// Save configuration for future use (unless disabled)
+	if !cfg.NoSaveConfig {
+		localCfg := config.ConfigFromConfig(cfg)
+		if err := config.SaveLocalConfig(localCfg); err != nil {
+			log.Warn("Failed to save configuration", "error", err)
+		} else {
+			log.Info("Configuration saved to .dbbackup.conf")
+		}
+	}
+
+	return nil
 }
\ No newline at end of file
diff --git a/cmd/root.go b/cmd/root.go
index 67466fb..dd74417 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -38,6 +38,17 @@ For help with specific commands, use: dbbackup [command] --help`,
 		if cfg == nil {
 			return nil
 		}
+
+		// Load local config if not disabled
+		if !cfg.NoLoadConfig {
+			if localCfg, err := config.LoadLocalConfig(); err != nil {
+				log.Warn("Failed to load local config", "error", err)
+			} else if localCfg != nil {
+				config.ApplyLocalConfig(cfg, localCfg)
+				log.Info("Loaded configuration from .dbbackup.conf")
+			}
+		}
+
 		return cfg.SetDatabaseType(cfg.DatabaseType)
 	},
 }
@@ -69,6 +80,8 @@ func Execute(ctx context.Context, config *config.Config, logger logger.Logger) e
 	rootCmd.PersistentFlags().StringVar(&cfg.SSLMode, "ssl-mode", cfg.SSLMode, "SSL mode for connections")
 	rootCmd.PersistentFlags().BoolVar(&cfg.Insecure, "insecure", cfg.Insecure, "Disable SSL (shortcut for --ssl-mode=disable)")
 	rootCmd.PersistentFlags().IntVar(&cfg.CompressionLevel, "compression", cfg.CompressionLevel, "Compression level (0-9)")
+	rootCmd.PersistentFlags().BoolVar(&cfg.NoSaveConfig, "no-save-config", false, "Don't save configuration after successful operations")
+	rootCmd.PersistentFlags().BoolVar(&cfg.NoLoadConfig, "no-config", false, "Don't load configuration from .dbbackup.conf")
 
 	return rootCmd.ExecuteContext(ctx)
 }
diff --git a/internal/backup/engine.go b/internal/backup/engine.go
index 217e6ca..23d44a7 100644
--- a/internal/backup/engine.go
+++ b/internal/backup/engine.go
@@ -20,6 +20,7 @@ import (
 	"dbbackup/internal/config"
 	"dbbackup/internal/database"
 	"dbbackup/internal/logger"
+	"dbbackup/internal/metrics"
 	"dbbackup/internal/progress"
 	"dbbackup/internal/swap"
 )
@@ -202,6 +203,11 @@ func (e *Engine) BackupSingle(ctx context.Context, databaseName string) error {
 		metaStep.Complete("Metadata file created")
 	}
 
+	// Record metrics for observability
+	if info, err := os.Stat(outputFile); err == nil && metrics.GlobalMetrics != nil {
+		metrics.GlobalMetrics.RecordOperation("backup_single", databaseName, time.Now().Add(-time.Minute), info.Size(), true, 0)
+	}
+
 	// Complete operation
 	tracker.UpdateProgress(100, "Backup operation completed successfully")
 	tracker.Complete(fmt.Sprintf("Single database backup completed: %s", filepath.Base(outputFile)))
@@ -304,9 +310,9 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
 		return fmt.Errorf("failed to create backup directory: %w", err)
 	}
 
-	// Check disk space before starting backup
+	// Check disk space before starting backup (cached for performance)
 	e.log.Info("Checking disk space availability")
-	spaceCheck := checks.CheckDiskSpace(e.cfg.BackupDir)
+	spaceCheck := checks.CheckDiskSpaceCached(e.cfg.BackupDir)
 
 	if !e.silent {
 		// Show disk space status in CLI mode
diff --git a/internal/checks/cache.go b/internal/checks/cache.go
new file mode 100644
index 0000000..1c697f9
--- /dev/null
+++ b/internal/checks/cache.go
@@ -0,0 +1,83 @@
+package checks
+
+import (
+	"sync"
+	"time"
+)
+
+// cacheEntry holds cached disk space information with TTL
+type cacheEntry struct {
+	check     *DiskSpaceCheck
+	timestamp time.Time
+}
+
+// DiskSpaceCache provides thread-safe caching of disk space checks with TTL
+type DiskSpaceCache struct {
+	cache    map[string]*cacheEntry
+	cacheTTL time.Duration
+	mu       sync.RWMutex
+}
+
+// NewDiskSpaceCache creates a new disk space cache with specified TTL
+func NewDiskSpaceCache(ttl time.Duration) *DiskSpaceCache {
+	if ttl <= 0 {
+		ttl = 30 * time.Second // Default 30 second cache
+	}
+
+	return &DiskSpaceCache{
+		cache:    make(map[string]*cacheEntry),
+		cacheTTL: ttl,
+	}
+}
+
+// Get retrieves cached disk space check or performs new check if cache miss/expired
+func (c *DiskSpaceCache) Get(path string) *DiskSpaceCheck {
+	c.mu.RLock()
+	if entry, exists := c.cache[path]; exists {
+		if time.Since(entry.timestamp) < c.cacheTTL {
+			c.mu.RUnlock()
+			return entry.check
+		}
+	}
+	c.mu.RUnlock()
+
+	// Cache miss or expired - perform new check
+	check := CheckDiskSpace(path)
+
+	c.mu.Lock()
+	c.cache[path] = &cacheEntry{
+		check:     check,
+		timestamp: time.Now(),
+	}
+	c.mu.Unlock()
+
+	return check
+}
+
+// Clear removes all cached entries
+func (c *DiskSpaceCache) Clear() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.cache = make(map[string]*cacheEntry)
+}
+
+// Cleanup removes expired entries (call periodically)
+func (c *DiskSpaceCache) Cleanup() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	now := time.Now()
+	for path, entry := range c.cache {
+		if now.Sub(entry.timestamp) >= c.cacheTTL {
+			delete(c.cache, path)
+		}
+	}
+}
+
+// Global cache instance with 30-second TTL
+var globalDiskCache = NewDiskSpaceCache(30 * time.Second)
+
+// CheckDiskSpaceCached performs cached disk space check
+func CheckDiskSpaceCached(path string) *DiskSpaceCheck {
+	return globalDiskCache.Get(path)
+}
\ No newline at end of file
diff --git a/internal/checks/error_hints.go b/internal/checks/error_hints.go
index 6403eff..a7c021b 100644
--- a/internal/checks/error_hints.go
+++ b/internal/checks/error_hints.go
@@ -2,9 +2,21 @@ package checks
 
 import (
 	"fmt"
+	"regexp"
 	"strings"
 )
 
+// Compiled regex patterns for robust error matching
+var errorPatterns = map[string]*regexp.Regexp{
+	"already_exists":    regexp.MustCompile(`(?i)(already exists|duplicate key|unique constraint|relation.*exists)`),
+	"disk_full":         regexp.MustCompile(`(?i)(no space left|disk.*full|write.*failed.*space|insufficient.*space)`),
+	"lock_exhaustion":   regexp.MustCompile(`(?i)(max_locks_per_transaction|out of shared memory|lock.*exhausted|could not open large object)`),
+	"syntax_error":      regexp.MustCompile(`(?i)syntax error at.*line \d+`),
+	"permission_denied": regexp.MustCompile(`(?i)(permission denied|must be owner|access denied)`),
+	"connection_failed": regexp.MustCompile(`(?i)(connection refused|could not connect|no pg_hba\.conf entry)`),
+	"version_mismatch":  regexp.MustCompile(`(?i)(version mismatch|incompatible|unsupported version)`),
+}
+
 // ErrorClassification represents the severity and type of error
 type ErrorClassification struct {
 	Type     string // "ignorable", "warning", "critical", "fatal"
@@ -15,11 +27,90 @@ type ErrorClassification struct {
 	Severity int    // 0=info, 1=warning, 2=error, 3=fatal
 }
 
+// classifyErrorByPattern uses compiled regex patterns for robust error classification
+func classifyErrorByPattern(msg string) string {
+	for category, pattern := range errorPatterns {
+		if pattern.MatchString(msg) {
+			return category
+		}
+	}
+	return "unknown"
+}
+
 // ClassifyError analyzes an error message and provides actionable hints
 func ClassifyError(errorMsg string) *ErrorClassification {
+	// Use regex pattern matching for robustness
+	patternMatch := classifyErrorByPattern(errorMsg)
 	lowerMsg := strings.ToLower(errorMsg)
 
-	// Ignorable errors (objects already exist)
+	// Use pattern matching first, fall back to string matching
+	switch patternMatch {
+	case "already_exists":
+		return &ErrorClassification{
+			Type:     "ignorable",
+			Category: "duplicate",
+			Message:  errorMsg,
+			Hint:     "Object already exists in target database - this is normal during restore",
+			Action:   "No action needed - restore will continue",
+			Severity: 0,
+		}
+	case "disk_full":
+		return &ErrorClassification{
+			Type:     "critical",
+			Category: "disk_space",
+			Message:  errorMsg,
+			Hint:     "Insufficient disk space to complete operation",
+			Action:   "Free up disk space: rm old_backups/* or increase storage",
+			Severity: 3,
+		}
+	case "lock_exhaustion":
+		return &ErrorClassification{
+			Type:     "critical",
+			Category: "locks",
+			Message:  errorMsg,
+			Hint:     "Lock table exhausted - typically caused by large objects in parallel restore",
+			Action:   "Increase max_locks_per_transaction in postgresql.conf to 512 or higher",
+			Severity: 2,
+		}
+	case "permission_denied":
+		return &ErrorClassification{
+			Type:     "critical",
+			Category: "permissions",
+			Message:  errorMsg,
+			Hint:     "Insufficient permissions to perform operation",
+			Action:   "Run as superuser or use --no-owner flag for restore",
+			Severity: 2,
+		}
+	case "connection_failed":
+		return &ErrorClassification{
+			Type:     "critical",
+			Category: "network",
+			Message:  errorMsg,
+			Hint:     "Cannot connect to database server",
+			Action:   "Check database is running and pg_hba.conf allows connection",
+			Severity: 2,
+		}
+	case "version_mismatch":
+		return &ErrorClassification{
+			Type:     "warning",
+			Category: "version",
+			Message:  errorMsg,
+			Hint:     "PostgreSQL version mismatch between backup and restore target",
+			Action:   "Review release notes for compatibility: https://www.postgresql.org/docs/",
+			Severity: 1,
+		}
+	case "syntax_error":
+		return &ErrorClassification{
+			Type:     "critical",
+			Category: "corruption",
+			Message:  errorMsg,
+			Hint:     "Syntax error in dump file - backup may be corrupted or incomplete",
+			Action:   "Re-create backup with: dbbackup backup single ",
+			Severity: 3,
+		}
+	}
+
+	// Fallback to original string matching for backward compatibility
 	if strings.Contains(lowerMsg, "already exists") {
 		return &ErrorClassification{
 			Type:     "ignorable",
diff --git a/internal/cleanup/processes.go b/internal/cleanup/processes.go
index eb58fe0..ca4a9d9 100644
--- a/internal/cleanup/processes.go
+++ b/internal/cleanup/processes.go
@@ -4,16 +4,81 @@
 package cleanup
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"os/exec"
 	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 
 	"dbbackup/internal/logger"
 )
 
+// ProcessManager tracks and manages process lifecycle safely
+type ProcessManager struct {
+	mu        sync.RWMutex
+	processes map[int]*os.Process
+	ctx       context.Context
+	cancel    context.CancelFunc
+	log       logger.Logger
+}
+
+// NewProcessManager creates a new process manager
+func NewProcessManager(log logger.Logger) *ProcessManager {
+	ctx, cancel := context.WithCancel(context.Background())
+	return &ProcessManager{
+		processes: make(map[int]*os.Process),
+		ctx:       ctx,
+		cancel:    cancel,
+		log:       log,
+	}
+}
+
+// Track adds a process to be managed
+func (pm *ProcessManager) Track(proc *os.Process) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.processes[proc.Pid] = proc
+
+	// Auto-cleanup when process exits
+	go func() {
+		proc.Wait()
+		pm.mu.Lock()
+		delete(pm.processes, proc.Pid)
+		pm.mu.Unlock()
+	}()
+}
+
+// KillAll kills all tracked processes
+func (pm *ProcessManager) KillAll() error {
+	pm.mu.RLock()
+	procs := make([]*os.Process, 0, len(pm.processes))
+	for _, proc := range pm.processes {
+		procs = append(procs, proc)
+	}
+	pm.mu.RUnlock()
+
+	var errors []error
+	for _, proc := range procs {
+		if err := proc.Kill(); err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("failed to kill %d processes: %v", len(errors), errors)
+	}
+	return nil
+}
+
+// Close cleans up the process manager
+func (pm *ProcessManager) Close() error {
+	pm.cancel()
+	return pm.KillAll()
+}
+
 // KillOrphanedProcesses finds and kills any orphaned pg_dump, pg_restore, gzip, or pigz processes
 func KillOrphanedProcesses(log logger.Logger) error {
 	processNames := []string{"pg_dump", "pg_restore", "gzip", "pigz", "gunzip"}
diff --git a/internal/config/config.go b/internal/config/config.go
index fdae46c..5f920fc 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -49,6 +49,10 @@ type Config struct {
 	Debug        bool
 	LogLevel     string
 	LogFormat    string
+
+	// Config persistence
+	NoSaveConfig bool
+	NoLoadConfig bool
 	OutputLength int
 
 	// Single database backup/restore
diff --git a/internal/config/persist.go b/internal/config/persist.go
new file mode 100644
index 0000000..4ca68ae
--- /dev/null
+++ b/internal/config/persist.go
@@ -0,0 +1,246 @@
+package config
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+const ConfigFileName = ".dbbackup.conf"
+
+// LocalConfig represents a saved configuration in the current directory
+type LocalConfig struct {
+	// Database settings
+	DBType   string
+	Host     string
+	Port     int
+	User     string
+	Database string
+	SSLMode  string
+
+	// Backup settings
+	BackupDir   string
+	Compression int
+	Jobs        int
+	DumpJobs    int
+
+	// Performance settings
+	CPUWorkload string
+	MaxCores    int
+}
+
+// LoadLocalConfig loads configuration from .dbbackup.conf in current directory
+func LoadLocalConfig() (*LocalConfig, error) {
+	configPath := filepath.Join(".", ConfigFileName)
+
+	data, err := os.ReadFile(configPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, nil // No config file, not an error
+		}
+		return nil, fmt.Errorf("failed to read config file: %w", err)
+	}
+
+	cfg := &LocalConfig{}
+	lines := strings.Split(string(data), "\n")
+	currentSection := ""
+
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+
+		// Skip empty lines and comments
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		// Section headers
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.Trim(line, "[]")
+			continue
+		}
+
+		// Key-value pairs
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) != 2 {
+			continue
+		}
+
+		key := strings.TrimSpace(parts[0])
+		value := strings.TrimSpace(parts[1])
+
+		switch currentSection {
+		case "database":
+			switch key {
+			case "type":
+				cfg.DBType = value
+			case "host":
+				cfg.Host = value
+			case "port":
+				if p, err := strconv.Atoi(value); err == nil {
+					cfg.Port = p
+				}
+			case "user":
+				cfg.User = value
+			case "database":
+				cfg.Database = value
+			case "ssl_mode":
+				cfg.SSLMode = value
+			}
+		case "backup":
+			switch key {
+			case "backup_dir":
+				cfg.BackupDir = value
+			case "compression":
+				if c, err := strconv.Atoi(value); err == nil {
+					cfg.Compression = c
+				}
+			case "jobs":
+				if j, err := strconv.Atoi(value); err == nil {
+					cfg.Jobs = j
+				}
+			case "dump_jobs":
+				if dj, err := strconv.Atoi(value); err == nil {
+					cfg.DumpJobs = dj
+				}
+			}
+		case "performance":
+			switch key {
+			case "cpu_workload":
+				cfg.CPUWorkload = value
+			case "max_cores":
+				if mc, err := strconv.Atoi(value); err == nil {
+					cfg.MaxCores = mc
+				}
+			}
+		}
+	}
+
+	return cfg, nil
+}
+
+// SaveLocalConfig saves configuration to .dbbackup.conf in current directory
+func SaveLocalConfig(cfg *LocalConfig) error {
+	var sb strings.Builder
+
+	sb.WriteString("# dbbackup configuration\n")
+	sb.WriteString("# This file is auto-generated. Edit with care.\n\n")
+
+	// Database section
+	sb.WriteString("[database]\n")
+	if cfg.DBType != "" {
+		sb.WriteString(fmt.Sprintf("type = %s\n", cfg.DBType))
+	}
+	if cfg.Host != "" {
+		sb.WriteString(fmt.Sprintf("host = %s\n", cfg.Host))
+	}
+	if cfg.Port != 0 {
+		sb.WriteString(fmt.Sprintf("port = %d\n", cfg.Port))
+	}
+	if cfg.User != "" {
+		sb.WriteString(fmt.Sprintf("user = %s\n", cfg.User))
+	}
+	if cfg.Database != "" {
+		sb.WriteString(fmt.Sprintf("database = %s\n", cfg.Database))
+	}
+	if cfg.SSLMode != "" {
+		sb.WriteString(fmt.Sprintf("ssl_mode = %s\n", cfg.SSLMode))
+	}
+	sb.WriteString("\n")
+
+	// Backup section
+	sb.WriteString("[backup]\n")
+	if cfg.BackupDir != "" {
+		sb.WriteString(fmt.Sprintf("backup_dir = %s\n", cfg.BackupDir))
+	}
+	if cfg.Compression != 0 {
+		sb.WriteString(fmt.Sprintf("compression = %d\n", cfg.Compression))
+	}
+	if cfg.Jobs != 0 {
+		sb.WriteString(fmt.Sprintf("jobs = %d\n", cfg.Jobs))
+	}
+	if cfg.DumpJobs != 0 {
+		sb.WriteString(fmt.Sprintf("dump_jobs = %d\n", cfg.DumpJobs))
+	}
+	sb.WriteString("\n")
+
+	// Performance section
+	sb.WriteString("[performance]\n")
+	if cfg.CPUWorkload != "" {
+		sb.WriteString(fmt.Sprintf("cpu_workload = %s\n", cfg.CPUWorkload))
+	}
+	if cfg.MaxCores != 0 {
+		sb.WriteString(fmt.Sprintf("max_cores = %d\n", cfg.MaxCores))
+	}
+
+	configPath := filepath.Join(".", ConfigFileName)
+	if err := os.WriteFile(configPath, []byte(sb.String()), 0644); err != nil {
+		return fmt.Errorf("failed to write config file: %w", err)
+	}
+
+	return nil
+}
+
+// ApplyLocalConfig applies loaded local config to the main config if values are not already set
+func ApplyLocalConfig(cfg *Config, local *LocalConfig) {
+	if local == nil {
+		return
+	}
+
+	// Only apply if not already set via flags
+	if cfg.DatabaseType == "postgres" && local.DBType != "" {
+		cfg.DatabaseType = local.DBType
+	}
+	if cfg.Host == "localhost" && local.Host != "" {
+		cfg.Host = local.Host
+	}
+	if cfg.Port == 5432 && local.Port != 0 {
+		cfg.Port = local.Port
+	}
+	if cfg.User == "root" && local.User != "" {
+		cfg.User = local.User
+	}
+	if local.Database != "" {
+		cfg.Database = local.Database
+	}
+	if cfg.SSLMode == "prefer" && local.SSLMode != "" {
+		cfg.SSLMode = local.SSLMode
+	}
+	if local.BackupDir != "" {
+		cfg.BackupDir = local.BackupDir
+	}
+	if cfg.CompressionLevel == 6 && local.Compression != 0 {
+		cfg.CompressionLevel = local.Compression
+	}
+	if local.Jobs != 0 {
+		cfg.Jobs = local.Jobs
+	}
+	if local.DumpJobs != 0 {
+		cfg.DumpJobs = local.DumpJobs
+	}
+	if cfg.CPUWorkloadType == "balanced" && local.CPUWorkload != "" {
+		cfg.CPUWorkloadType = local.CPUWorkload
+	}
+	if local.MaxCores != 0 {
+		cfg.MaxCores = local.MaxCores
+	}
+}
+
+// ConfigFromConfig creates a LocalConfig from a Config
+func ConfigFromConfig(cfg *Config) *LocalConfig {
+	return &LocalConfig{
+		DBType:      cfg.DatabaseType,
+		Host:        cfg.Host,
+		Port:        cfg.Port,
+		User:        cfg.User,
+		Database:    cfg.Database,
+		SSLMode:     cfg.SSLMode,
+		BackupDir:   cfg.BackupDir,
+		Compression: cfg.CompressionLevel,
+		Jobs:        cfg.Jobs,
+		DumpJobs:    cfg.DumpJobs,
+		CPUWorkload: cfg.CPUWorkloadType,
+		MaxCores:    cfg.MaxCores,
+	}
+}
diff --git a/internal/logger/logger.go b/internal/logger/logger.go
index 58a9134..d4d7800 100644
--- a/internal/logger/logger.go
+++ b/internal/logger/logger.go
@@ -13,9 +13,13 @@ import (
 // Logger defines the interface for logging
 type Logger interface {
 	Debug(msg string, args ...any)
-	Info(msg string, args ...any)
-	Warn(msg string, args ...any)
-	Error(msg string, args ...any)
+	Info(msg string, keysAndValues ...interface{})
+	Warn(msg string, keysAndValues ...interface{})
+	Error(msg string, keysAndValues ...interface{})
+
+	// Structured logging methods
+	WithFields(fields map[string]interface{}) Logger
+	WithField(key string, value interface{}) Logger
 	Time(msg string, args ...any)
 
 	// Progress logging for operations
@@ -109,10 +113,11 @@ func (l *logger) Error(msg string, args ...any) {
 }
 
 func (l *logger) Time(msg string, args ...any) {
-	// Time logs are always at info level with special formatting
+	// Time logs are always at info level with special formatting
 	l.logWithFields(logrus.InfoLevel, "[TIME] "+msg, args...)
 }
 
+// StartOperation creates a new operation logger
 func (l *logger) StartOperation(name string) OperationLogger {
 	return &operationLogger{
 		name: name,
@@ -121,6 +126,24 @@ func (l *logger) StartOperation(name string) OperationLogger {
 	}
 }
 
+// WithFields creates a logger with structured fields
+func (l *logger) WithFields(fields map[string]interface{}) Logger {
+	return &logger{
+		logrus: l.logrus.WithFields(logrus.Fields(fields)).Logger,
+		level:  l.level,
+		format: l.format,
+	}
+}
+
+// WithField creates a logger with a single structured field
+func (l *logger) WithField(key string, value interface{}) Logger {
+	return &logger{
+		logrus: l.logrus.WithField(key, value).Logger,
+		level:  l.level,
+		format: l.format,
+	}
+}
+
 func (ol *operationLogger) Update(msg string, args ...any) {
 	elapsed := time.Since(ol.startTime)
 	ol.parent.Info(fmt.Sprintf("[%s] %s", ol.name, msg),
diff --git a/internal/metrics/collector.go b/internal/metrics/collector.go
new file mode 100644
index 0000000..4daa3ae
--- /dev/null
+++ b/internal/metrics/collector.go
@@ -0,0 +1,162 @@
+package metrics
+
+import (
+	"sync"
+	"time"
+
+	"dbbackup/internal/logger"
+)
+
+// OperationMetrics holds performance metrics for database operations
+type OperationMetrics struct {
+	Operation        string        `json:"operation"`
+	Database         string        `json:"database"`
+	StartTime        time.Time     `json:"start_time"`
+	Duration         time.Duration `json:"duration"`
+	SizeBytes        int64         `json:"size_bytes"`
+	CompressionRatio float64       `json:"compression_ratio,omitempty"`
+	ThroughputMBps   float64       `json:"throughput_mbps"`
+	ErrorCount       int           `json:"error_count"`
+	Success          bool          `json:"success"`
+}
+
+// MetricsCollector collects and reports operation metrics
+type MetricsCollector struct {
+	metrics []OperationMetrics
+	mu      sync.RWMutex
+	logger  logger.Logger
+}
+
+// NewMetricsCollector creates a new metrics collector
+func NewMetricsCollector(log logger.Logger) *MetricsCollector {
+	return &MetricsCollector{
+		metrics: make([]OperationMetrics, 0),
+		logger:  log,
+	}
+}
+
+// RecordOperation records metrics for a completed operation
+func (mc *MetricsCollector) RecordOperation(operation, database string, start time.Time, sizeBytes int64, success bool, errorCount int) {
+	duration := time.Since(start)
+	throughput := calculateThroughput(sizeBytes, duration)
+
+	metric := OperationMetrics{
+		Operation:      operation,
+		Database:       database,
+		StartTime:      start,
+		Duration:       duration,
+		SizeBytes:      sizeBytes,
+		ThroughputMBps: throughput,
+		ErrorCount:     errorCount,
+		Success:        success,
+	}
+
+	mc.mu.Lock()
+	mc.metrics = append(mc.metrics, metric)
+	mc.mu.Unlock()
+
+	// Log structured metrics
+	if mc.logger != nil {
+		fields := map[string]interface{}{
+			"metric_type":     "operation_complete",
+			"operation":       operation,
+			"database":        database,
+			"duration_ms":     duration.Milliseconds(),
+			"size_bytes":      sizeBytes,
+			"throughput_mbps": throughput,
+			"error_count":     errorCount,
+			"success":         success,
+		}
+
+		if success {
+			mc.logger.WithFields(fields).Info("Operation completed successfully")
+		} else {
+			mc.logger.WithFields(fields).Error("Operation failed")
+		}
+	}
+}
+
+// RecordCompressionRatio updates compression ratio for a recorded operation
+func (mc *MetricsCollector) RecordCompressionRatio(operation, database string, ratio float64) {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+
+	// Find and update the most recent matching operation
+	for i := len(mc.metrics) - 1; i >= 0; i-- {
+		if mc.metrics[i].Operation == operation && mc.metrics[i].Database == database {
+			mc.metrics[i].CompressionRatio = ratio
+			break
+		}
+	}
+}
+
+// GetMetrics returns a copy of all collected metrics
+func (mc *MetricsCollector) GetMetrics() []OperationMetrics {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	result := make([]OperationMetrics, len(mc.metrics))
+	copy(result, mc.metrics)
+	return result
+}
+
+// GetAverages calculates average performance metrics
+func (mc *MetricsCollector) GetAverages() map[string]interface{} {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	if len(mc.metrics) == 0 {
+		return map[string]interface{}{}
+	}
+
+	var totalDuration time.Duration
+	var totalSize, totalThroughput float64
+	var successCount, errorCount int
+
+	for _, m := range mc.metrics {
+		totalDuration += m.Duration
+		totalSize += float64(m.SizeBytes)
+		totalThroughput += m.ThroughputMBps
+		if m.Success {
+			successCount++
+		}
+		errorCount += m.ErrorCount
+	}
+
+	count := len(mc.metrics)
+	return map[string]interface{}{
+		"total_operations":    count,
+		"success_rate":        float64(successCount) / float64(count) * 100,
+		"avg_duration_ms":     totalDuration.Milliseconds() / int64(count),
+		"avg_size_mb":         totalSize / float64(count) / 1024 / 1024,
+		"avg_throughput_mbps": totalThroughput / float64(count),
+		"total_errors":        errorCount,
+	}
+}
+
+// Clear removes all collected metrics
+func (mc *MetricsCollector) Clear() {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+	mc.metrics = make([]OperationMetrics, 0)
+}
+
+// calculateThroughput calculates MB/s throughput
+func calculateThroughput(bytes int64, duration time.Duration) float64 {
+	if duration == 0 {
+		return 0
+	}
+	seconds := duration.Seconds()
+	if seconds == 0 {
+		return 0
+	}
+	return float64(bytes) / seconds / 1024 / 1024
+}
+
+// Global metrics collector instance
+var GlobalMetrics *MetricsCollector
+
+// InitGlobalMetrics initializes the global metrics collector
+func InitGlobalMetrics(log logger.Logger) {
+	GlobalMetrics = NewMetricsCollector(log)
+}
\ No newline at end of file
diff --git a/internal/tui/menu.go b/internal/tui/menu.go
index 00a5fca..e211ef1 100644
--- a/internal/tui/menu.go
+++ b/internal/tui/menu.go
@@ -3,7 +3,9 @@ package tui
 import (
 	"context"
 	"fmt"
+	"io"
 	"strings"
+	"sync"
 
 	tea "github.com/charmbracelet/bubbletea"
 	"github.com/charmbracelet/lipgloss"
@@ -61,8 +63,9 @@ type MenuModel struct {
 	dbTypeCursor int
 
 	// Background operations
-	ctx    context.Context
-	cancel context.CancelFunc
+	ctx       context.Context
+	cancel    context.CancelFunc
+	closeOnce sync.Once
 }
 
 func NewMenuModel(cfg *config.Config, log logger.Logger) MenuModel {
@@ -109,6 +112,19 @@ func NewMenuModel(cfg *config.Config, log logger.Logger) MenuModel {
 	return model
 }
 
+// Close implements io.Closer for safe cleanup
+func (m *MenuModel) Close() error {
+	m.closeOnce.Do(func() {
+		if m.cancel != nil {
+			m.cancel()
+		}
+	})
+	return nil
+}
+
+// Ensure MenuModel implements io.Closer
+var _ io.Closer = (*MenuModel)(nil)
+
 // Init initializes the model
 func (m MenuModel) Init() tea.Cmd {
 	return nil
diff --git a/internal/tui/progress.go b/internal/tui/progress.go
index fb38074..024945b 100644
--- a/internal/tui/progress.go
+++ b/internal/tui/progress.go
@@ -252,6 +252,12 @@ func (s *SilentLogger) Time(msg string, args ...any) {}
 func (s *SilentLogger) StartOperation(name string) logger.OperationLogger {
 	return &SilentOperation{}
 }
+func (s *SilentLogger) WithFields(fields map[string]interface{}) logger.Logger {
+	return s
+}
+func (s *SilentLogger) WithField(key string, value interface{}) logger.Logger {
+	return s
+}
 
 // SilentOperation implements logger.OperationLogger but doesn't output anything
 type SilentOperation struct{}
diff --git a/main.go b/main.go
index 3ec0655..7ffb05e 100644
--- a/main.go
+++ b/main.go
@@ -2,6 +2,7 @@ package main
 
 import (
 	"context"
+	"fmt"
 	"log/slog"
 	"os"
 	"os/signal"
@@ -10,6 +11,7 @@ import (
 	"dbbackup/cmd"
 	"dbbackup/internal/config"
 	"dbbackup/internal/logger"
+	"dbbackup/internal/metrics"
 )
 
 // Build information (set by ldflags)
@@ -42,6 +44,20 @@ func main() {
 	// Initialize logger
 	log := logger.New(cfg.LogLevel, cfg.LogFormat)
 
+	// Initialize global metrics
+	metrics.InitGlobalMetrics(log)
+
+	// Show session summary on exit
+	defer func() {
+		if metrics.GlobalMetrics != nil {
+			avgs := metrics.GlobalMetrics.GetAverages()
+			if ops, ok := avgs["total_operations"].(int); ok && ops > 0 {
+				fmt.Printf("\nšŸ“Š Session Summary: %d operations, %.1f%% success rate\n",
+					ops, avgs["success_rate"])
+			}
+		}
+	}()
+
 	// Execute command
 	if err := cmd.Execute(ctx, cfg, log); err != nil {
 		log.Error("Application failed", "error", err)
diff --git a/test_build b/test_build
new file mode 100755
index 0000000..6625942
Binary files /dev/null and b/test_build differ