From 2019591b5b643d47a7098e3a65024a324a60e11b Mon Sep 17 00:00:00 2001
From: Renz
Date: Wed, 12 Nov 2025 11:37:02 +0000
Subject: [PATCH] Optimize: Fix high/medium/low priority issues and apply optimizations

High Priority Fixes:
- Use configurable ClusterTimeoutMinutes for restore (was hardcoded 2 hours)
- Add comments explaining goroutine cleanup in the stderr reader (cmd.Run waits)
- Add defer cancel() in cluster backup loop to prevent context leak on panic

Medium Priority Fixes:
- Standardize tick rate to 100ms for both backup and restore (consistent UX)
- Add spinnerFrame field to BackupExecutionModel for incremental updates
- Define package-level spinnerFrames variable to avoid repeated allocation

Low Priority Fixes:
- Add 30-second timeout per database in cluster cleanup loop
- Prevents indefinite hangs when dropping many databases

Optimizations:
- Pre-allocate 512 bytes in View() string builders (reduces allocations)
- Use incremental spinner frame calculation (more efficient than time-based)
- Share spinner frames array across all TUI operations

All changes are backward compatible and maintain existing behavior.
---
 internal/backup/engine.go    |  6 +++++-
 internal/tui/backup_exec.go  | 10 +++++++---
 internal/tui/restore_exec.go | 17 +++++++++++++----
 3 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/internal/backup/engine.go b/internal/backup/engine.go
index da06520..3128a03 100644
--- a/internal/backup/engine.go
+++ b/internal/backup/engine.go
@@ -399,8 +399,9 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
 		// Use a context with timeout for each database to prevent hangs
 		// Use longer timeout for huge databases (2 hours per database)
 		dbCtx, cancel := context.WithTimeout(ctx, 2*time.Hour)
+		defer cancel() // Ensure cancel is called even if executeCommand panics
 		err := e.executeCommand(dbCtx, cmd, dumpFile)
-		cancel()
+		cancel() // Also call immediately for early cleanup

 		if err != nil {
 			e.log.Warn("Failed to backup database", "database", dbName, "error", err)
@@ -786,6 +787,7 @@ regularTar:
 	cmd := exec.CommandContext(ctx, compressCmd, compressArgs...)

 	// Stream stderr to avoid memory issues
+	// Read stderr with a bufio.Scanner so the goroutine exits when the pipe closes
 	stderr, err := cmd.StderrPipe()
 	if err == nil {
 		go func() {
@@ -796,12 +798,14 @@ regularTar:
 					e.log.Debug("Archive creation", "output", line)
 				}
 			}
+			// Scanner will exit when the stderr pipe closes after cmd.Wait()
 		}()
 	}

 	if err := cmd.Run(); err != nil {
 		return fmt.Errorf("tar failed: %w", err)
 	}
+	// cmd.Run() calls Wait(), which closes the stderr pipe and terminates the goroutine

 	return nil
 }

diff --git a/internal/tui/backup_exec.go b/internal/tui/backup_exec.go
index 48b6eb4..568f085 100644
--- a/internal/tui/backup_exec.go
+++ b/internal/tui/backup_exec.go
@@ -29,6 +29,7 @@ type BackupExecutionModel struct {
 	result       string
 	startTime    time.Time
 	details      []string
+	spinnerFrame int
 }

 func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model, backupType, dbName string, ratio int) BackupExecutionModel {
@@ -42,6 +43,7 @@ func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model,
 		status:       "Initializing...",
 		startTime:    time.Now(),
 		details:      []string{},
+		spinnerFrame: 0,
 	}
 }
@@ -144,6 +146,9 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	switch msg := msg.(type) {
 	case backupTickMsg:
 		if !m.done {
+			// Increment spinner frame for smooth animation
+			m.spinnerFrame = (m.spinnerFrame + 1) % len(spinnerFrames)
+
 			// Update status based on elapsed time to show progress
 			elapsedSec := int(time.Since(m.startTime).Seconds())

@@ -207,6 +212,7 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {

 func (m BackupExecutionModel) View() string {
 	var s strings.Builder
+	s.Grow(512) // Pre-allocate estimated capacity for better performance

 	// Clear screen with newlines and render header
 	s.WriteString("\n\n")
@@ -227,9 +233,7 @@ func (m BackupExecutionModel) View() string {

 	// Status with spinner
 	if !m.done {
-		spinner := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
-		frame := int(time.Since(m.startTime).Milliseconds()/100) % len(spinner)
-		s.WriteString(fmt.Sprintf(" %s %s\n", spinner[frame], m.status))
+		s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
 	} else {
 		s.WriteString(fmt.Sprintf(" %s\n\n", m.status))

diff --git a/internal/tui/restore_exec.go b/internal/tui/restore_exec.go
index 506f5f7..b38c337 100644
--- a/internal/tui/restore_exec.go
+++ b/internal/tui/restore_exec.go
@@ -15,6 +15,9 @@ import (
 	"dbbackup/internal/restore"
 )

+// Shared spinner frames for consistent animation across all TUI operations
+var spinnerFrames = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
+
 // RestoreExecutionModel handles restore execution with progress
 type RestoreExecutionModel struct {
 	config *config.Config
@@ -61,7 +64,7 @@ func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model,
 		phase:         "Starting",
 		startTime:     time.Now(),
 		details:       []string{},
-		spinnerFrames: []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"},
+		spinnerFrames: spinnerFrames, // Use the shared package-level slice
 		spinnerFrame:  0,
 	}
 }

@@ -76,7 +79,7 @@ func (m RestoreExecutionModel) Init() tea.Cmd {
 type restoreTickMsg time.Time

 func restoreTickCmd() tea.Cmd {
-	return tea.Tick(time.Millisecond*200, func(t time.Time) tea.Msg {
+	return tea.Tick(time.Millisecond*100, func(t time.Time) tea.Msg {
 		return restoreTickMsg(t)
 	})
 }
@@ -96,7 +99,9 @@ type restoreCompleteMsg struct {
 func executeRestoreWithTUIProgress(cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst,
 	createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string) tea.Cmd {
 	return func() tea.Msg {
-		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Hour)
+		// Use configurable cluster timeout (minutes) from config; default set in config.New()
+		restoreTimeout := time.Duration(cfg.ClusterTimeoutMinutes) * time.Minute
+		ctx, cancel := context.WithTimeout(context.Background(), restoreTimeout)
 		defer cancel()

 		start := time.Now()
@@ -120,13 +125,16 @@ func executeRestoreWithTUIProgress(cfg *config.Config, log logger.Logger, archiv
 			// This matches how cluster restore works - uses CLI tools, not database connections
 			droppedCount := 0
 			for _, dbName := range existingDBs {
-				if err := dropDatabaseCLI(ctx, cfg, dbName); err != nil {
+				// Create timeout context for each database drop (30 seconds per DB)
+				dropCtx, dropCancel := context.WithTimeout(ctx, 30*time.Second)
+				if err := dropDatabaseCLI(dropCtx, cfg, dbName); err != nil {
 					log.Warn("Failed to drop database", "name", dbName, "error", err)
 					// Continue with other databases
 				} else {
 					droppedCount++
 					log.Info("Dropped database", "name", dbName)
 				}
+				dropCancel() // Clean up context
 			}

 			log.Info("Cluster cleanup completed", "dropped", droppedCount, "total", len(existingDBs))
@@ -263,6 +271,7 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {

 func (m RestoreExecutionModel) View() string {
 	var s strings.Builder
+	s.Grow(512) // Pre-allocate estimated capacity for better performance

 	// Title
 	title := "💾 Restoring Database"
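
Note: the following is a minimal standalone sketch of the per-item timeout pattern that both the backup loop and the cluster cleanup loop above rely on, not code from the repository. The dropOne helper and the database names are hypothetical stand-ins for dropDatabaseCLI and the real cluster contents; only the 30-second per-item budget and the immediate cancel() call mirror the patch.

package main

import (
	"context"
	"fmt"
	"time"
)

// dropOne is a hypothetical stand-in for dropDatabaseCLI: it must honor
// context cancellation so the per-database deadline can actually stop it.
func dropOne(ctx context.Context, name string) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// cleanup drops each database under its own 30-second budget, derived from
// the parent context so an overall restore timeout still applies.
func cleanup(parent context.Context, names []string) int {
	dropped := 0
	for _, name := range names {
		dbCtx, cancel := context.WithTimeout(parent, 30*time.Second)
		err := dropOne(dbCtx, name)
		// Cancel immediately rather than deferring inside the loop: defers
		// would accumulate until the function returns and keep timers alive.
		cancel()
		if err != nil {
			fmt.Println("skipping", name, ":", err)
			continue
		}
		dropped++
	}
	return dropped
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Hour)
	defer cancel()
	fmt.Println("dropped:", cleanup(ctx, []string{"app_db", "reports_db"}))
}

Calling a CancelFunc more than once is a documented no-op, which is why the patch can safely pair the deferred cancel in BackupCluster (panic safety) with the immediate cancel (prompt timer release).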
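The TUI changes follow one theme: a single package-level frame slice, an integer frame index advanced once per tick, and a pre-grown builder in View(). The sketch below shows that shape outside Bubble Tea; the model type, its fields, and the render loop are illustrative, not the repository's actual types.

package main

import (
	"fmt"
	"strings"
)

// One shared frame slice instead of re-allocating it on every render.
var spinnerFrames = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}

// model is an illustrative stand-in for the TUI execution models.
type model struct {
	spinnerFrame int
	status       string
}

// tick advances exactly one frame per timer message; with a 100ms tick this
// yields a steady animation without recomputing frames from elapsed time.
func (m *model) tick() {
	m.spinnerFrame = (m.spinnerFrame + 1) % len(spinnerFrames)
}

// view pre-grows the builder so repeated renders avoid reallocations.
func (m *model) view() string {
	var s strings.Builder
	s.Grow(512)
	fmt.Fprintf(&s, " %s %s\n", spinnerFrames[m.spinnerFrame], m.status)
	return s.String()
}

func main() {
	m := &model{status: "Backing up..."}
	for i := 0; i < 3; i++ {
		m.tick()
		fmt.Print(m.view())
	}
}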