Optimize: Fix high/medium/low priority issues and apply optimizations
High Priority Fixes:
- Use configurable ClusterTimeoutMinutes for restore (was hardcoded 2 hours)
- Add comment explaining goroutine cleanup in stderr reader (cmd.Run waits)
- Add defer cancel() in cluster backup loop to prevent context leak on panic

Medium Priority Fixes:
- Standardize tick rate to 100ms for both backup and restore (consistent UX)
- Add spinnerFrame field to BackupExecutionModel for incremental updates
- Define package-level spinnerFrames constant to avoid repeated allocation

Low Priority Fixes:
- Add 30-second timeout per database in cluster cleanup loop
- Prevents indefinite hangs when dropping many databases

Optimizations:
- Pre-allocate 512 bytes in View() string builders (reduces allocations)
- Use incremental spinner frame calculation (more efficient than time-based)
- Share spinner frames array across all TUI operations

All changes are backward compatible and maintain existing behavior.
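The spinner work above reduces to one pattern: a package-level frames slice shared by every model, a frame index stored on the model and advanced once per 100ms tick, and a View() that only indexes into the slice. A minimal standalone sketch of that pattern, assuming the Bubble Tea API (github.com/charmbracelet/bubbletea); the model and its names here are illustrative, not the project's actual types:

    package main

    import (
        "fmt"
        "strings"
        "time"

        tea "github.com/charmbracelet/bubbletea"
    )

    // Shared frames: allocated once at package level and reused by every model.
    var spinnerFrames = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}

    type tickMsg time.Time

    // tickCmd schedules the next frame 100ms out (the standardized tick rate).
    func tickCmd() tea.Cmd {
        return tea.Tick(100*time.Millisecond, func(t time.Time) tea.Msg {
            return tickMsg(t)
        })
    }

    type demoModel struct {
        frame int // incremental frame index; View does no time math
    }

    func (m demoModel) Init() tea.Cmd { return tickCmd() }

    func (m demoModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
        switch msg.(type) {
        case tickMsg:
            m.frame = (m.frame + 1) % len(spinnerFrames) // advance one frame per tick
            return m, tickCmd()
        case tea.KeyMsg:
            return m, tea.Quit
        }
        return m, nil
    }

    func (m demoModel) View() string {
        var s strings.Builder
        s.Grow(512) // pre-size the builder, mirroring the View() changes below
        s.WriteString(fmt.Sprintf(" %s working... press any key to quit\n", spinnerFrames[m.frame]))
        return s.String()
    }

    func main() {
        if _, err := tea.NewProgram(demoModel{}).Run(); err != nil {
            fmt.Println("error:", err)
        }
    }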
@@ -29,6 +29,7 @@ type BackupExecutionModel struct {
     result string
     startTime time.Time
     details []string
+    spinnerFrame int
 }

 func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model, backupType, dbName string, ratio int) BackupExecutionModel {

@@ -42,6 +43,7 @@ func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model,
         status: "Initializing...",
         startTime: time.Now(),
         details: []string{},
+        spinnerFrame: 0,
     }
 }

@@ -144,6 +146,9 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
     switch msg := msg.(type) {
     case backupTickMsg:
         if !m.done {
+            // Increment spinner frame for smooth animation
+            m.spinnerFrame = (m.spinnerFrame + 1) % len(spinnerFrames)
+
             // Update status based on elapsed time to show progress
             elapsedSec := int(time.Since(m.startTime).Seconds())

@@ -207,6 +212,7 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {

 func (m BackupExecutionModel) View() string {
     var s strings.Builder
+    s.Grow(512) // Pre-allocate estimated capacity for better performance

     // Clear screen with newlines and render header
     s.WriteString("\n\n")

@@ -227,9 +233,7 @@ func (m BackupExecutionModel) View() string {
     // Status with spinner
     if !m.done {
-        spinner := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
-        frame := int(time.Since(m.startTime).Milliseconds()/100) % len(spinner)
-        s.WriteString(fmt.Sprintf(" %s %s\n", spinner[frame], m.status))
+        s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
     } else {
         s.WriteString(fmt.Sprintf(" %s\n\n", m.status))
@@ -15,6 +15,9 @@ import (
     "dbbackup/internal/restore"
 )

+// Shared spinner frames for consistent animation across all TUI operations
+var spinnerFrames = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
+
 // RestoreExecutionModel handles restore execution with progress
 type RestoreExecutionModel struct {
     config *config.Config

@@ -61,7 +64,7 @@ func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model
         phase: "Starting",
         startTime: time.Now(),
         details: []string{},
-        spinnerFrames: []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"},
+        spinnerFrames: spinnerFrames, // Use package-level constant
         spinnerFrame: 0,
     }
 }

@@ -76,7 +79,7 @@ func (m RestoreExecutionModel) Init() tea.Cmd {
 type restoreTickMsg time.Time

 func restoreTickCmd() tea.Cmd {
-    return tea.Tick(time.Millisecond*200, func(t time.Time) tea.Msg {
+    return tea.Tick(time.Millisecond*100, func(t time.Time) tea.Msg {
         return restoreTickMsg(t)
     })
 }

@@ -96,7 +99,9 @@ type restoreCompleteMsg struct {

 func executeRestoreWithTUIProgress(cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string) tea.Cmd {
     return func() tea.Msg {
-        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Hour)
+        // Use configurable cluster timeout (minutes) from config; default set in config.New()
+        restoreTimeout := time.Duration(cfg.ClusterTimeoutMinutes) * time.Minute
+        ctx, cancel := context.WithTimeout(context.Background(), restoreTimeout)
         defer cancel()

         start := time.Now()

@@ -120,13 +125,16 @@ func executeRestoreWithTUIProgress(cfg *config.Config, log logger.Logger, archiv
         // This matches how cluster restore works - uses CLI tools, not database connections
         droppedCount := 0
         for _, dbName := range existingDBs {
-            if err := dropDatabaseCLI(ctx, cfg, dbName); err != nil {
+            // Create timeout context for each database drop (30 seconds per DB)
+            dropCtx, dropCancel := context.WithTimeout(ctx, 30*time.Second)
+            if err := dropDatabaseCLI(dropCtx, cfg, dbName); err != nil {
                 log.Warn("Failed to drop database", "name", dbName, "error", err)
                 // Continue with other databases
             } else {
                 droppedCount++
                 log.Info("Dropped database", "name", dbName)
             }
+            dropCancel() // Clean up context
         }

         log.Info("Cluster cleanup completed", "dropped", droppedCount, "total", len(existingDBs))

@@ -263,6 +271,7 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {

 func (m RestoreExecutionModel) View() string {
     var s strings.Builder
+    s.Grow(512) // Pre-allocate estimated capacity for better performance

     // Title
     title := "💾 Restoring Database"
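A note on the cleanup-loop hunk above: dropCancel() is called inline at the end of each iteration rather than deferred, because a defer inside a loop only runs when the enclosing function returns, so the per-database timers would otherwise stay alive for the whole restore. A small self-contained sketch of that per-item timeout pattern (dropDatabase, the 5-minute parent timeout, and the database names are placeholders, not the project's helpers):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // dropDatabase stands in for the real CLI call; it honors ctx cancellation.
    func dropDatabase(ctx context.Context, name string) error {
        select {
        case <-time.After(50 * time.Millisecond): // pretend the drop takes a moment
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    func main() {
        // Parent timeout for the whole cleanup (placeholder value).
        parent, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
        defer cancel()

        for _, name := range []string{"app", "reports", "audit"} {
            // A fresh 30-second context per database bounds each drop individually.
            dropCtx, dropCancel := context.WithTimeout(parent, 30*time.Second)
            err := dropDatabase(dropCtx, name)
            // Cancel inline: a defer here would not run until main returns,
            // keeping every per-database timer alive for the whole loop.
            dropCancel()
            if err != nil {
                fmt.Println("drop failed:", name, err)
                continue
            }
            fmt.Println("dropped:", name)
        }
    }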