// Fix: interactive cluster restore showed "Status: Initializing..." for the
// entire run, making it look stuck even though the restore was working.
//
// Root cause: status and phase were set once in NewRestoreExecution(), never
// updated during the restore, and only changed to "Completed" or "Failed" at
// the end — there was no visual feedback about the current stage.
//
// Solution: time-based status progression in the Update() tick handler:
//   - 0-2 s:  "Initializing restore..."                 / Phase: Starting
//   - 2-5 s:  context-aware:
//       cleanup: "Cleaning N existing database(s)..."   / Phase: Cleanup
//       cluster: "Extracting cluster archive..."        / Phase: Extraction
//       single:  "Preparing restore..."                 / Phase: Preparation
//   - 5-10 s: cluster: "Restoring global objects..."    / Phase: Globals
//             single:  "Restoring database..."          / Phase: Restore
//   - 10+ s:  "Restoring [cluster] databases..."        / Phase: Restore
//
// These are estimated phases: the restore engine runs in silent mode (no
// stdout interference with the TUI), so the actual operation may be faster or
// slower than the time estimates, but this gives far better feedback than a
// static "Initializing".
package tui
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"os/exec"
|
|
"strings"
|
|
"time"
|
|
|
|
tea "github.com/charmbracelet/bubbletea"
|
|
|
|
"dbbackup/internal/config"
|
|
"dbbackup/internal/database"
|
|
"dbbackup/internal/logger"
|
|
"dbbackup/internal/restore"
|
|
)
|
|
|
|
// RestoreExecutionModel handles restore execution with progress.
// It runs the restore in a background tea.Cmd and animates a spinner plus
// time-based status text until a restoreCompleteMsg arrives.
type RestoreExecutionModel struct {
	config            *config.Config
	logger            logger.Logger
	parent            tea.Model // model to return to when this screen is dismissed
	archive           ArchiveInfo
	targetDB          string // target database name (single-database restore only)
	cleanFirst        bool   // drop/clean the target database before restoring
	createIfMissing   bool   // create the target database if it does not exist
	restoreType       string // "restore-cluster" or "restore-single"
	cleanClusterFirst bool   // Drop all user databases before cluster restore
	existingDBs       []string // List of databases to drop

	// Progress tracking (display state driven by Update)
	status        string        // current status line shown next to the spinner
	phase         string        // short phase label (Starting/Cleanup/Restore/...)
	progress      int           // 0-100; only rendered for single-database restore
	details       []string      // rolling buffer of the last few detail lines
	startTime     time.Time     // when the restore screen was created
	spinnerFrame  int           // index into spinnerFrames
	spinnerFrames []string      // braille spinner animation frames

	// Results (populated when the restore finishes)
	done    bool
	err     error
	result  string
	elapsed time.Duration
}
|
|
|
|
// NewRestoreExecution creates a new restore execution model
|
|
func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string) RestoreExecutionModel {
|
|
return RestoreExecutionModel{
|
|
config: cfg,
|
|
logger: log,
|
|
parent: parent,
|
|
archive: archive,
|
|
targetDB: targetDB,
|
|
cleanFirst: cleanFirst,
|
|
createIfMissing: createIfMissing,
|
|
restoreType: restoreType,
|
|
cleanClusterFirst: cleanClusterFirst,
|
|
existingDBs: existingDBs,
|
|
status: "Initializing...",
|
|
phase: "Starting",
|
|
startTime: time.Now(),
|
|
details: []string{},
|
|
spinnerFrames: []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"},
|
|
spinnerFrame: 0,
|
|
}
|
|
}
|
|
|
|
func (m RestoreExecutionModel) Init() tea.Cmd {
|
|
return tea.Batch(
|
|
executeRestoreWithTUIProgress(m.config, m.logger, m.archive, m.targetDB, m.cleanFirst, m.createIfMissing, m.restoreType, m.cleanClusterFirst, m.existingDBs),
|
|
restoreTickCmd(),
|
|
)
|
|
}
|
|
|
|
// restoreTickMsg is emitted by restoreTickCmd on every animation tick; it
// drives the spinner and the time-based status text in Update.
type restoreTickMsg time.Time
|
|
|
|
func restoreTickCmd() tea.Cmd {
|
|
return tea.Tick(time.Millisecond*200, func(t time.Time) tea.Msg {
|
|
return restoreTickMsg(t)
|
|
})
|
|
}
|
|
|
|
// restoreProgressMsg carries an explicit status/phase/progress report for the
// restore screen. NOTE(review): nothing in this file emits it — it appears
// reserved for a future real-time progress channel from the restore engine;
// confirm before removing.
type restoreProgressMsg struct {
	status   string // status line to display
	phase    string // phase label to display
	progress int    // percentage, 0-100
	detail   string // optional detail line appended to the rolling detail log
}
|
|
|
|
// restoreCompleteMsg is the single terminal message from the restore worker:
// either a human-readable success summary or the error that stopped it.
type restoreCompleteMsg struct {
	result  string        // success summary; empty on failure
	err     error         // nil on success
	elapsed time.Duration // total wall-clock time of the restore
}
|
|
|
|
// executeRestoreWithTUIProgress returns a tea.Cmd that performs the entire
// restore in the background and resolves to a single restoreCompleteMsg.
// It deliberately emits no intermediate messages: the engine runs in silent
// mode so its output cannot corrupt the TUI, and the Update tick handler
// supplies estimated status text instead.
func executeRestoreWithTUIProgress(cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string) tea.Cmd {
	return func() tea.Msg {
		// Hard upper bound on the whole restore; presumably 2h is generous
		// for typical archives — TODO confirm for very large clusters.
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Hour)
		defer cancel()

		start := time.Now()

		// Create database instance
		dbClient, err := database.New(cfg, log)
		if err != nil {
			return restoreCompleteMsg{
				result:  "",
				err:     fmt.Errorf("failed to create database client: %w", err),
				elapsed: time.Since(start),
			}
		}
		defer dbClient.Close()

		// STEP 1: Clean cluster if requested (drop all existing user databases)
		if restoreType == "restore-cluster" && cleanClusterFirst && len(existingDBs) > 0 {
			log.Info("Dropping existing user databases before cluster restore", "count", len(existingDBs))

			// Drop databases using command-line psql (no connection required).
			// This matches how cluster restore works - uses CLI tools, not
			// database connections.
			droppedCount := 0
			for _, dbName := range existingDBs {
				if err := dropDatabaseCLI(ctx, cfg, dbName); err != nil {
					// A failed drop is logged but not fatal: continue with the
					// remaining databases and let the restore proceed.
					log.Warn("Failed to drop database", "name", dbName, "error", err)
					// Continue with other databases
				} else {
					droppedCount++
					log.Info("Dropped database", "name", dbName)
				}
			}

			log.Info("Cluster cleanup completed", "dropped", droppedCount, "total", len(existingDBs))
		}

		// STEP 2: Create restore engine with silent progress (no stdout interference with TUI)
		engine := restore.NewSilent(cfg, log, dbClient)

		// Set up progress callback (but it won't work in goroutine - progress is already sent via logs)
		// The TUI will just use spinner animation to show activity

		// STEP 3: Execute restore based on type
		var restoreErr error
		if restoreType == "restore-cluster" {
			restoreErr = engine.RestoreCluster(ctx, archive.Path)
		} else {
			restoreErr = engine.RestoreSingle(ctx, archive.Path, targetDB, cleanFirst, createIfMissing)
		}

		if restoreErr != nil {
			return restoreCompleteMsg{
				result:  "",
				err:     restoreErr,
				elapsed: time.Since(start),
			}
		}

		// Build a human-readable success summary tailored to the restore type.
		result := fmt.Sprintf("Successfully restored from %s", archive.Name)
		if restoreType == "restore-single" {
			result = fmt.Sprintf("Successfully restored '%s' from %s", targetDB, archive.Name)
		} else if restoreType == "restore-cluster" && cleanClusterFirst {
			result = fmt.Sprintf("Successfully restored cluster from %s (cleaned %d existing database(s) first)", archive.Name, len(existingDBs))
		}

		return restoreCompleteMsg{
			result:  result,
			err:     nil,
			elapsed: time.Since(start),
		}
	}
}
|
|
|
|
func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|
switch msg := msg.(type) {
|
|
case restoreTickMsg:
|
|
if !m.done {
|
|
m.spinnerFrame = (m.spinnerFrame + 1) % len(m.spinnerFrames)
|
|
m.elapsed = time.Since(m.startTime)
|
|
|
|
// Update status based on elapsed time to show progress
|
|
// This provides visual feedback even though we don't have real-time progress
|
|
elapsedSec := int(m.elapsed.Seconds())
|
|
|
|
if elapsedSec < 2 {
|
|
m.status = "Initializing restore..."
|
|
m.phase = "Starting"
|
|
} else if elapsedSec < 5 {
|
|
if m.cleanClusterFirst && len(m.existingDBs) > 0 {
|
|
m.status = fmt.Sprintf("Cleaning %d existing database(s)...", len(m.existingDBs))
|
|
m.phase = "Cleanup"
|
|
} else if m.restoreType == "restore-cluster" {
|
|
m.status = "Extracting cluster archive..."
|
|
m.phase = "Extraction"
|
|
} else {
|
|
m.status = "Preparing restore..."
|
|
m.phase = "Preparation"
|
|
}
|
|
} else if elapsedSec < 10 {
|
|
if m.restoreType == "restore-cluster" {
|
|
m.status = "Restoring global objects..."
|
|
m.phase = "Globals"
|
|
} else {
|
|
m.status = fmt.Sprintf("Restoring database '%s'...", m.targetDB)
|
|
m.phase = "Restore"
|
|
}
|
|
} else {
|
|
if m.restoreType == "restore-cluster" {
|
|
m.status = "Restoring cluster databases..."
|
|
m.phase = "Restore"
|
|
} else {
|
|
m.status = fmt.Sprintf("Restoring database '%s'...", m.targetDB)
|
|
m.phase = "Restore"
|
|
}
|
|
}
|
|
|
|
return m, restoreTickCmd()
|
|
}
|
|
return m, nil
|
|
|
|
case restoreProgressMsg:
|
|
m.status = msg.status
|
|
m.phase = msg.phase
|
|
m.progress = msg.progress
|
|
if msg.detail != "" {
|
|
m.details = append(m.details, msg.detail)
|
|
// Keep only last 5 details
|
|
if len(m.details) > 5 {
|
|
m.details = m.details[len(m.details)-5:]
|
|
}
|
|
}
|
|
return m, nil
|
|
|
|
case restoreCompleteMsg:
|
|
m.done = true
|
|
m.err = msg.err
|
|
m.result = msg.result
|
|
m.elapsed = msg.elapsed
|
|
|
|
if m.err == nil {
|
|
m.status = "Restore completed successfully"
|
|
m.phase = "Done"
|
|
m.progress = 100
|
|
} else {
|
|
m.status = "Failed"
|
|
m.phase = "Error"
|
|
}
|
|
return m, nil
|
|
|
|
case tea.KeyMsg:
|
|
switch msg.String() {
|
|
case "ctrl+c", "q":
|
|
// Always allow quitting
|
|
return m.parent, tea.Quit
|
|
|
|
case "enter", " ", "esc":
|
|
if m.done {
|
|
return m.parent, nil
|
|
}
|
|
}
|
|
}
|
|
|
|
return m, nil
|
|
}
|
|
|
|
// View renders the restore screen: a title, archive info, then either the
// final result (when done) or the live progress display (phase, spinner,
// optional progress bar, elapsed time).
func (m RestoreExecutionModel) View() string {
	var s strings.Builder

	// Title
	title := "💾 Restoring Database"
	if m.restoreType == "restore-cluster" {
		title = "💾 Restoring Cluster"
	}
	s.WriteString(titleStyle.Render(title))
	s.WriteString("\n\n")

	// Archive info
	s.WriteString(fmt.Sprintf("Archive: %s\n", m.archive.Name))
	if m.restoreType == "restore-single" {
		s.WriteString(fmt.Sprintf("Target: %s\n", m.targetDB))
	}
	s.WriteString("\n")

	if m.done {
		// Show result
		if m.err != nil {
			s.WriteString(errorStyle.Render("❌ Restore Failed"))
			s.WriteString("\n\n")
			s.WriteString(errorStyle.Render(fmt.Sprintf("Error: %v", m.err)))
			s.WriteString("\n")
		} else {
			s.WriteString(successStyle.Render("✅ Restore Completed Successfully"))
			s.WriteString("\n\n")
			s.WriteString(successStyle.Render(m.result))
			s.WriteString("\n")
		}

		s.WriteString(fmt.Sprintf("\nElapsed Time: %s\n", formatDuration(m.elapsed)))
		s.WriteString("\n")
		s.WriteString(infoStyle.Render("⌨️ Press Enter to continue"))
	} else {
		// Show progress
		s.WriteString(fmt.Sprintf("Phase: %s\n", m.phase))

		// Show status with rotating spinner (unified indicator for all operations)
		spinner := m.spinnerFrames[m.spinnerFrame]
		s.WriteString(fmt.Sprintf("Status: %s %s\n", spinner, m.status))
		s.WriteString("\n")

		// Only show progress bar for single database restore.
		// Cluster restore uses spinner only (consistent with CLI behavior).
		if m.restoreType == "restore-single" {
			progressBar := renderProgressBar(m.progress)
			s.WriteString(progressBar)
			s.WriteString(fmt.Sprintf(" %d%%\n", m.progress))
			s.WriteString("\n")
		}

		// Elapsed time
		s.WriteString(fmt.Sprintf("Elapsed: %s\n", formatDuration(m.elapsed)))
		s.WriteString("\n")
		s.WriteString(infoStyle.Render("⌨️ Press Ctrl+C to cancel"))
	}

	return s.String()
}
|
|
|
|
// renderProgressBar renders a text progress bar
|
|
func renderProgressBar(percent int) string {
|
|
width := 40
|
|
filled := (percent * width) / 100
|
|
|
|
bar := strings.Repeat("█", filled)
|
|
empty := strings.Repeat("░", width-filled)
|
|
|
|
return successStyle.Render(bar) + infoStyle.Render(empty)
|
|
}
|
|
|
|
// formatDuration formats a duration for display: "N.Ns" under a minute,
// "Nm Ns" under an hour, and "Nh Nm" otherwise.
func formatDuration(d time.Duration) string {
	switch {
	case d < time.Minute:
		return fmt.Sprintf("%.1fs", d.Seconds())
	case d < time.Hour:
		return fmt.Sprintf("%dm %ds", int(d.Minutes()), int(d.Seconds())%60)
	default:
		return fmt.Sprintf("%dh %dm", int(d.Hours()), int(d.Minutes())%60)
	}
}
|
|
|
|
// dropDatabaseCLI drops a database using command-line psql
|
|
// This avoids needing an active database connection
|
|
func dropDatabaseCLI(ctx context.Context, cfg *config.Config, dbName string) error {
|
|
args := []string{
|
|
"-p", fmt.Sprintf("%d", cfg.Port),
|
|
"-U", cfg.User,
|
|
"-d", "postgres", // Connect to postgres maintenance DB
|
|
"-c", fmt.Sprintf("DROP DATABASE IF EXISTS %s", dbName),
|
|
}
|
|
|
|
// Only add -h flag if host is not localhost (to use Unix socket for peer auth)
|
|
if cfg.Host != "localhost" && cfg.Host != "127.0.0.1" && cfg.Host != "" {
|
|
args = append([]string{"-h", cfg.Host}, args...)
|
|
}
|
|
|
|
cmd := exec.CommandContext(ctx, "psql", args...)
|
|
|
|
// Set password if provided
|
|
if cfg.Password != "" {
|
|
cmd.Env = append(cmd.Environ(), fmt.Sprintf("PGPASSWORD=%s", cfg.Password))
|
|
}
|
|
|
|
output, err := cmd.CombinedOutput()
|
|
if err != nil {
|
|
return fmt.Errorf("failed to drop database %s: %w\nOutput: %s", dbName, err, string(output))
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|