This is a critical bugfix release addressing multiple hardcoded temporary directory paths that prevented proper use of the WorkDir configuration option.

PROBLEM:
Users who configured WorkDir (e.g., /u01/dba/tmp) for systems with small root filesystems still experienced failures because critical operations hardcoded /tmp instead of respecting the configured WorkDir. This made the WorkDir option essentially non-functional.

FIXED LOCATIONS:
1. internal/restore/engine.go:632 - CRITICAL: Used BackupDir instead of WorkDir for extraction
2. cmd/restore.go:354,834 - CLI restore/diagnose commands ignored WorkDir
3. cmd/migrate.go:208,347 - Migration commands hardcoded /tmp
4. internal/migrate/engine.go:120 - Migration engine ignored WorkDir
5. internal/config/config.go:224 - SwapFilePath hardcoded /tmp
6. internal/config/config.go:519 - Backup directory fallback hardcoded /tmp
7. internal/tui/restore_exec.go:161 - Debug logs hardcoded /tmp
8. internal/tui/settings.go:805 - Directory browser default hardcoded /tmp
9. internal/tui/restore_preview.go:474 - Display message hardcoded /tmp

NEW FEATURES:
- Added Config.GetEffectiveWorkDir() helper method (see the sketch after these notes)
- WorkDir now respects the WORK_DIR environment variable
- All temporary operations now consistently use the configured WorkDir, with /tmp as the fallback

IMPACT:
- Restores on systems with small root disks now work properly with WorkDir configured
- Admins can control disk space usage for all temporary operations
- Debug logs, extraction directories, and swap files all respect the WorkDir setting

Version: 3.42.1 (Critical Fix Release)
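For reference, here is a minimal sketch of what the new helper could look like, based only on the behavior described above (configured WorkDir first, then the WORK_DIR environment variable, then /tmp). The `WorkDir` field name, the trimmed-down `Config` struct, and the exact precedence order are assumptions for illustration, not taken from the actual internal/config/config.go.

```go
// Sketch only: assumes a WorkDir field on Config and the precedence
// described in the release notes (WorkDir > WORK_DIR env var > /tmp).
package config

import "os"

type Config struct {
	WorkDir string // assumed field name; working directory for temporary files
	// ... other fields omitted
}

// GetEffectiveWorkDir returns the directory to use for temporary operations:
// the configured WorkDir if set, otherwise the WORK_DIR environment variable,
// otherwise /tmp.
func (c *Config) GetEffectiveWorkDir() string {
	if c.WorkDir != "" {
		return c.WorkDir
	}
	if envDir := os.Getenv("WORK_DIR"); envDir != "" {
		return envDir
	}
	return "/tmp"
}
```

Call sites would then replace a hardcoded "/tmp" with cfg.GetEffectiveWorkDir(), as the debug-log path in the file below already does.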
package tui

import (
	"context"
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	tea "github.com/charmbracelet/bubbletea"

	"dbbackup/internal/config"
	"dbbackup/internal/database"
	"dbbackup/internal/logger"
	"dbbackup/internal/restore"
)

// Shared spinner frames for consistent animation across all TUI operations
var spinnerFrames = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}

// RestoreExecutionModel handles restore execution with progress
type RestoreExecutionModel struct {
	config            *config.Config
	logger            logger.Logger
	parent            tea.Model
	ctx               context.Context
	cancel            context.CancelFunc // Cancel function to stop the operation
	archive           ArchiveInfo
	targetDB          string
	cleanFirst        bool
	createIfMissing   bool
	restoreType       string
	cleanClusterFirst bool     // Drop all user databases before cluster restore
	existingDBs       []string // List of databases to drop
	saveDebugLog      bool     // Save detailed error report on failure
	workDir           string   // Custom work directory for extraction

	// Progress tracking
	status        string
	phase         string
	progress      int
	details       []string
	startTime     time.Time
	spinnerFrame  int
	spinnerFrames []string

	// Results
	done       bool
	cancelling bool // True when user has requested cancellation
	err        error
	result     string
	elapsed    time.Duration
}

// NewRestoreExecution creates a new restore execution model
func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model, ctx context.Context, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string, saveDebugLog bool, workDir string) RestoreExecutionModel {
	// Create a cancellable context derived from parent
	childCtx, cancel := context.WithCancel(ctx)
	return RestoreExecutionModel{
		config:            cfg,
		logger:            log,
		parent:            parent,
		ctx:               childCtx,
		cancel:            cancel,
		archive:           archive,
		targetDB:          targetDB,
		cleanFirst:        cleanFirst,
		createIfMissing:   createIfMissing,
		restoreType:       restoreType,
		cleanClusterFirst: cleanClusterFirst,
		existingDBs:       existingDBs,
		saveDebugLog:      saveDebugLog,
		workDir:           workDir,
		status:            "Initializing...",
		phase:             "Starting",
		startTime:         time.Now(),
		details:           []string{},
		spinnerFrames:     spinnerFrames, // Use the shared package-level frames
		spinnerFrame:      0,
	}
}

func (m RestoreExecutionModel) Init() tea.Cmd {
	return tea.Batch(
		executeRestoreWithTUIProgress(m.ctx, m.config, m.logger, m.archive, m.targetDB, m.cleanFirst, m.createIfMissing, m.restoreType, m.cleanClusterFirst, m.existingDBs, m.saveDebugLog),
		restoreTickCmd(),
	)
}

type restoreTickMsg time.Time

func restoreTickCmd() tea.Cmd {
	return tea.Tick(time.Millisecond*100, func(t time.Time) tea.Msg {
		return restoreTickMsg(t)
	})
}

type restoreProgressMsg struct {
	status   string
	phase    string
	progress int
	detail   string
}

type restoreCompleteMsg struct {
	result  string
	err     error
	elapsed time.Duration
}

func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string, saveDebugLog bool) tea.Cmd {
	return func() tea.Msg {
		// Use configurable cluster timeout (minutes) from config; default set in config.New()
		// Use parent context to inherit cancellation from TUI
		restoreTimeout := time.Duration(cfg.ClusterTimeoutMinutes) * time.Minute
		ctx, cancel := context.WithTimeout(parentCtx, restoreTimeout)
		defer cancel()

		start := time.Now()

		// Create database instance
		dbClient, err := database.New(cfg, log)
		if err != nil {
			return restoreCompleteMsg{
				result:  "",
				err:     fmt.Errorf("failed to create database client: %w", err),
				elapsed: time.Since(start),
			}
		}
		defer dbClient.Close()

		// STEP 1: Clean cluster if requested (drop all existing user databases)
		if restoreType == "restore-cluster" && cleanClusterFirst && len(existingDBs) > 0 {
			log.Info("Dropping existing user databases before cluster restore", "count", len(existingDBs))

			// Drop databases using command-line psql (no connection required)
			// This matches how cluster restore works - uses CLI tools, not database connections
			droppedCount := 0
			for _, dbName := range existingDBs {
				// Create timeout context for each database drop (30 seconds per DB)
				dropCtx, dropCancel := context.WithTimeout(ctx, 30*time.Second)
				if err := dropDatabaseCLI(dropCtx, cfg, dbName); err != nil {
					log.Warn("Failed to drop database", "name", dbName, "error", err)
					// Continue with other databases
				} else {
					droppedCount++
					log.Info("Dropped database", "name", dbName)
				}
				dropCancel() // Clean up context
			}

			log.Info("Cluster cleanup completed", "dropped", droppedCount, "total", len(existingDBs))
		}

		// STEP 2: Create restore engine with silent progress (no stdout interference with TUI)
		engine := restore.NewSilent(cfg, log, dbClient)

		// Enable debug logging if requested
		if saveDebugLog {
			// Generate debug log path using configured WorkDir
			workDir := cfg.GetEffectiveWorkDir()
			debugLogPath := filepath.Join(workDir, fmt.Sprintf("dbbackup-restore-debug-%s.json", time.Now().Format("20060102-150405")))
			engine.SetDebugLogPath(debugLogPath)
			log.Info("Debug logging enabled", "path", debugLogPath)
		}

		// No real-time progress callback is wired up here: the restore runs inside this
		// command and progress details are already reported via logs, so the TUI relies
		// on the spinner animation to show activity.

		// STEP 3: Execute restore based on type
		var restoreErr error
		if restoreType == "restore-cluster" {
			restoreErr = engine.RestoreCluster(ctx, archive.Path)
		} else {
			restoreErr = engine.RestoreSingle(ctx, archive.Path, targetDB, cleanFirst, createIfMissing)
		}

		if restoreErr != nil {
			return restoreCompleteMsg{
				result:  "",
				err:     restoreErr,
				elapsed: time.Since(start),
			}
		}

		result := fmt.Sprintf("Successfully restored from %s", archive.Name)
		if restoreType == "restore-single" {
			result = fmt.Sprintf("Successfully restored '%s' from %s", targetDB, archive.Name)
		} else if restoreType == "restore-cluster" && cleanClusterFirst {
			result = fmt.Sprintf("Successfully restored cluster from %s (cleaned %d existing database(s) first)", archive.Name, len(existingDBs))
		}

		return restoreCompleteMsg{
			result:  result,
			err:     nil,
			elapsed: time.Since(start),
		}
	}
}

func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case restoreTickMsg:
		if !m.done {
			m.spinnerFrame = (m.spinnerFrame + 1) % len(m.spinnerFrames)
			m.elapsed = time.Since(m.startTime)

			// Update status based on elapsed time to show progress
			// This provides visual feedback even though we don't have real-time progress
			elapsedSec := int(m.elapsed.Seconds())

			if elapsedSec < 2 {
				m.status = "Initializing restore..."
				m.phase = "Starting"
			} else if elapsedSec < 5 {
				if m.cleanClusterFirst && len(m.existingDBs) > 0 {
					m.status = fmt.Sprintf("Cleaning %d existing database(s)...", len(m.existingDBs))
					m.phase = "Cleanup"
				} else if m.restoreType == "restore-cluster" {
					m.status = "Extracting cluster archive..."
					m.phase = "Extraction"
				} else {
					m.status = "Preparing restore..."
					m.phase = "Preparation"
				}
			} else if elapsedSec < 10 {
				if m.restoreType == "restore-cluster" {
					m.status = "Restoring global objects..."
					m.phase = "Globals"
				} else {
					m.status = fmt.Sprintf("Restoring database '%s'...", m.targetDB)
					m.phase = "Restore"
				}
			} else {
				if m.restoreType == "restore-cluster" {
					m.status = "Restoring cluster databases..."
					m.phase = "Restore"
				} else {
					m.status = fmt.Sprintf("Restoring database '%s'...", m.targetDB)
					m.phase = "Restore"
				}
			}

			return m, restoreTickCmd()
		}
		return m, nil

	case restoreProgressMsg:
		m.status = msg.status
		m.phase = msg.phase
		m.progress = msg.progress
		if msg.detail != "" {
			m.details = append(m.details, msg.detail)
			// Keep only last 5 details
			if len(m.details) > 5 {
				m.details = m.details[len(m.details)-5:]
			}
		}
		return m, nil

	case restoreCompleteMsg:
		m.done = true
		m.err = msg.err
		m.result = msg.result
		m.elapsed = msg.elapsed

		if m.err == nil {
			m.status = "Restore completed successfully"
			m.phase = "Done"
			m.progress = 100
		} else {
			m.status = "Failed"
			m.phase = "Error"
		}
		// Auto-forward in auto-confirm mode when done
		if m.config.TUIAutoConfirm && m.done {
			return m.parent, tea.Quit
		}
		return m, nil

	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "esc":
			if !m.done && !m.cancelling {
				// User requested cancellation - cancel the context
				m.cancelling = true
				m.status = "⏹️ Cancelling restore... (please wait)"
				m.phase = "Cancelling"
				if m.cancel != nil {
					m.cancel()
				}
				return m, nil
			} else if m.done {
				return m.parent, nil
			}
		case "q":
			if !m.done && !m.cancelling {
				m.cancelling = true
				m.status = "⏹️ Cancelling restore... (please wait)"
				m.phase = "Cancelling"
				if m.cancel != nil {
					m.cancel()
				}
				return m, nil
			} else if m.done {
				return m.parent, tea.Quit
			}
		case "enter", " ":
			if m.done {
				return m.parent, nil
			}
		}
	}

	return m, nil
}

func (m RestoreExecutionModel) View() string {
	var s strings.Builder
	s.Grow(512) // Pre-allocate estimated capacity for better performance

	// Title
	title := "💾 Restoring Database"
	if m.restoreType == "restore-cluster" {
		title = "💾 Restoring Cluster"
	}
	s.WriteString(titleStyle.Render(title))
	s.WriteString("\n\n")

	// Archive info
	s.WriteString(fmt.Sprintf("Archive: %s\n", m.archive.Name))
	if m.restoreType == "restore-single" {
		s.WriteString(fmt.Sprintf("Target: %s\n", m.targetDB))
	}
	s.WriteString("\n")

	if m.done {
		// Show result
		if m.err != nil {
			s.WriteString(errorStyle.Render("❌ Restore Failed"))
			s.WriteString("\n\n")
			s.WriteString(errorStyle.Render(fmt.Sprintf("Error: %v", m.err)))
			s.WriteString("\n")
		} else {
			s.WriteString(successStyle.Render("✅ Restore Completed Successfully"))
			s.WriteString("\n\n")
			s.WriteString(successStyle.Render(m.result))
			s.WriteString("\n")
		}

		s.WriteString(fmt.Sprintf("\nElapsed Time: %s\n", formatDuration(m.elapsed)))
		s.WriteString("\n")
		s.WriteString(infoStyle.Render("⌨️ Press Enter to continue"))
	} else {
		// Show progress
		s.WriteString(fmt.Sprintf("Phase: %s\n", m.phase))

		// Show status with rotating spinner (unified indicator for all operations)
		spinner := m.spinnerFrames[m.spinnerFrame]
		s.WriteString(fmt.Sprintf("Status: %s %s\n", spinner, m.status))
		s.WriteString("\n")

		// Only show progress bar for single database restore
		// Cluster restore uses spinner only (consistent with CLI behavior)
		if m.restoreType == "restore-single" {
			progressBar := renderProgressBar(m.progress)
			s.WriteString(progressBar)
			s.WriteString(fmt.Sprintf(" %d%%\n", m.progress))
			s.WriteString("\n")
		}

		// Elapsed time
		s.WriteString(fmt.Sprintf("Elapsed: %s\n", formatDuration(m.elapsed)))
		s.WriteString("\n")
		s.WriteString(infoStyle.Render("⌨️ Press Ctrl+C to cancel"))
	}

	return s.String()
}

// renderProgressBar renders a text progress bar
func renderProgressBar(percent int) string {
	// Clamp to 0-100 so strings.Repeat never receives a negative count
	if percent < 0 {
		percent = 0
	} else if percent > 100 {
		percent = 100
	}

	width := 40
	filled := (percent * width) / 100

	bar := strings.Repeat("█", filled)
	empty := strings.Repeat("░", width-filled)

	return successStyle.Render(bar) + infoStyle.Render(empty)
}

// formatDuration formats a duration in human-readable form
func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%.1fs", d.Seconds())
	}
	if d < time.Hour {
		minutes := int(d.Minutes())
		seconds := int(d.Seconds()) % 60
		return fmt.Sprintf("%dm %ds", minutes, seconds)
	}
	hours := int(d.Hours())
	minutes := int(d.Minutes()) % 60
	return fmt.Sprintf("%dh %dm", hours, minutes)
}

// dropDatabaseCLI drops a database using command-line psql
// This avoids needing an active database connection
func dropDatabaseCLI(ctx context.Context, cfg *config.Config, dbName string) error {
	// Quote the identifier (escaping embedded quotes) so names with uppercase or
	// special characters are dropped correctly
	quotedName := `"` + strings.ReplaceAll(dbName, `"`, `""`) + `"`

	args := []string{
		"-p", fmt.Sprintf("%d", cfg.Port),
		"-U", cfg.User,
		"-d", "postgres", // Connect to postgres maintenance DB
		"-c", fmt.Sprintf("DROP DATABASE IF EXISTS %s", quotedName),
	}

	// Only add -h flag if host is not localhost (to use Unix socket for peer auth)
	if cfg.Host != "localhost" && cfg.Host != "127.0.0.1" && cfg.Host != "" {
		args = append([]string{"-h", cfg.Host}, args...)
	}

	cmd := exec.CommandContext(ctx, "psql", args...)

	// Set password if provided
	if cfg.Password != "" {
		cmd.Env = append(cmd.Environ(), fmt.Sprintf("PGPASSWORD=%s", cfg.Password))
	}

	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to drop database %s: %w\nOutput: %s", dbName, err, string(output))
	}

	return nil
}