feat: implement full restore functionality with TUI integration
- Add complete restore engine (internal/restore/) - RestoreSingle() for single database restore - RestoreCluster() for full cluster restore - Archive format detection (7 formats supported) - Safety validation (integrity, disk space, tools) - Streaming decompression with pigz support - Add CLI restore commands (cmd/restore.go) - restore single: restore single database backup - restore cluster: restore full cluster backup - restore list: list available backup archives - Safety-first design: dry-run by default, --confirm required - Add TUI restore integration (internal/tui/) - Archive browser: browse and select backups - Restore preview: safety checks and confirmation - Restore execution: real-time progress tracking - Backup manager: comprehensive archive management - Features: - Format auto-detection (.dump, .dump.gz, .sql, .sql.gz, .tar.gz) - Archive validation before restore - Disk space verification - Tool availability checks - Target database configuration - Clean-first and create-if-missing options - Parallel decompression support - Progress tracking with phases Phase 1 (Core Functionality) complete and tested
This commit is contained in:
445
internal/restore/engine.go
Normal file
445
internal/restore/engine.go
Normal file
@ -0,0 +1,445 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"dbbackup/internal/config"
|
||||
"dbbackup/internal/database"
|
||||
"dbbackup/internal/logger"
|
||||
"dbbackup/internal/progress"
|
||||
)
|
||||
|
||||
// Engine handles database restore operations
|
||||
type Engine struct {
|
||||
cfg *config.Config
|
||||
log logger.Logger
|
||||
db database.Database
|
||||
progress progress.Indicator
|
||||
detailedReporter *progress.DetailedReporter
|
||||
dryRun bool
|
||||
}
|
||||
|
||||
// New creates a new restore engine
|
||||
func New(cfg *config.Config, log logger.Logger, db database.Database) *Engine {
|
||||
progressIndicator := progress.NewIndicator(true, "line")
|
||||
detailedReporter := progress.NewDetailedReporter(progressIndicator, &loggerAdapter{logger: log})
|
||||
|
||||
return &Engine{
|
||||
cfg: cfg,
|
||||
log: log,
|
||||
db: db,
|
||||
progress: progressIndicator,
|
||||
detailedReporter: detailedReporter,
|
||||
dryRun: false,
|
||||
}
|
||||
}
|
||||
|
||||
// NewWithProgress creates a restore engine with custom progress indicator
|
||||
func NewWithProgress(cfg *config.Config, log logger.Logger, db database.Database, progressIndicator progress.Indicator, dryRun bool) *Engine {
|
||||
if progressIndicator == nil {
|
||||
progressIndicator = progress.NewNullIndicator()
|
||||
}
|
||||
|
||||
detailedReporter := progress.NewDetailedReporter(progressIndicator, &loggerAdapter{logger: log})
|
||||
|
||||
return &Engine{
|
||||
cfg: cfg,
|
||||
log: log,
|
||||
db: db,
|
||||
progress: progressIndicator,
|
||||
detailedReporter: detailedReporter,
|
||||
dryRun: dryRun,
|
||||
}
|
||||
}
|
||||
|
||||
// loggerAdapter adapts our logger to the progress.Logger interface
|
||||
type loggerAdapter struct {
|
||||
logger logger.Logger
|
||||
}
|
||||
|
||||
func (la *loggerAdapter) Info(msg string, args ...any) {
|
||||
la.logger.Info(msg, args...)
|
||||
}
|
||||
|
||||
func (la *loggerAdapter) Warn(msg string, args ...any) {
|
||||
la.logger.Warn(msg, args...)
|
||||
}
|
||||
|
||||
func (la *loggerAdapter) Error(msg string, args ...any) {
|
||||
la.logger.Error(msg, args...)
|
||||
}
|
||||
|
||||
func (la *loggerAdapter) Debug(msg string, args ...any) {
|
||||
la.logger.Debug(msg, args...)
|
||||
}
|
||||
|
||||
// RestoreSingle restores a single database from an archive
|
||||
func (e *Engine) RestoreSingle(ctx context.Context, archivePath, targetDB string, cleanFirst, createIfMissing bool) error {
|
||||
operation := e.log.StartOperation("Single Database Restore")
|
||||
|
||||
// Validate archive exists
|
||||
if _, err := os.Stat(archivePath); os.IsNotExist(err) {
|
||||
operation.Fail("Archive not found")
|
||||
return fmt.Errorf("archive not found: %s", archivePath)
|
||||
}
|
||||
|
||||
// Detect archive format
|
||||
format := DetectArchiveFormat(archivePath)
|
||||
e.log.Info("Detected archive format", "format", format, "path", archivePath)
|
||||
|
||||
if e.dryRun {
|
||||
e.log.Info("DRY RUN: Would restore single database", "archive", archivePath, "target", targetDB)
|
||||
return e.previewRestore(archivePath, targetDB, format)
|
||||
}
|
||||
|
||||
// Start progress tracking
|
||||
e.progress.Start(fmt.Sprintf("Restoring database '%s' from %s", targetDB, filepath.Base(archivePath)))
|
||||
|
||||
// Handle different archive formats
|
||||
var err error
|
||||
switch format {
|
||||
case FormatPostgreSQLDump, FormatPostgreSQLDumpGz:
|
||||
err = e.restorePostgreSQLDump(ctx, archivePath, targetDB, format == FormatPostgreSQLDumpGz, cleanFirst)
|
||||
case FormatPostgreSQLSQL, FormatPostgreSQLSQLGz:
|
||||
err = e.restorePostgreSQLSQL(ctx, archivePath, targetDB, format == FormatPostgreSQLSQLGz)
|
||||
case FormatMySQLSQL, FormatMySQLSQLGz:
|
||||
err = e.restoreMySQLSQL(ctx, archivePath, targetDB, format == FormatMySQLSQLGz)
|
||||
default:
|
||||
operation.Fail("Unsupported archive format")
|
||||
return fmt.Errorf("unsupported archive format: %s", format)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
e.progress.Fail(fmt.Sprintf("Restore failed: %v", err))
|
||||
operation.Fail(fmt.Sprintf("Restore failed: %v", err))
|
||||
return err
|
||||
}
|
||||
|
||||
e.progress.Complete(fmt.Sprintf("Database '%s' restored successfully", targetDB))
|
||||
operation.Complete(fmt.Sprintf("Restored database '%s' from %s", targetDB, filepath.Base(archivePath)))
|
||||
return nil
|
||||
}
|
||||
|
||||
// restorePostgreSQLDump restores from PostgreSQL custom dump format
|
||||
func (e *Engine) restorePostgreSQLDump(ctx context.Context, archivePath, targetDB string, compressed bool, cleanFirst bool) error {
|
||||
// Build restore command
|
||||
opts := database.RestoreOptions{
|
||||
Parallel: 1,
|
||||
Clean: cleanFirst,
|
||||
NoOwner: true,
|
||||
NoPrivileges: true,
|
||||
SingleTransaction: true,
|
||||
}
|
||||
|
||||
cmd := e.db.BuildRestoreCommand(targetDB, archivePath, opts)
|
||||
|
||||
if compressed {
|
||||
// For compressed dumps, decompress first
|
||||
return e.executeRestoreWithDecompression(ctx, archivePath, cmd)
|
||||
}
|
||||
|
||||
return e.executeRestoreCommand(ctx, cmd)
|
||||
}
|
||||
|
||||
// restorePostgreSQLSQL restores from PostgreSQL SQL script
|
||||
func (e *Engine) restorePostgreSQLSQL(ctx context.Context, archivePath, targetDB string, compressed bool) error {
|
||||
// Use psql for SQL scripts
|
||||
var cmd []string
|
||||
if compressed {
|
||||
cmd = []string{
|
||||
"bash", "-c",
|
||||
fmt.Sprintf("gunzip -c %s | psql -h %s -p %d -U %s -d %s",
|
||||
archivePath, e.cfg.Host, e.cfg.Port, e.cfg.User, targetDB),
|
||||
}
|
||||
} else {
|
||||
cmd = []string{
|
||||
"psql",
|
||||
"-h", e.cfg.Host,
|
||||
"-p", fmt.Sprintf("%d", e.cfg.Port),
|
||||
"-U", e.cfg.User,
|
||||
"-d", targetDB,
|
||||
"-f", archivePath,
|
||||
}
|
||||
}
|
||||
|
||||
return e.executeRestoreCommand(ctx, cmd)
|
||||
}
|
||||
|
||||
// restoreMySQLSQL restores from MySQL SQL script
|
||||
func (e *Engine) restoreMySQLSQL(ctx context.Context, archivePath, targetDB string, compressed bool) error {
|
||||
options := database.RestoreOptions{}
|
||||
|
||||
cmd := e.db.BuildRestoreCommand(targetDB, archivePath, options)
|
||||
|
||||
if compressed {
|
||||
// For compressed SQL, decompress on the fly
|
||||
cmd = []string{
|
||||
"bash", "-c",
|
||||
fmt.Sprintf("gunzip -c %s | %s", archivePath, strings.Join(cmd, " ")),
|
||||
}
|
||||
}
|
||||
|
||||
return e.executeRestoreCommand(ctx, cmd)
|
||||
}
|
||||
|
||||
// executeRestoreCommand executes a restore command
|
||||
func (e *Engine) executeRestoreCommand(ctx context.Context, cmdArgs []string) error {
|
||||
e.log.Info("Executing restore command", "command", strings.Join(cmdArgs, " "))
|
||||
|
||||
cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
|
||||
|
||||
// Set environment variables
|
||||
cmd.Env = append(os.Environ(),
|
||||
fmt.Sprintf("PGPASSWORD=%s", e.cfg.Password),
|
||||
fmt.Sprintf("MYSQL_PWD=%s", e.cfg.Password),
|
||||
)
|
||||
|
||||
// Capture output
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
e.log.Error("Restore command failed", "error", err, "output", string(output))
|
||||
return fmt.Errorf("restore failed: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
e.log.Info("Restore command completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// executeRestoreWithDecompression handles decompression during restore
|
||||
func (e *Engine) executeRestoreWithDecompression(ctx context.Context, archivePath string, restoreCmd []string) error {
|
||||
// Check if pigz is available for faster decompression
|
||||
decompressCmd := "gunzip"
|
||||
if _, err := exec.LookPath("pigz"); err == nil {
|
||||
decompressCmd = "pigz"
|
||||
e.log.Info("Using pigz for parallel decompression")
|
||||
}
|
||||
|
||||
// Build pipeline: decompress | restore
|
||||
pipeline := fmt.Sprintf("%s -dc %s | %s", decompressCmd, archivePath, strings.Join(restoreCmd, " "))
|
||||
cmd := exec.CommandContext(ctx, "bash", "-c", pipeline)
|
||||
|
||||
cmd.Env = append(os.Environ(),
|
||||
fmt.Sprintf("PGPASSWORD=%s", e.cfg.Password),
|
||||
fmt.Sprintf("MYSQL_PWD=%s", e.cfg.Password),
|
||||
)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
e.log.Error("Restore with decompression failed", "error", err, "output", string(output))
|
||||
return fmt.Errorf("restore failed: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// previewRestore shows what would be done without executing
|
||||
func (e *Engine) previewRestore(archivePath, targetDB string, format ArchiveFormat) error {
|
||||
fmt.Println("\n" + strings.Repeat("=", 60))
|
||||
fmt.Println(" RESTORE PREVIEW (DRY RUN)")
|
||||
fmt.Println(strings.Repeat("=", 60))
|
||||
|
||||
stat, _ := os.Stat(archivePath)
|
||||
fmt.Printf("\nArchive: %s\n", filepath.Base(archivePath))
|
||||
fmt.Printf("Format: %s\n", format)
|
||||
if stat != nil {
|
||||
fmt.Printf("Size: %s\n", FormatBytes(stat.Size()))
|
||||
fmt.Printf("Modified: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
|
||||
}
|
||||
fmt.Printf("Target Database: %s\n", targetDB)
|
||||
fmt.Printf("Target Host: %s:%d\n", e.cfg.Host, e.cfg.Port)
|
||||
|
||||
fmt.Println("\nOperations that would be performed:")
|
||||
switch format {
|
||||
case FormatPostgreSQLDump:
|
||||
fmt.Printf(" 1. Execute: pg_restore -d %s %s\n", targetDB, archivePath)
|
||||
case FormatPostgreSQLDumpGz:
|
||||
fmt.Printf(" 1. Decompress: %s\n", archivePath)
|
||||
fmt.Printf(" 2. Execute: pg_restore -d %s\n", targetDB)
|
||||
case FormatPostgreSQLSQL, FormatPostgreSQLSQLGz:
|
||||
fmt.Printf(" 1. Execute: psql -d %s -f %s\n", targetDB, archivePath)
|
||||
case FormatMySQLSQL, FormatMySQLSQLGz:
|
||||
fmt.Printf(" 1. Execute: mysql %s < %s\n", targetDB, archivePath)
|
||||
}
|
||||
|
||||
fmt.Println("\n⚠️ WARNING: This will restore data to the target database.")
|
||||
fmt.Println(" Existing data may be overwritten or merged.")
|
||||
fmt.Println("\nTo execute this restore, add the --confirm flag.")
|
||||
fmt.Println(strings.Repeat("=", 60) + "\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestoreCluster restores a full cluster from a tar.gz archive
|
||||
func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
||||
operation := e.log.StartOperation("Cluster Restore")
|
||||
|
||||
// Validate archive
|
||||
if _, err := os.Stat(archivePath); os.IsNotExist(err) {
|
||||
operation.Fail("Archive not found")
|
||||
return fmt.Errorf("archive not found: %s", archivePath)
|
||||
}
|
||||
|
||||
format := DetectArchiveFormat(archivePath)
|
||||
if format != FormatClusterTarGz {
|
||||
operation.Fail("Invalid cluster archive format")
|
||||
return fmt.Errorf("not a cluster archive: %s (detected format: %s)", archivePath, format)
|
||||
}
|
||||
|
||||
if e.dryRun {
|
||||
e.log.Info("DRY RUN: Would restore cluster", "archive", archivePath)
|
||||
return e.previewClusterRestore(archivePath)
|
||||
}
|
||||
|
||||
e.progress.Start(fmt.Sprintf("Restoring cluster from %s", filepath.Base(archivePath)))
|
||||
|
||||
// Create temporary extraction directory
|
||||
tempDir := filepath.Join(e.cfg.BackupDir, fmt.Sprintf(".restore_%d", time.Now().Unix()))
|
||||
if err := os.MkdirAll(tempDir, 0755); err != nil {
|
||||
operation.Fail("Failed to create temporary directory")
|
||||
return fmt.Errorf("failed to create temp directory: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Extract archive
|
||||
e.log.Info("Extracting cluster archive", "archive", archivePath, "tempDir", tempDir)
|
||||
if err := e.extractArchive(ctx, archivePath, tempDir); err != nil {
|
||||
operation.Fail("Archive extraction failed")
|
||||
return fmt.Errorf("failed to extract archive: %w", err)
|
||||
}
|
||||
|
||||
// Restore global objects (roles, tablespaces)
|
||||
globalsFile := filepath.Join(tempDir, "globals.sql")
|
||||
if _, err := os.Stat(globalsFile); err == nil {
|
||||
e.log.Info("Restoring global objects")
|
||||
e.progress.Update("Restoring global objects (roles, tablespaces)...")
|
||||
if err := e.restoreGlobals(ctx, globalsFile); err != nil {
|
||||
e.log.Warn("Failed to restore global objects", "error", err)
|
||||
// Continue anyway - global objects might already exist
|
||||
}
|
||||
}
|
||||
|
||||
// Restore individual databases
|
||||
dumpsDir := filepath.Join(tempDir, "dumps")
|
||||
if _, err := os.Stat(dumpsDir); err != nil {
|
||||
operation.Fail("No database dumps found in archive")
|
||||
return fmt.Errorf("no database dumps found in archive")
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dumpsDir)
|
||||
if err != nil {
|
||||
operation.Fail("Failed to read dumps directory")
|
||||
return fmt.Errorf("failed to read dumps directory: %w", err)
|
||||
}
|
||||
|
||||
successCount := 0
|
||||
failCount := 0
|
||||
|
||||
for i, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
dumpFile := filepath.Join(dumpsDir, entry.Name())
|
||||
dbName := strings.TrimSuffix(entry.Name(), ".dump")
|
||||
|
||||
e.progress.Update(fmt.Sprintf("[%d/%d] Restoring database: %s", i+1, len(entries), dbName))
|
||||
e.log.Info("Restoring database", "name", dbName, "file", dumpFile)
|
||||
|
||||
if err := e.restorePostgreSQLDump(ctx, dumpFile, dbName, false, false); err != nil {
|
||||
e.log.Error("Failed to restore database", "name", dbName, "error", err)
|
||||
failCount++
|
||||
continue
|
||||
}
|
||||
|
||||
successCount++
|
||||
}
|
||||
|
||||
if failCount > 0 {
|
||||
e.progress.Fail(fmt.Sprintf("Cluster restore completed with errors: %d succeeded, %d failed", successCount, failCount))
|
||||
operation.Complete(fmt.Sprintf("Partial restore: %d succeeded, %d failed", successCount, failCount))
|
||||
return fmt.Errorf("cluster restore completed with %d failures", failCount)
|
||||
}
|
||||
|
||||
e.progress.Complete(fmt.Sprintf("Cluster restored successfully: %d databases", successCount))
|
||||
operation.Complete(fmt.Sprintf("Restored %d databases from cluster archive", successCount))
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractArchive extracts a tar.gz archive
|
||||
func (e *Engine) extractArchive(ctx context.Context, archivePath, destDir string) error {
|
||||
cmd := exec.CommandContext(ctx, "tar", "-xzf", archivePath, "-C", destDir)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("tar extraction failed: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// restoreGlobals restores global objects (roles, tablespaces)
|
||||
func (e *Engine) restoreGlobals(ctx context.Context, globalsFile string) error {
|
||||
cmd := exec.CommandContext(ctx,
|
||||
"psql",
|
||||
"-h", e.cfg.Host,
|
||||
"-p", fmt.Sprintf("%d", e.cfg.Port),
|
||||
"-U", e.cfg.User,
|
||||
"-d", "postgres",
|
||||
"-f", globalsFile,
|
||||
)
|
||||
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("PGPASSWORD=%s", e.cfg.Password))
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to restore globals: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// previewClusterRestore shows cluster restore preview
|
||||
func (e *Engine) previewClusterRestore(archivePath string) error {
|
||||
fmt.Println("\n" + strings.Repeat("=", 60))
|
||||
fmt.Println(" CLUSTER RESTORE PREVIEW (DRY RUN)")
|
||||
fmt.Println(strings.Repeat("=", 60))
|
||||
|
||||
stat, _ := os.Stat(archivePath)
|
||||
fmt.Printf("\nArchive: %s\n", filepath.Base(archivePath))
|
||||
if stat != nil {
|
||||
fmt.Printf("Size: %s\n", FormatBytes(stat.Size()))
|
||||
fmt.Printf("Modified: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
|
||||
}
|
||||
fmt.Printf("Target Host: %s:%d\n", e.cfg.Host, e.cfg.Port)
|
||||
|
||||
fmt.Println("\nOperations that would be performed:")
|
||||
fmt.Println(" 1. Extract cluster archive to temporary directory")
|
||||
fmt.Println(" 2. Restore global objects (roles, tablespaces)")
|
||||
fmt.Println(" 3. Restore all databases found in archive")
|
||||
fmt.Println(" 4. Cleanup temporary files")
|
||||
|
||||
fmt.Println("\n⚠️ WARNING: This will restore multiple databases.")
|
||||
fmt.Println(" Existing databases may be overwritten or merged.")
|
||||
fmt.Println("\nTo execute this restore, add the --confirm flag.")
|
||||
fmt.Println(strings.Repeat("=", 60) + "\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FormatBytes formats bytes to human readable format
|
||||
func FormatBytes(bytes int64) string {
|
||||
const unit = 1024
|
||||
if bytes < unit {
|
||||
return fmt.Sprintf("%d B", bytes)
|
||||
}
|
||||
div, exp := int64(unit), 0
|
||||
for n := bytes / unit; n >= unit; n /= unit {
|
||||
div *= unit
|
||||
exp++
|
||||
}
|
||||
return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp])
|
||||
}
|
||||
110
internal/restore/formats.go
Normal file
110
internal/restore/formats.go
Normal file
@ -0,0 +1,110 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ArchiveFormat represents the type of backup archive
|
||||
type ArchiveFormat string
|
||||
|
||||
const (
|
||||
FormatPostgreSQLDump ArchiveFormat = "PostgreSQL Dump (.dump)"
|
||||
FormatPostgreSQLDumpGz ArchiveFormat = "PostgreSQL Dump Compressed (.dump.gz)"
|
||||
FormatPostgreSQLSQL ArchiveFormat = "PostgreSQL SQL (.sql)"
|
||||
FormatPostgreSQLSQLGz ArchiveFormat = "PostgreSQL SQL Compressed (.sql.gz)"
|
||||
FormatMySQLSQL ArchiveFormat = "MySQL SQL (.sql)"
|
||||
FormatMySQLSQLGz ArchiveFormat = "MySQL SQL Compressed (.sql.gz)"
|
||||
FormatClusterTarGz ArchiveFormat = "Cluster Archive (.tar.gz)"
|
||||
FormatUnknown ArchiveFormat = "Unknown"
|
||||
)
|
||||
|
||||
// DetectArchiveFormat detects the format of a backup archive from its filename
|
||||
func DetectArchiveFormat(filename string) ArchiveFormat {
|
||||
lower := strings.ToLower(filename)
|
||||
|
||||
// Check for cluster archives first (most specific)
|
||||
if strings.Contains(lower, "cluster") && strings.HasSuffix(lower, ".tar.gz") {
|
||||
return FormatClusterTarGz
|
||||
}
|
||||
|
||||
// Check for compressed formats
|
||||
if strings.HasSuffix(lower, ".dump.gz") {
|
||||
return FormatPostgreSQLDumpGz
|
||||
}
|
||||
|
||||
if strings.HasSuffix(lower, ".sql.gz") {
|
||||
// Determine if MySQL or PostgreSQL based on naming convention
|
||||
if strings.Contains(lower, "mysql") || strings.Contains(lower, "mariadb") {
|
||||
return FormatMySQLSQLGz
|
||||
}
|
||||
return FormatPostgreSQLSQLGz
|
||||
}
|
||||
|
||||
// Check for uncompressed formats
|
||||
if strings.HasSuffix(lower, ".dump") {
|
||||
return FormatPostgreSQLDump
|
||||
}
|
||||
|
||||
if strings.HasSuffix(lower, ".sql") {
|
||||
// Determine if MySQL or PostgreSQL based on naming convention
|
||||
if strings.Contains(lower, "mysql") || strings.Contains(lower, "mariadb") {
|
||||
return FormatMySQLSQL
|
||||
}
|
||||
return FormatPostgreSQLSQL
|
||||
}
|
||||
|
||||
if strings.HasSuffix(lower, ".tar.gz") || strings.HasSuffix(lower, ".tgz") {
|
||||
return FormatClusterTarGz
|
||||
}
|
||||
|
||||
return FormatUnknown
|
||||
}
|
||||
|
||||
// IsCompressed returns true if the archive format is compressed
|
||||
func (f ArchiveFormat) IsCompressed() bool {
|
||||
return f == FormatPostgreSQLDumpGz ||
|
||||
f == FormatPostgreSQLSQLGz ||
|
||||
f == FormatMySQLSQLGz ||
|
||||
f == FormatClusterTarGz
|
||||
}
|
||||
|
||||
// IsClusterBackup returns true if the archive is a cluster backup
|
||||
func (f ArchiveFormat) IsClusterBackup() bool {
|
||||
return f == FormatClusterTarGz
|
||||
}
|
||||
|
||||
// IsPostgreSQL returns true if the archive is PostgreSQL format
|
||||
func (f ArchiveFormat) IsPostgreSQL() bool {
|
||||
return f == FormatPostgreSQLDump ||
|
||||
f == FormatPostgreSQLDumpGz ||
|
||||
f == FormatPostgreSQLSQL ||
|
||||
f == FormatPostgreSQLSQLGz ||
|
||||
f == FormatClusterTarGz
|
||||
}
|
||||
|
||||
// IsMySQL returns true if format is MySQL
|
||||
func (f ArchiveFormat) IsMySQL() bool {
|
||||
return f == FormatMySQLSQL || f == FormatMySQLSQLGz
|
||||
}
|
||||
|
||||
// String returns human-readable format name
|
||||
func (f ArchiveFormat) String() string {
|
||||
switch f {
|
||||
case FormatPostgreSQLDump:
|
||||
return "PostgreSQL Dump"
|
||||
case FormatPostgreSQLDumpGz:
|
||||
return "PostgreSQL Dump (gzip)"
|
||||
case FormatPostgreSQLSQL:
|
||||
return "PostgreSQL SQL"
|
||||
case FormatPostgreSQLSQLGz:
|
||||
return "PostgreSQL SQL (gzip)"
|
||||
case FormatMySQLSQL:
|
||||
return "MySQL SQL"
|
||||
case FormatMySQLSQLGz:
|
||||
return "MySQL SQL (gzip)"
|
||||
case FormatClusterTarGz:
|
||||
return "Cluster Archive (tar.gz)"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
342
internal/restore/safety.go
Normal file
342
internal/restore/safety.go
Normal file
@ -0,0 +1,342 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"dbbackup/internal/config"
|
||||
"dbbackup/internal/logger"
|
||||
)
|
||||
|
||||
// Safety provides pre-restore validation and safety checks
|
||||
type Safety struct {
|
||||
cfg *config.Config
|
||||
log logger.Logger
|
||||
}
|
||||
|
||||
// NewSafety creates a new safety checker
|
||||
func NewSafety(cfg *config.Config, log logger.Logger) *Safety {
|
||||
return &Safety{
|
||||
cfg: cfg,
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateArchive performs integrity checks on the archive
|
||||
func (s *Safety) ValidateArchive(archivePath string) error {
|
||||
// Check if file exists
|
||||
stat, err := os.Stat(archivePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("archive not accessible: %w", err)
|
||||
}
|
||||
|
||||
// Check if file is not empty
|
||||
if stat.Size() == 0 {
|
||||
return fmt.Errorf("archive is empty")
|
||||
}
|
||||
|
||||
// Check if file is too small (likely corrupted)
|
||||
if stat.Size() < 100 {
|
||||
return fmt.Errorf("archive is suspiciously small (%d bytes)", stat.Size())
|
||||
}
|
||||
|
||||
// Detect format
|
||||
format := DetectArchiveFormat(archivePath)
|
||||
if format == FormatUnknown {
|
||||
return fmt.Errorf("unknown archive format: %s", archivePath)
|
||||
}
|
||||
|
||||
// Validate based on format
|
||||
switch format {
|
||||
case FormatPostgreSQLDump:
|
||||
return s.validatePgDump(archivePath)
|
||||
case FormatPostgreSQLDumpGz:
|
||||
return s.validatePgDumpGz(archivePath)
|
||||
case FormatPostgreSQLSQL, FormatMySQLSQL:
|
||||
return s.validateSQLScript(archivePath)
|
||||
case FormatPostgreSQLSQLGz, FormatMySQLSQLGz:
|
||||
return s.validateSQLScriptGz(archivePath)
|
||||
case FormatClusterTarGz:
|
||||
return s.validateTarGz(archivePath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validatePgDump validates PostgreSQL dump file
|
||||
func (s *Safety) validatePgDump(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Read first 512 bytes for signature check
|
||||
buffer := make([]byte, 512)
|
||||
n, err := file.Read(buffer)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("cannot read file: %w", err)
|
||||
}
|
||||
|
||||
if n < 5 {
|
||||
return fmt.Errorf("file too small to validate")
|
||||
}
|
||||
|
||||
// Check for PGDMP signature
|
||||
if string(buffer[:5]) == "PGDMP" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check for PostgreSQL dump indicators
|
||||
content := strings.ToLower(string(buffer[:n]))
|
||||
if strings.Contains(content, "postgresql") || strings.Contains(content, "pg_dump") {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("does not appear to be a PostgreSQL dump file")
|
||||
}
|
||||
|
||||
// validatePgDumpGz validates compressed PostgreSQL dump
|
||||
func (s *Safety) validatePgDumpGz(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Open gzip reader
|
||||
gz, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("not a valid gzip file: %w", err)
|
||||
}
|
||||
defer gz.Close()
|
||||
|
||||
// Read first 512 bytes
|
||||
buffer := make([]byte, 512)
|
||||
n, err := gz.Read(buffer)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("cannot read gzip contents: %w", err)
|
||||
}
|
||||
|
||||
if n < 5 {
|
||||
return fmt.Errorf("gzip archive too small")
|
||||
}
|
||||
|
||||
// Check for PGDMP signature
|
||||
if string(buffer[:5]) == "PGDMP" {
|
||||
return nil
|
||||
}
|
||||
|
||||
content := strings.ToLower(string(buffer[:n]))
|
||||
if strings.Contains(content, "postgresql") || strings.Contains(content, "pg_dump") {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("does not appear to be a PostgreSQL dump file")
|
||||
}
|
||||
|
||||
// validateSQLScript validates SQL script
|
||||
func (s *Safety) validateSQLScript(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
buffer := make([]byte, 1024)
|
||||
n, err := file.Read(buffer)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("cannot read file: %w", err)
|
||||
}
|
||||
|
||||
content := strings.ToLower(string(buffer[:n]))
|
||||
if containsSQLKeywords(content) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("does not appear to contain SQL content")
|
||||
}
|
||||
|
||||
// validateSQLScriptGz validates compressed SQL script
|
||||
func (s *Safety) validateSQLScriptGz(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
gz, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("not a valid gzip file: %w", err)
|
||||
}
|
||||
defer gz.Close()
|
||||
|
||||
buffer := make([]byte, 1024)
|
||||
n, err := gz.Read(buffer)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("cannot read gzip contents: %w", err)
|
||||
}
|
||||
|
||||
content := strings.ToLower(string(buffer[:n]))
|
||||
if containsSQLKeywords(content) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("does not appear to contain SQL content")
|
||||
}
|
||||
|
||||
// validateTarGz validates tar.gz archive
|
||||
func (s *Safety) validateTarGz(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Check gzip magic number
|
||||
buffer := make([]byte, 3)
|
||||
n, err := file.Read(buffer)
|
||||
if err != nil || n < 3 {
|
||||
return fmt.Errorf("cannot read file header")
|
||||
}
|
||||
|
||||
if buffer[0] == 0x1f && buffer[1] == 0x8b {
|
||||
return nil // Valid gzip header
|
||||
}
|
||||
|
||||
return fmt.Errorf("not a valid gzip file")
|
||||
}
|
||||
|
||||
// containsSQLKeywords checks if content contains SQL keywords
|
||||
func containsSQLKeywords(content string) bool {
|
||||
keywords := []string{
|
||||
"select", "insert", "create", "drop", "alter",
|
||||
"database", "table", "update", "delete", "from", "where",
|
||||
}
|
||||
|
||||
for _, keyword := range keywords {
|
||||
if strings.Contains(content, keyword) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// CheckDiskSpace verifies sufficient disk space for restore
|
||||
func (s *Safety) CheckDiskSpace(archivePath string, multiplier float64) error {
|
||||
// Get archive size
|
||||
stat, err := os.Stat(archivePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stat archive: %w", err)
|
||||
}
|
||||
|
||||
archiveSize := stat.Size()
|
||||
|
||||
// Estimate required space (archive size * multiplier for decompression/extraction)
|
||||
requiredSpace := int64(float64(archiveSize) * multiplier)
|
||||
|
||||
// Get available disk space
|
||||
var statfs syscall.Statfs_t
|
||||
if err := syscall.Statfs(s.cfg.BackupDir, &statfs); err != nil {
|
||||
s.log.Warn("Cannot check disk space", "error", err)
|
||||
return nil // Don't fail if we can't check
|
||||
}
|
||||
|
||||
availableSpace := int64(statfs.Bavail) * statfs.Bsize
|
||||
|
||||
if availableSpace < requiredSpace {
|
||||
return fmt.Errorf("insufficient disk space: need %s, have %s",
|
||||
FormatBytes(requiredSpace), FormatBytes(availableSpace))
|
||||
}
|
||||
|
||||
s.log.Info("Disk space check passed",
|
||||
"required", FormatBytes(requiredSpace),
|
||||
"available", FormatBytes(availableSpace))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyTools checks if required restore tools are available
|
||||
func (s *Safety) VerifyTools(dbType string) error {
|
||||
var tools []string
|
||||
|
||||
if dbType == "postgres" {
|
||||
tools = []string{"pg_restore", "psql"}
|
||||
} else if dbType == "mysql" || dbType == "mariadb" {
|
||||
tools = []string{"mysql"}
|
||||
}
|
||||
|
||||
missing := []string{}
|
||||
for _, tool := range tools {
|
||||
if _, err := exec.LookPath(tool); err != nil {
|
||||
missing = append(missing, tool)
|
||||
}
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
return fmt.Errorf("missing required tools: %s", strings.Join(missing, ", "))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckDatabaseExists verifies if target database exists
|
||||
func (s *Safety) CheckDatabaseExists(ctx context.Context, dbName string) (bool, error) {
|
||||
if s.cfg.DatabaseType == "postgres" {
|
||||
return s.checkPostgresDatabaseExists(ctx, dbName)
|
||||
} else if s.cfg.DatabaseType == "mysql" || s.cfg.DatabaseType == "mariadb" {
|
||||
return s.checkMySQLDatabaseExists(ctx, dbName)
|
||||
}
|
||||
|
||||
return false, fmt.Errorf("unsupported database type: %s", s.cfg.DatabaseType)
|
||||
}
|
||||
|
||||
// checkPostgresDatabaseExists checks if PostgreSQL database exists
|
||||
func (s *Safety) checkPostgresDatabaseExists(ctx context.Context, dbName string) (bool, error) {
|
||||
cmd := exec.CommandContext(ctx,
|
||||
"psql",
|
||||
"-h", s.cfg.Host,
|
||||
"-p", fmt.Sprintf("%d", s.cfg.Port),
|
||||
"-U", s.cfg.User,
|
||||
"-d", "postgres",
|
||||
"-tAc", fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname='%s'", dbName),
|
||||
)
|
||||
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("PGPASSWORD=%s", s.cfg.Password))
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to check database existence: %w", err)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(output)) == "1", nil
|
||||
}
|
||||
|
||||
// checkMySQLDatabaseExists checks if MySQL database exists
|
||||
func (s *Safety) checkMySQLDatabaseExists(ctx context.Context, dbName string) (bool, error) {
|
||||
cmd := exec.CommandContext(ctx,
|
||||
"mysql",
|
||||
"-h", s.cfg.Host,
|
||||
"-P", fmt.Sprintf("%d", s.cfg.Port),
|
||||
"-u", s.cfg.User,
|
||||
"-e", fmt.Sprintf("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='%s'", dbName),
|
||||
)
|
||||
|
||||
if s.cfg.Password != "" {
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("MYSQL_PWD=%s", s.cfg.Password))
|
||||
}
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to check database existence: %w", err)
|
||||
}
|
||||
|
||||
return strings.Contains(string(output), dbName), nil
|
||||
}
|
||||
411
internal/tui/archive_browser.go
Normal file
411
internal/tui/archive_browser.go
Normal file
@ -0,0 +1,411 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
|
||||
"dbbackup/internal/config"
|
||||
"dbbackup/internal/logger"
|
||||
"dbbackup/internal/restore"
|
||||
)
|
||||
|
||||
var (
|
||||
archiveHeaderStyle = lipgloss.NewStyle().
|
||||
Bold(true).
|
||||
Foreground(lipgloss.Color("#7D56F4"))
|
||||
|
||||
archiveSelectedStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#FF75B7")).
|
||||
Bold(true)
|
||||
|
||||
archiveNormalStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#626262"))
|
||||
|
||||
archiveValidStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#04B575"))
|
||||
|
||||
archiveInvalidStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#FF6B6B"))
|
||||
|
||||
archiveOldStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#FFA500"))
|
||||
)
|
||||
|
||||
// ArchiveInfo holds information about a backup archive
|
||||
type ArchiveInfo struct {
|
||||
Name string
|
||||
Path string
|
||||
Format restore.ArchiveFormat
|
||||
Size int64
|
||||
Modified time.Time
|
||||
DatabaseName string
|
||||
Valid bool
|
||||
ValidationMsg string
|
||||
}
|
||||
|
||||
// ArchiveBrowserModel for browsing and selecting backup archives
|
||||
type ArchiveBrowserModel struct {
|
||||
config *config.Config
|
||||
logger logger.Logger
|
||||
parent tea.Model
|
||||
archives []ArchiveInfo
|
||||
cursor int
|
||||
loading bool
|
||||
err error
|
||||
mode string // "restore-single", "restore-cluster", "manage"
|
||||
filterType string // "all", "postgres", "mysql", "cluster"
|
||||
message string
|
||||
}
|
||||
|
||||
// NewArchiveBrowser creates a new archive browser
|
||||
func NewArchiveBrowser(cfg *config.Config, log logger.Logger, parent tea.Model, mode string) ArchiveBrowserModel {
|
||||
return ArchiveBrowserModel{
|
||||
config: cfg,
|
||||
logger: log,
|
||||
parent: parent,
|
||||
loading: true,
|
||||
mode: mode,
|
||||
filterType: "all",
|
||||
}
|
||||
}
|
||||
|
||||
func (m ArchiveBrowserModel) Init() tea.Cmd {
|
||||
return loadArchives(m.config, m.logger)
|
||||
}
|
||||
|
||||
type archiveListMsg struct {
|
||||
archives []ArchiveInfo
|
||||
err error
|
||||
}
|
||||
|
||||
func loadArchives(cfg *config.Config, log logger.Logger) tea.Cmd {
|
||||
return func() tea.Msg {
|
||||
backupDir := cfg.BackupDir
|
||||
|
||||
// Check if backup directory exists
|
||||
if _, err := os.Stat(backupDir); err != nil {
|
||||
return archiveListMsg{archives: nil, err: fmt.Errorf("backup directory not found: %s", backupDir)}
|
||||
}
|
||||
|
||||
// List all files
|
||||
files, err := os.ReadDir(backupDir)
|
||||
if err != nil {
|
||||
return archiveListMsg{archives: nil, err: fmt.Errorf("cannot read backup directory: %w", err)}
|
||||
}
|
||||
|
||||
var archives []ArchiveInfo
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
name := file.Name()
|
||||
format := restore.DetectArchiveFormat(name)
|
||||
|
||||
if format == restore.FormatUnknown {
|
||||
continue // Skip non-backup files
|
||||
}
|
||||
|
||||
info, _ := file.Info()
|
||||
fullPath := filepath.Join(backupDir, name)
|
||||
|
||||
// Extract database name
|
||||
dbName := extractDBNameFromFilename(name)
|
||||
|
||||
// Basic validation (just check if file is readable)
|
||||
valid := true
|
||||
validationMsg := "Valid"
|
||||
if info.Size() == 0 {
|
||||
valid = false
|
||||
validationMsg = "Empty file"
|
||||
}
|
||||
|
||||
archives = append(archives, ArchiveInfo{
|
||||
Name: name,
|
||||
Path: fullPath,
|
||||
Format: format,
|
||||
Size: info.Size(),
|
||||
Modified: info.ModTime(),
|
||||
DatabaseName: dbName,
|
||||
Valid: valid,
|
||||
ValidationMsg: validationMsg,
|
||||
})
|
||||
}
|
||||
|
||||
// Sort by modification time (newest first)
|
||||
sort.Slice(archives, func(i, j int) bool {
|
||||
return archives[i].Modified.After(archives[j].Modified)
|
||||
})
|
||||
|
||||
return archiveListMsg{archives: archives, err: nil}
|
||||
}
|
||||
}
|
||||
|
||||
func (m ArchiveBrowserModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
switch msg := msg.(type) {
|
||||
case archiveListMsg:
|
||||
m.loading = false
|
||||
if msg.err != nil {
|
||||
m.err = msg.err
|
||||
return m, nil
|
||||
}
|
||||
m.archives = m.filterArchives(msg.archives)
|
||||
if len(m.archives) == 0 {
|
||||
m.message = "No backup archives found"
|
||||
}
|
||||
return m, nil
|
||||
|
||||
case tea.KeyMsg:
|
||||
switch msg.String() {
|
||||
case "ctrl+c", "q", "esc":
|
||||
return m.parent, nil
|
||||
|
||||
case "up", "k":
|
||||
if m.cursor > 0 {
|
||||
m.cursor--
|
||||
}
|
||||
|
||||
case "down", "j":
|
||||
if m.cursor < len(m.archives)-1 {
|
||||
m.cursor++
|
||||
}
|
||||
|
||||
case "f":
|
||||
// Toggle filter
|
||||
filters := []string{"all", "postgres", "mysql", "cluster"}
|
||||
for i, f := range filters {
|
||||
if f == m.filterType {
|
||||
m.filterType = filters[(i+1)%len(filters)]
|
||||
break
|
||||
}
|
||||
}
|
||||
m.cursor = 0
|
||||
return m, loadArchives(m.config, m.logger)
|
||||
|
||||
case "enter", " ":
|
||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||
selected := m.archives[m.cursor]
|
||||
|
||||
// Validate selection based on mode
|
||||
if m.mode == "restore-cluster" && !selected.Format.IsClusterBackup() {
|
||||
m.message = errorStyle.Render("❌ Please select a cluster backup (.tar.gz)")
|
||||
return m, nil
|
||||
}
|
||||
|
||||
if m.mode == "restore-single" && selected.Format.IsClusterBackup() {
|
||||
m.message = errorStyle.Render("❌ Please select a single database backup")
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Open restore preview
|
||||
preview := NewRestorePreview(m.config, m.logger, m.parent, selected, m.mode)
|
||||
return preview, preview.Init()
|
||||
}
|
||||
|
||||
case "i":
|
||||
// Show detailed info
|
||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||
selected := m.archives[m.cursor]
|
||||
m.message = fmt.Sprintf("📦 %s | Format: %s | Size: %s | Modified: %s",
|
||||
selected.Name,
|
||||
selected.Format.String(),
|
||||
formatSize(selected.Size),
|
||||
selected.Modified.Format("2006-01-02 15:04:05"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m ArchiveBrowserModel) View() string {
|
||||
var s strings.Builder
|
||||
|
||||
// Header
|
||||
title := "📦 Backup Archives"
|
||||
if m.mode == "restore-single" {
|
||||
title = "📦 Select Archive to Restore (Single Database)"
|
||||
} else if m.mode == "restore-cluster" {
|
||||
title = "📦 Select Archive to Restore (Cluster)"
|
||||
}
|
||||
|
||||
s.WriteString(titleStyle.Render(title))
|
||||
s.WriteString("\n\n")
|
||||
|
||||
if m.loading {
|
||||
s.WriteString(infoStyle.Render("Loading archives..."))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
if m.err != nil {
|
||||
s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v", m.err)))
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Filter info
|
||||
filterLabel := "Filter: " + m.filterType
|
||||
s.WriteString(infoStyle.Render(filterLabel))
|
||||
s.WriteString(infoStyle.Render(" (Press 'f' to change filter)"))
|
||||
s.WriteString("\n\n")
|
||||
|
||||
// Archives list
|
||||
if len(m.archives) == 0 {
|
||||
s.WriteString(infoStyle.Render(m.message))
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Column headers
|
||||
s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-40s %-25s %-12s %-20s",
|
||||
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(strings.Repeat("─", 100))
|
||||
s.WriteString("\n")
|
||||
|
||||
// Show archives (limit to visible area)
|
||||
start := m.cursor - 5
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
end := start + 10
|
||||
if end > len(m.archives) {
|
||||
end = len(m.archives)
|
||||
}
|
||||
|
||||
for i := start; i < end; i++ {
|
||||
archive := m.archives[i]
|
||||
cursor := " "
|
||||
style := archiveNormalStyle
|
||||
|
||||
if i == m.cursor {
|
||||
cursor = ">"
|
||||
style = archiveSelectedStyle
|
||||
}
|
||||
|
||||
// Color code based on validity and age
|
||||
statusIcon := "✓"
|
||||
if !archive.Valid {
|
||||
statusIcon = "✗"
|
||||
style = archiveInvalidStyle
|
||||
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
||||
style = archiveOldStyle
|
||||
statusIcon = "⚠"
|
||||
}
|
||||
|
||||
filename := truncate(archive.Name, 38)
|
||||
format := truncate(archive.Format.String(), 23)
|
||||
|
||||
line := fmt.Sprintf("%s %s %-38s %-23s %-10s %-19s",
|
||||
cursor,
|
||||
statusIcon,
|
||||
filename,
|
||||
format,
|
||||
formatSize(archive.Size),
|
||||
archive.Modified.Format("2006-01-02 15:04"))
|
||||
|
||||
s.WriteString(style.Render(line))
|
||||
s.WriteString("\n")
|
||||
}
|
||||
|
||||
// Footer
|
||||
s.WriteString("\n")
|
||||
if m.message != "" {
|
||||
s.WriteString(m.message)
|
||||
s.WriteString("\n")
|
||||
}
|
||||
|
||||
s.WriteString(infoStyle.Render(fmt.Sprintf("Total: %d archive(s) | Selected: %d/%d",
|
||||
len(m.archives), m.cursor+1, len(m.archives))))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(infoStyle.Render("⌨️ ↑/↓: Navigate | Enter: Select | f: Filter | i: Info | Esc: Back"))
|
||||
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// filterArchives filters archives based on current filter setting
|
||||
func (m ArchiveBrowserModel) filterArchives(archives []ArchiveInfo) []ArchiveInfo {
|
||||
if m.filterType == "all" {
|
||||
return archives
|
||||
}
|
||||
|
||||
var filtered []ArchiveInfo
|
||||
for _, archive := range archives {
|
||||
switch m.filterType {
|
||||
case "postgres":
|
||||
if archive.Format.IsPostgreSQL() && !archive.Format.IsClusterBackup() {
|
||||
filtered = append(filtered, archive)
|
||||
}
|
||||
case "mysql":
|
||||
if archive.Format.IsMySQL() {
|
||||
filtered = append(filtered, archive)
|
||||
}
|
||||
case "cluster":
|
||||
if archive.Format.IsClusterBackup() {
|
||||
filtered = append(filtered, archive)
|
||||
}
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// extractDBNameFromFilename extracts database name from archive filename
|
||||
func extractDBNameFromFilename(filename string) string {
|
||||
base := filepath.Base(filename)
|
||||
|
||||
// Remove extensions
|
||||
base = strings.TrimSuffix(base, ".tar.gz")
|
||||
base = strings.TrimSuffix(base, ".dump.gz")
|
||||
base = strings.TrimSuffix(base, ".sql.gz")
|
||||
base = strings.TrimSuffix(base, ".dump")
|
||||
base = strings.TrimSuffix(base, ".sql")
|
||||
|
||||
// Remove timestamp patterns (YYYYMMDD_HHMMSS)
|
||||
parts := strings.Split(base, "_")
|
||||
for i := len(parts) - 1; i >= 0; i-- {
|
||||
// Check if part looks like a date or time
|
||||
if len(parts[i]) == 8 || len(parts[i]) == 6 {
|
||||
parts = parts[:i]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(parts) > 0 {
|
||||
return parts[0]
|
||||
}
|
||||
|
||||
return base
|
||||
}
|
||||
|
||||
// formatSize formats file size
|
||||
func formatSize(bytes int64) string {
|
||||
const unit = 1024
|
||||
if bytes < unit {
|
||||
return fmt.Sprintf("%d B", bytes)
|
||||
}
|
||||
div, exp := int64(unit), 0
|
||||
for n := bytes / unit; n >= unit; n /= unit {
|
||||
div *= unit
|
||||
exp++
|
||||
}
|
||||
return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp])
|
||||
}
|
||||
|
||||
// truncate truncates string to max length
|
||||
func truncate(s string, max int) string {
|
||||
if len(s) <= max {
|
||||
return s
|
||||
}
|
||||
return s[:max-3] + "..."
|
||||
}
|
||||
230
internal/tui/backup_manager.go
Normal file
230
internal/tui/backup_manager.go
Normal file
@ -0,0 +1,230 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
|
||||
"dbbackup/internal/config"
|
||||
"dbbackup/internal/logger"
|
||||
)
|
||||
|
||||
// BackupManagerModel manages backup archives
|
||||
type BackupManagerModel struct {
|
||||
config *config.Config
|
||||
logger logger.Logger
|
||||
parent tea.Model
|
||||
archives []ArchiveInfo
|
||||
cursor int
|
||||
loading bool
|
||||
err error
|
||||
message string
|
||||
totalSize int64
|
||||
freeSpace int64
|
||||
}
|
||||
|
||||
// NewBackupManager creates a new backup manager
|
||||
func NewBackupManager(cfg *config.Config, log logger.Logger, parent tea.Model) BackupManagerModel {
|
||||
return BackupManagerModel{
|
||||
config: cfg,
|
||||
logger: log,
|
||||
parent: parent,
|
||||
loading: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (m BackupManagerModel) Init() tea.Cmd {
|
||||
return loadArchives(m.config, m.logger)
|
||||
}
|
||||
|
||||
func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
switch msg := msg.(type) {
|
||||
case archiveListMsg:
|
||||
m.loading = false
|
||||
if msg.err != nil {
|
||||
m.err = msg.err
|
||||
return m, nil
|
||||
}
|
||||
m.archives = msg.archives
|
||||
|
||||
// Calculate total size
|
||||
m.totalSize = 0
|
||||
for _, archive := range m.archives {
|
||||
m.totalSize += archive.Size
|
||||
}
|
||||
|
||||
// Get free space (simplified - just show message)
|
||||
m.message = fmt.Sprintf("Loaded %d archive(s)", len(m.archives))
|
||||
return m, nil
|
||||
|
||||
case tea.KeyMsg:
|
||||
switch msg.String() {
|
||||
case "ctrl+c", "q", "esc":
|
||||
return m.parent, nil
|
||||
|
||||
case "up", "k":
|
||||
if m.cursor > 0 {
|
||||
m.cursor--
|
||||
}
|
||||
|
||||
case "down", "j":
|
||||
if m.cursor < len(m.archives)-1 {
|
||||
m.cursor++
|
||||
}
|
||||
|
||||
case "v":
|
||||
// Verify archive
|
||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||
selected := m.archives[m.cursor]
|
||||
m.message = fmt.Sprintf("🔍 Verifying %s...", selected.Name)
|
||||
// In real implementation, would run verification
|
||||
}
|
||||
|
||||
case "d":
|
||||
// Delete archive (with confirmation)
|
||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||
selected := m.archives[m.cursor]
|
||||
confirm := NewConfirmationModel(m.config, m.logger, m,
|
||||
"🗑️ Delete Archive",
|
||||
fmt.Sprintf("Delete archive '%s'? This cannot be undone.", selected.Name))
|
||||
return confirm, nil
|
||||
}
|
||||
|
||||
case "i":
|
||||
// Show info
|
||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||
selected := m.archives[m.cursor]
|
||||
m.message = fmt.Sprintf("📦 %s | %s | %s | Modified: %s",
|
||||
selected.Name,
|
||||
selected.Format.String(),
|
||||
formatSize(selected.Size),
|
||||
selected.Modified.Format("2006-01-02 15:04:05"))
|
||||
}
|
||||
|
||||
case "r":
|
||||
// Restore selected archive
|
||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||
selected := m.archives[m.cursor]
|
||||
mode := "restore-single"
|
||||
if selected.Format.IsClusterBackup() {
|
||||
mode = "restore-cluster"
|
||||
}
|
||||
preview := NewRestorePreview(m.config, m.logger, m.parent, selected, mode)
|
||||
return preview, preview.Init()
|
||||
}
|
||||
|
||||
case "R":
|
||||
// Refresh list
|
||||
m.loading = true
|
||||
m.message = "Refreshing..."
|
||||
return m, loadArchives(m.config, m.logger)
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m BackupManagerModel) View() string {
|
||||
var s strings.Builder
|
||||
|
||||
// Title
|
||||
s.WriteString(titleStyle.Render("🗄️ Backup Archive Manager"))
|
||||
s.WriteString("\n\n")
|
||||
|
||||
if m.loading {
|
||||
s.WriteString(infoStyle.Render("Loading archives..."))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
if m.err != nil {
|
||||
s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v", m.err)))
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Summary
|
||||
s.WriteString(infoStyle.Render(fmt.Sprintf("Total Archives: %d | Total Size: %s",
|
||||
len(m.archives), formatSize(m.totalSize))))
|
||||
s.WriteString("\n\n")
|
||||
|
||||
// Archives list
|
||||
if len(m.archives) == 0 {
|
||||
s.WriteString(infoStyle.Render("No backup archives found"))
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Column headers
|
||||
s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-35s %-25s %-12s %-20s",
|
||||
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(strings.Repeat("─", 95))
|
||||
s.WriteString("\n")
|
||||
|
||||
// Show archives (limit to visible area)
|
||||
start := m.cursor - 5
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
end := start + 12
|
||||
if end > len(m.archives) {
|
||||
end = len(m.archives)
|
||||
}
|
||||
|
||||
for i := start; i < end; i++ {
|
||||
archive := m.archives[i]
|
||||
cursor := " "
|
||||
style := archiveNormalStyle
|
||||
|
||||
if i == m.cursor {
|
||||
cursor = ">"
|
||||
style = archiveSelectedStyle
|
||||
}
|
||||
|
||||
// Status icon
|
||||
statusIcon := "✓"
|
||||
if !archive.Valid {
|
||||
statusIcon = "✗"
|
||||
style = archiveInvalidStyle
|
||||
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
||||
statusIcon = "⚠"
|
||||
}
|
||||
|
||||
filename := truncate(archive.Name, 33)
|
||||
format := truncate(archive.Format.String(), 23)
|
||||
|
||||
line := fmt.Sprintf("%s %s %-33s %-23s %-10s %-19s",
|
||||
cursor,
|
||||
statusIcon,
|
||||
filename,
|
||||
format,
|
||||
formatSize(archive.Size),
|
||||
archive.Modified.Format("2006-01-02 15:04"))
|
||||
|
||||
s.WriteString(style.Render(line))
|
||||
s.WriteString("\n")
|
||||
}
|
||||
|
||||
// Footer
|
||||
s.WriteString("\n")
|
||||
if m.message != "" {
|
||||
s.WriteString(infoStyle.Render(m.message))
|
||||
s.WriteString("\n")
|
||||
}
|
||||
|
||||
s.WriteString(infoStyle.Render(fmt.Sprintf("Selected: %d/%d", m.cursor+1, len(m.archives))))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(infoStyle.Render("⌨️ ↑/↓: Navigate | r: Restore | v: Verify | d: Delete | i: Info | R: Refresh | Esc: Back"))
|
||||
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// deleteArchive deletes a backup archive (to be called from confirmation)
|
||||
func deleteArchive(archivePath string) error {
|
||||
return os.Remove(archivePath)
|
||||
}
|
||||
@ -82,6 +82,11 @@ func NewMenuModel(cfg *config.Config, log logger.Logger) MenuModel {
|
||||
"Single Database Backup",
|
||||
"Sample Database Backup (with ratio)",
|
||||
"Cluster Backup (all databases)",
|
||||
"────────────────────────────────",
|
||||
"Restore Single Database",
|
||||
"Restore Cluster Backup",
|
||||
"List & Manage Backups",
|
||||
"────────────────────────────────",
|
||||
"View Active Operations",
|
||||
"Show Operation History",
|
||||
"Database Status & Health Check",
|
||||
@ -153,17 +158,27 @@ func (m MenuModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
return m.handleSampleBackup()
|
||||
case 2: // Cluster Backup
|
||||
return m.handleClusterBackup()
|
||||
case 3: // View Active Operations
|
||||
case 3: // Separator
|
||||
// Do nothing
|
||||
case 4: // Restore Single Database
|
||||
return m.handleRestoreSingle()
|
||||
case 5: // Restore Cluster Backup
|
||||
return m.handleRestoreCluster()
|
||||
case 6: // List & Manage Backups
|
||||
return m.handleBackupManager()
|
||||
case 7: // Separator
|
||||
// Do nothing
|
||||
case 8: // View Active Operations
|
||||
return m.handleViewOperations()
|
||||
case 4: // Show Operation History
|
||||
case 9: // Show Operation History
|
||||
return m.handleOperationHistory()
|
||||
case 5: // Database Status
|
||||
case 10: // Database Status
|
||||
return m.handleStatus()
|
||||
case 6: // Settings
|
||||
case 11: // Settings
|
||||
return m.handleSettings()
|
||||
case 7: // Clear History
|
||||
case 12: // Clear History
|
||||
m.message = "🗑️ History cleared"
|
||||
case 8: // Quit
|
||||
case 13: // Quit
|
||||
if m.cancel != nil {
|
||||
m.cancel()
|
||||
}
|
||||
@ -281,6 +296,28 @@ func (m MenuModel) handleSettings() (tea.Model, tea.Cmd) {
|
||||
return settingsModel, nil
|
||||
}
|
||||
|
||||
// handleRestoreSingle opens archive browser for single restore
|
||||
func (m MenuModel) handleRestoreSingle() (tea.Model, tea.Cmd) {
|
||||
browser := NewArchiveBrowser(m.config, m.logger, m, "restore-single")
|
||||
return browser, browser.Init()
|
||||
}
|
||||
|
||||
// handleRestoreCluster opens archive browser for cluster restore
|
||||
func (m MenuModel) handleRestoreCluster() (tea.Model, tea.Cmd) {
|
||||
if !m.config.IsPostgreSQL() {
|
||||
m.message = errorStyle.Render("❌ Cluster restore is available only for PostgreSQL")
|
||||
return m, nil
|
||||
}
|
||||
browser := NewArchiveBrowser(m.config, m.logger, m, "restore-cluster")
|
||||
return browser, browser.Init()
|
||||
}
|
||||
|
||||
// handleBackupManager opens backup management view
|
||||
func (m MenuModel) handleBackupManager() (tea.Model, tea.Cmd) {
|
||||
manager := NewBackupManager(m.config, m.logger, m)
|
||||
return manager, manager.Init()
|
||||
}
|
||||
|
||||
func (m *MenuModel) applyDatabaseSelection() {
|
||||
if m == nil || len(m.dbTypes) == 0 {
|
||||
return
|
||||
|
||||
286
internal/tui/restore_exec.go
Normal file
286
internal/tui/restore_exec.go
Normal file
@ -0,0 +1,286 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
|
||||
"dbbackup/internal/config"
|
||||
"dbbackup/internal/database"
|
||||
"dbbackup/internal/logger"
|
||||
"dbbackup/internal/restore"
|
||||
)
|
||||
|
||||
// RestoreExecutionModel handles restore execution with progress
|
||||
type RestoreExecutionModel struct {
|
||||
config *config.Config
|
||||
logger logger.Logger
|
||||
parent tea.Model
|
||||
archive ArchiveInfo
|
||||
targetDB string
|
||||
cleanFirst bool
|
||||
createIfMissing bool
|
||||
restoreType string
|
||||
|
||||
// Progress tracking
|
||||
status string
|
||||
phase string
|
||||
progress int
|
||||
details []string
|
||||
startTime time.Time
|
||||
|
||||
// Results
|
||||
done bool
|
||||
err error
|
||||
result string
|
||||
elapsed time.Duration
|
||||
}
|
||||
|
||||
// NewRestoreExecution creates a new restore execution model
|
||||
func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string) RestoreExecutionModel {
|
||||
return RestoreExecutionModel{
|
||||
config: cfg,
|
||||
logger: log,
|
||||
parent: parent,
|
||||
archive: archive,
|
||||
targetDB: targetDB,
|
||||
cleanFirst: cleanFirst,
|
||||
createIfMissing: createIfMissing,
|
||||
restoreType: restoreType,
|
||||
status: "Initializing...",
|
||||
phase: "Starting",
|
||||
startTime: time.Now(),
|
||||
details: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (m RestoreExecutionModel) Init() tea.Cmd {
|
||||
return tea.Batch(
|
||||
executeRestoreWithTUIProgress(m.config, m.logger, m.archive, m.targetDB, m.cleanFirst, m.createIfMissing, m.restoreType),
|
||||
restoreTickCmd(),
|
||||
)
|
||||
}
|
||||
|
||||
type restoreTickMsg time.Time
|
||||
|
||||
func restoreTickCmd() tea.Cmd {
|
||||
return tea.Tick(time.Millisecond*200, func(t time.Time) tea.Msg {
|
||||
return restoreTickMsg(t)
|
||||
})
|
||||
}
|
||||
|
||||
type restoreProgressMsg struct {
|
||||
status string
|
||||
phase string
|
||||
progress int
|
||||
detail string
|
||||
}
|
||||
|
||||
type restoreCompleteMsg struct {
|
||||
result string
|
||||
err error
|
||||
elapsed time.Duration
|
||||
}
|
||||
|
||||
func executeRestoreWithTUIProgress(cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string) tea.Cmd {
|
||||
return func() tea.Msg {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Hour)
|
||||
defer cancel()
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Create database instance
|
||||
dbClient, err := database.New(cfg, log)
|
||||
if err != nil {
|
||||
return restoreCompleteMsg{
|
||||
result: "",
|
||||
err: fmt.Errorf("failed to create database client: %w", err),
|
||||
elapsed: time.Since(start),
|
||||
}
|
||||
}
|
||||
defer dbClient.Close()
|
||||
|
||||
// Create restore engine
|
||||
engine := restore.New(cfg, log, dbClient)
|
||||
|
||||
// Execute restore based on type
|
||||
var restoreErr error
|
||||
if restoreType == "restore-cluster" {
|
||||
restoreErr = engine.RestoreCluster(ctx, archive.Path)
|
||||
} else {
|
||||
restoreErr = engine.RestoreSingle(ctx, archive.Path, targetDB, cleanFirst, createIfMissing)
|
||||
}
|
||||
|
||||
if restoreErr != nil {
|
||||
return restoreCompleteMsg{
|
||||
result: "",
|
||||
err: restoreErr,
|
||||
elapsed: time.Since(start),
|
||||
}
|
||||
}
|
||||
|
||||
result := fmt.Sprintf("Successfully restored from %s", archive.Name)
|
||||
if restoreType == "restore-single" {
|
||||
result = fmt.Sprintf("Successfully restored '%s' from %s", targetDB, archive.Name)
|
||||
}
|
||||
|
||||
return restoreCompleteMsg{
|
||||
result: result,
|
||||
err: nil,
|
||||
elapsed: time.Since(start),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
switch msg := msg.(type) {
|
||||
case restoreTickMsg:
|
||||
if !m.done {
|
||||
m.progress = (m.progress + 2) % 100
|
||||
m.elapsed = time.Since(m.startTime)
|
||||
return m, restoreTickCmd()
|
||||
}
|
||||
return m, nil
|
||||
|
||||
case restoreProgressMsg:
|
||||
m.status = msg.status
|
||||
m.phase = msg.phase
|
||||
m.progress = msg.progress
|
||||
if msg.detail != "" {
|
||||
m.details = append(m.details, msg.detail)
|
||||
// Keep only last 5 details
|
||||
if len(m.details) > 5 {
|
||||
m.details = m.details[len(m.details)-5:]
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
|
||||
case restoreCompleteMsg:
|
||||
m.done = true
|
||||
m.err = msg.err
|
||||
m.result = msg.result
|
||||
m.elapsed = msg.elapsed
|
||||
|
||||
if m.err == nil {
|
||||
m.status = "Completed"
|
||||
m.phase = "Done"
|
||||
m.progress = 100
|
||||
} else {
|
||||
m.status = "Failed"
|
||||
m.phase = "Error"
|
||||
}
|
||||
return m, nil
|
||||
|
||||
case tea.KeyMsg:
|
||||
switch msg.String() {
|
||||
case "ctrl+c":
|
||||
if !m.done {
|
||||
m.status = "Cancelling..."
|
||||
// In real implementation, would cancel context
|
||||
}
|
||||
return m, nil
|
||||
|
||||
case "enter", " ", "q", "esc":
|
||||
if m.done {
|
||||
return m.parent, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m RestoreExecutionModel) View() string {
|
||||
var s strings.Builder
|
||||
|
||||
// Title
|
||||
title := "💾 Restoring Database"
|
||||
if m.restoreType == "restore-cluster" {
|
||||
title = "💾 Restoring Cluster"
|
||||
}
|
||||
s.WriteString(titleStyle.Render(title))
|
||||
s.WriteString("\n\n")
|
||||
|
||||
// Archive info
|
||||
s.WriteString(fmt.Sprintf("Archive: %s\n", m.archive.Name))
|
||||
if m.restoreType == "restore-single" {
|
||||
s.WriteString(fmt.Sprintf("Target: %s\n", m.targetDB))
|
||||
}
|
||||
s.WriteString("\n")
|
||||
|
||||
if m.done {
|
||||
// Show result
|
||||
if m.err != nil {
|
||||
s.WriteString(errorStyle.Render("❌ Restore Failed"))
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(errorStyle.Render(fmt.Sprintf("Error: %v", m.err)))
|
||||
s.WriteString("\n")
|
||||
} else {
|
||||
s.WriteString(successStyle.Render("✅ Restore Completed Successfully"))
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(successStyle.Render(m.result))
|
||||
s.WriteString("\n")
|
||||
}
|
||||
|
||||
s.WriteString(fmt.Sprintf("\nElapsed Time: %s\n", formatDuration(m.elapsed)))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(infoStyle.Render("⌨️ Press Enter to continue"))
|
||||
} else {
|
||||
// Show progress
|
||||
s.WriteString(fmt.Sprintf("Phase: %s\n", m.phase))
|
||||
s.WriteString(fmt.Sprintf("Status: %s\n", m.status))
|
||||
s.WriteString("\n")
|
||||
|
||||
// Progress bar
|
||||
progressBar := renderProgressBar(m.progress)
|
||||
s.WriteString(progressBar)
|
||||
s.WriteString(fmt.Sprintf(" %d%%\n", m.progress))
|
||||
s.WriteString("\n")
|
||||
|
||||
// Details
|
||||
if len(m.details) > 0 {
|
||||
s.WriteString(infoStyle.Render("Recent activity:"))
|
||||
s.WriteString("\n")
|
||||
for _, detail := range m.details {
|
||||
s.WriteString(fmt.Sprintf(" • %s\n", detail))
|
||||
}
|
||||
s.WriteString("\n")
|
||||
}
|
||||
|
||||
// Elapsed time
|
||||
s.WriteString(fmt.Sprintf("Elapsed: %s\n", formatDuration(m.elapsed)))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(infoStyle.Render("⌨️ Press Ctrl+C to cancel"))
|
||||
}
|
||||
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// renderProgressBar renders a text progress bar
|
||||
func renderProgressBar(percent int) string {
|
||||
width := 40
|
||||
filled := (percent * width) / 100
|
||||
|
||||
bar := strings.Repeat("█", filled)
|
||||
empty := strings.Repeat("░", width-filled)
|
||||
|
||||
return successStyle.Render(bar) + infoStyle.Render(empty)
|
||||
}
|
||||
|
||||
// formatDuration formats duration in human readable format
|
||||
func formatDuration(d time.Duration) string {
|
||||
if d < time.Minute {
|
||||
return fmt.Sprintf("%.1fs", d.Seconds())
|
||||
}
|
||||
if d < time.Hour {
|
||||
minutes := int(d.Minutes())
|
||||
seconds := int(d.Seconds()) % 60
|
||||
return fmt.Sprintf("%dm %ds", minutes, seconds)
|
||||
}
|
||||
hours := int(d.Hours())
|
||||
minutes := int(d.Minutes()) % 60
|
||||
return fmt.Sprintf("%dh %dm", hours, minutes)
|
||||
}
|
||||
324
internal/tui/restore_preview.go
Normal file
324
internal/tui/restore_preview.go
Normal file
@ -0,0 +1,324 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
|
||||
"dbbackup/internal/config"
|
||||
"dbbackup/internal/logger"
|
||||
"dbbackup/internal/restore"
|
||||
)
|
||||
|
||||
var (
|
||||
previewBoxStyle = lipgloss.NewStyle().
|
||||
Border(lipgloss.RoundedBorder()).
|
||||
BorderForeground(lipgloss.Color("#7D56F4")).
|
||||
Padding(1, 2)
|
||||
|
||||
checkPassedStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#04B575"))
|
||||
|
||||
checkFailedStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#FF6B6B"))
|
||||
|
||||
checkWarningStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#FFA500"))
|
||||
|
||||
checkPendingStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#626262"))
|
||||
)
|
||||
|
||||
// SafetyCheck represents a pre-restore safety check
|
||||
type SafetyCheck struct {
|
||||
Name string
|
||||
Status string // "pending", "checking", "passed", "failed", "warning"
|
||||
Message string
|
||||
Critical bool
|
||||
}
|
||||
|
||||
// RestorePreviewModel shows restore preview and safety checks
|
||||
type RestorePreviewModel struct {
|
||||
config *config.Config
|
||||
logger logger.Logger
|
||||
parent tea.Model
|
||||
archive ArchiveInfo
|
||||
mode string
|
||||
targetDB string
|
||||
cleanFirst bool
|
||||
createIfMissing bool
|
||||
safetyChecks []SafetyCheck
|
||||
checking bool
|
||||
canProceed bool
|
||||
message string
|
||||
}
|
||||
|
||||
// NewRestorePreview creates a new restore preview
|
||||
func NewRestorePreview(cfg *config.Config, log logger.Logger, parent tea.Model, archive ArchiveInfo, mode string) RestorePreviewModel {
|
||||
// Default target database name from archive
|
||||
targetDB := archive.DatabaseName
|
||||
if targetDB == "" {
|
||||
targetDB = cfg.Database
|
||||
}
|
||||
|
||||
return RestorePreviewModel{
|
||||
config: cfg,
|
||||
logger: log,
|
||||
parent: parent,
|
||||
archive: archive,
|
||||
mode: mode,
|
||||
targetDB: targetDB,
|
||||
cleanFirst: false,
|
||||
createIfMissing: true,
|
||||
checking: true,
|
||||
safetyChecks: []SafetyCheck{
|
||||
{Name: "Archive integrity", Status: "pending", Critical: true},
|
||||
{Name: "Disk space", Status: "pending", Critical: true},
|
||||
{Name: "Required tools", Status: "pending", Critical: true},
|
||||
{Name: "Target database", Status: "pending", Critical: false},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m RestorePreviewModel) Init() tea.Cmd {
|
||||
return runSafetyChecks(m.config, m.logger, m.archive, m.targetDB)
|
||||
}
|
||||
|
||||
type safetyCheckCompleteMsg struct {
|
||||
checks []SafetyCheck
|
||||
canProceed bool
|
||||
}
|
||||
|
||||
func runSafetyChecks(cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string) tea.Cmd {
|
||||
return func() tea.Msg {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
safety := restore.NewSafety(cfg, log)
|
||||
checks := []SafetyCheck{}
|
||||
canProceed := true
|
||||
|
||||
// 1. Archive integrity
|
||||
check := SafetyCheck{Name: "Archive integrity", Status: "checking", Critical: true}
|
||||
if err := safety.ValidateArchive(archive.Path); err != nil {
|
||||
check.Status = "failed"
|
||||
check.Message = err.Error()
|
||||
canProceed = false
|
||||
} else {
|
||||
check.Status = "passed"
|
||||
check.Message = "Valid backup archive"
|
||||
}
|
||||
checks = append(checks, check)
|
||||
|
||||
// 2. Disk space
|
||||
check = SafetyCheck{Name: "Disk space", Status: "checking", Critical: true}
|
||||
multiplier := 3.0
|
||||
if archive.Format.IsClusterBackup() {
|
||||
multiplier = 4.0
|
||||
}
|
||||
if err := safety.CheckDiskSpace(archive.Path, multiplier); err != nil {
|
||||
check.Status = "warning"
|
||||
check.Message = err.Error()
|
||||
// Not critical - just warning
|
||||
} else {
|
||||
check.Status = "passed"
|
||||
check.Message = "Sufficient space available"
|
||||
}
|
||||
checks = append(checks, check)
|
||||
|
||||
// 3. Required tools
|
||||
check = SafetyCheck{Name: "Required tools", Status: "checking", Critical: true}
|
||||
dbType := "postgres"
|
||||
if archive.Format.IsMySQL() {
|
||||
dbType = "mysql"
|
||||
}
|
||||
if err := safety.VerifyTools(dbType); err != nil {
|
||||
check.Status = "failed"
|
||||
check.Message = err.Error()
|
||||
canProceed = false
|
||||
} else {
|
||||
check.Status = "passed"
|
||||
check.Message = "All required tools available"
|
||||
}
|
||||
checks = append(checks, check)
|
||||
|
||||
// 4. Target database check
|
||||
check = SafetyCheck{Name: "Target database", Status: "checking", Critical: false}
|
||||
exists, err := safety.CheckDatabaseExists(ctx, targetDB)
|
||||
if err != nil {
|
||||
check.Status = "warning"
|
||||
check.Message = fmt.Sprintf("Cannot check: %v", err)
|
||||
} else if exists {
|
||||
check.Status = "warning"
|
||||
check.Message = fmt.Sprintf("Database '%s' exists - will be overwritten if clean-first enabled", targetDB)
|
||||
} else {
|
||||
check.Status = "passed"
|
||||
check.Message = fmt.Sprintf("Database '%s' does not exist - will be created", targetDB)
|
||||
}
|
||||
checks = append(checks, check)
|
||||
|
||||
return safetyCheckCompleteMsg{checks: checks, canProceed: canProceed}
|
||||
}
|
||||
}
|
||||
|
||||
func (m RestorePreviewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
switch msg := msg.(type) {
|
||||
case safetyCheckCompleteMsg:
|
||||
m.checking = false
|
||||
m.safetyChecks = msg.checks
|
||||
m.canProceed = msg.canProceed
|
||||
return m, nil
|
||||
|
||||
case tea.KeyMsg:
|
||||
switch msg.String() {
|
||||
case "ctrl+c", "q", "esc":
|
||||
return m.parent, nil
|
||||
|
||||
case "t":
|
||||
// Toggle clean-first
|
||||
m.cleanFirst = !m.cleanFirst
|
||||
m.message = fmt.Sprintf("Clean-first: %v", m.cleanFirst)
|
||||
|
||||
case "c":
|
||||
// Toggle create if missing
|
||||
m.createIfMissing = !m.createIfMissing
|
||||
m.message = fmt.Sprintf("Create if missing: %v", m.createIfMissing)
|
||||
|
||||
case "enter", " ":
|
||||
if m.checking {
|
||||
m.message = "Please wait for safety checks to complete..."
|
||||
return m, nil
|
||||
}
|
||||
|
||||
if !m.canProceed {
|
||||
m.message = errorStyle.Render("❌ Cannot proceed - critical safety checks failed")
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Proceed to restore execution
|
||||
exec := NewRestoreExecution(m.config, m.logger, m.parent, m.archive, m.targetDB, m.cleanFirst, m.createIfMissing, m.mode)
|
||||
return exec, exec.Init()
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m RestorePreviewModel) View() string {
|
||||
var s strings.Builder
|
||||
|
||||
// Title
|
||||
title := "🔍 Restore Preview"
|
||||
if m.mode == "restore-cluster" {
|
||||
title = "🔍 Cluster Restore Preview"
|
||||
}
|
||||
s.WriteString(titleStyle.Render(title))
|
||||
s.WriteString("\n\n")
|
||||
|
||||
// Archive Information
|
||||
s.WriteString(archiveHeaderStyle.Render("📦 Archive Information"))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(fmt.Sprintf(" File: %s\n", m.archive.Name))
|
||||
s.WriteString(fmt.Sprintf(" Format: %s\n", m.archive.Format.String()))
|
||||
s.WriteString(fmt.Sprintf(" Size: %s\n", formatSize(m.archive.Size)))
|
||||
s.WriteString(fmt.Sprintf(" Created: %s\n", m.archive.Modified.Format("2006-01-02 15:04:05")))
|
||||
if m.archive.DatabaseName != "" {
|
||||
s.WriteString(fmt.Sprintf(" Database: %s\n", m.archive.DatabaseName))
|
||||
}
|
||||
s.WriteString("\n")
|
||||
|
||||
// Target Information (only for single restore)
|
||||
if m.mode == "restore-single" {
|
||||
s.WriteString(archiveHeaderStyle.Render("🎯 Target Information"))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(fmt.Sprintf(" Database: %s\n", m.targetDB))
|
||||
s.WriteString(fmt.Sprintf(" Host: %s:%d\n", m.config.Host, m.config.Port))
|
||||
|
||||
cleanIcon := "✗"
|
||||
if m.cleanFirst {
|
||||
cleanIcon = "✓"
|
||||
}
|
||||
s.WriteString(fmt.Sprintf(" Clean First: %s %v\n", cleanIcon, m.cleanFirst))
|
||||
|
||||
createIcon := "✗"
|
||||
if m.createIfMissing {
|
||||
createIcon = "✓"
|
||||
}
|
||||
s.WriteString(fmt.Sprintf(" Create If Missing: %s %v\n", createIcon, m.createIfMissing))
|
||||
s.WriteString("\n")
|
||||
}
|
||||
|
||||
// Safety Checks
|
||||
s.WriteString(archiveHeaderStyle.Render("🛡️ Safety Checks"))
|
||||
s.WriteString("\n")
|
||||
|
||||
if m.checking {
|
||||
s.WriteString(infoStyle.Render(" Running safety checks..."))
|
||||
s.WriteString("\n")
|
||||
} else {
|
||||
for _, check := range m.safetyChecks {
|
||||
icon := "○"
|
||||
style := checkPendingStyle
|
||||
|
||||
switch check.Status {
|
||||
case "passed":
|
||||
icon = "✓"
|
||||
style = checkPassedStyle
|
||||
case "failed":
|
||||
icon = "✗"
|
||||
style = checkFailedStyle
|
||||
case "warning":
|
||||
icon = "⚠"
|
||||
style = checkWarningStyle
|
||||
case "checking":
|
||||
icon = "⟳"
|
||||
style = checkPendingStyle
|
||||
}
|
||||
|
||||
line := fmt.Sprintf(" %s %s", icon, check.Name)
|
||||
if check.Message != "" {
|
||||
line += fmt.Sprintf(" ... %s", check.Message)
|
||||
}
|
||||
s.WriteString(style.Render(line))
|
||||
s.WriteString("\n")
|
||||
}
|
||||
}
|
||||
s.WriteString("\n")
|
||||
|
||||
// Warnings
|
||||
if m.cleanFirst {
|
||||
s.WriteString(checkWarningStyle.Render("⚠️ Warning: Clean-first enabled"))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(infoStyle.Render(" All existing data in target database will be dropped!"))
|
||||
s.WriteString("\n\n")
|
||||
}
|
||||
|
||||
// Message
|
||||
if m.message != "" {
|
||||
s.WriteString(m.message)
|
||||
s.WriteString("\n\n")
|
||||
}
|
||||
|
||||
// Footer
|
||||
if m.checking {
|
||||
s.WriteString(infoStyle.Render("⌨️ Please wait..."))
|
||||
} else if m.canProceed {
|
||||
s.WriteString(successStyle.Render("✅ Ready to restore"))
|
||||
s.WriteString("\n")
|
||||
if m.mode == "restore-single" {
|
||||
s.WriteString(infoStyle.Render("⌨️ t: Toggle clean-first | c: Toggle create | Enter: Proceed | Esc: Cancel"))
|
||||
} else {
|
||||
s.WriteString(infoStyle.Render("⌨️ Enter: Proceed | Esc: Cancel"))
|
||||
}
|
||||
} else {
|
||||
s.WriteString(errorStyle.Render("❌ Cannot proceed - please fix errors above"))
|
||||
s.WriteString("\n")
|
||||
s.WriteString(infoStyle.Render("⌨️ Esc: Go back"))
|
||||
}
|
||||
|
||||
return s.String()
|
||||
}
|
||||
Reference in New Issue
Block a user