Compare commits

8 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 838c5b8c15 | |
| | 9d95a193db | |
| | 3201f0fb6a | |
| | 62ddc57fb7 | |
| | 510175ff04 | |
| | a85ad0c88c | |
| | 4938dc1918 | |
| | 09a917766f | |
@@ -4,8 +4,8 @@ This directory contains pre-compiled binaries for the DB Backup Tool across mult
 ## Build Information

 - **Version**: 3.42.34
-- **Build Time**: 2026-01-14_16:19:00_UTC
-- **Git Commit**: 7711a20
+- **Build Time**: 2026-01-16_13:17:19_UTC
+- **Git Commit**: 9d95a19

 ## Recent Updates (v1.1.0)

 - ✅ Fixed TUI progress display with line-by-line output
@@ -84,20 +84,14 @@ func findHbaFileViaPostgres() string {
 // parsePgHbaConf parses pg_hba.conf and returns the authentication method
 func parsePgHbaConf(path string, user string) AuthMethod {
-	// Try with sudo if we can't read directly
+	// Try to read the file directly - do NOT use sudo as it triggers password prompts
+	// If we can't read pg_hba.conf, we'll rely on connection attempts to determine auth
 	file, err := os.Open(path)
 	if err != nil {
-		// Try with sudo (with timeout)
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		defer cancel()
-
-		cmd := exec.CommandContext(ctx, "sudo", "cat", path)
-		output, err := cmd.Output()
-		if err != nil {
-			return AuthUnknown
-		}
-		return parseHbaContent(string(output), user)
+		// If we can't read the file, return unknown and let the connection determine auth
+		// This avoids sudo password prompts when running as postgres via su
+		return AuthUnknown
 	}
	defer file.Close()

 	scanner := bufio.NewScanner(file)
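
For context, parseHbaContent itself is not shown in this diff. A minimal sketch of what such a parser typically does (function name, return values, and field handling here are assumptions, not taken from this change): skip comments, split each rule into whitespace-separated fields, and return the method of the first rule matching the user.

    // Hypothetical sketch of an hba-content parser - not this project's code.
    package main

    import "strings"

    func parseHbaContentSketch(content, user string) string {
        for _, line := range strings.Split(content, "\n") {
            line = strings.TrimSpace(line)
            if line == "" || strings.HasPrefix(line, "#") {
                continue // skip blanks and comments
            }
            f := strings.Fields(line)
            // local rules: TYPE DATABASE USER METHOD [OPTIONS]
            // host rules:  TYPE DATABASE USER ADDRESS METHOD [OPTIONS]
            switch {
            case f[0] == "local" && len(f) >= 4 && (f[2] == user || f[2] == "all"):
                return f[3] // e.g. "peer", "md5", "scram-sha-256"
            case strings.HasPrefix(f[0], "host") && len(f) >= 5 && (f[2] == user || f[2] == "all"):
                return f[4]
            }
        }
        return "unknown"
    }
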
@@ -28,6 +28,12 @@ import (
 	"dbbackup/internal/swap"
 )

+// ProgressCallback is called with byte-level progress updates during backup operations
+type ProgressCallback func(current, total int64, description string)
+
+// DatabaseProgressCallback is called with database count progress during cluster backup
+type DatabaseProgressCallback func(done, total int, dbName string)
+
 // Engine handles backup operations
 type Engine struct {
 	cfg *config.Config
@@ -36,6 +42,8 @@ type Engine struct {
 	progress progress.Indicator
 	detailedReporter *progress.DetailedReporter
 	silent bool // Silent mode for TUI
+	progressCallback ProgressCallback
+	dbProgressCallback DatabaseProgressCallback
 }

 // New creates a new backup engine
@@ -86,6 +94,30 @@ func NewSilent(cfg *config.Config, log logger.Logger, db database.Database, prog
 	}
 }

+// SetProgressCallback sets a callback for detailed progress reporting (for TUI mode)
+func (e *Engine) SetProgressCallback(cb ProgressCallback) {
+	e.progressCallback = cb
+}
+
+// SetDatabaseProgressCallback sets a callback for database count progress during cluster backup
+func (e *Engine) SetDatabaseProgressCallback(cb DatabaseProgressCallback) {
+	e.dbProgressCallback = cb
+}
+
+// reportProgress reports progress to the callback if set
+func (e *Engine) reportProgress(current, total int64, description string) {
+	if e.progressCallback != nil {
+		e.progressCallback(current, total, description)
+	}
+}
+
+// reportDatabaseProgress reports database count progress to the callback if set
+func (e *Engine) reportDatabaseProgress(done, total int, dbName string) {
+	if e.dbProgressCallback != nil {
+		e.dbProgressCallback(done, total, dbName)
+	}
+}
+
 // loggerAdapter adapts our logger to the progress.Logger interface
 type loggerAdapter struct {
 	logger logger.Logger
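
These setters are consumed later in this diff by the TUI (internal/tui/backup_exec.go). A minimal caller looks like this (config, logger, and dbClient values here are placeholders):

    engine := backup.NewSilent(cfg, log, dbClient, nil)
    engine.SetDatabaseProgressCallback(func(done, total int, dbName string) {
        // Runs on the backup goroutine; keep it cheap and thread-safe.
        log.Info("progress", "done", done, "total", total, "db", dbName)
    })
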
@@ -465,6 +497,8 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
 			estimator.UpdateProgress(idx)
 			e.printf("  [%d/%d] Backing up database: %s\n", idx+1, len(databases), name)
 			quietProgress.Update(fmt.Sprintf("Backing up database %d/%d: %s", idx+1, len(databases), name))
+			// Report database progress to TUI callback
+			e.reportDatabaseProgress(idx+1, len(databases), name)
 			mu.Unlock()

 			// Check database size and warn if very large
@@ -903,11 +937,15 @@ func (e *Engine) createSampleBackup(ctx context.Context, databaseName, outputFil
 func (e *Engine) backupGlobals(ctx context.Context, tempDir string) error {
 	globalsFile := filepath.Join(tempDir, "globals.sql")

-	cmd := exec.CommandContext(ctx, "pg_dumpall", "--globals-only")
-	if e.cfg.Host != "localhost" {
-		cmd.Args = append(cmd.Args, "-h", e.cfg.Host, "-p", fmt.Sprintf("%d", e.cfg.Port))
+	// CRITICAL: Always pass port even for localhost - user may have non-standard port
+	cmd := exec.CommandContext(ctx, "pg_dumpall", "--globals-only",
+		"-p", fmt.Sprintf("%d", e.cfg.Port),
+		"-U", e.cfg.User)
+
+	// Only add -h flag for non-localhost to use Unix socket for peer auth
+	if e.cfg.Host != "localhost" && e.cfg.Host != "127.0.0.1" && e.cfg.Host != "" {
+		cmd.Args = append([]string{cmd.Args[0], "-h", e.cfg.Host}, cmd.Args[1:]...)
 	}
-	cmd.Args = append(cmd.Args, "-U", e.cfg.User)

 	cmd.Env = os.Environ()
 	if e.cfg.Password != "" {
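
The prepend in the non-localhost branch keeps the binary name as argv[0] and slots "-h <host>" in right after it. A quick illustration of the resulting argv (host and port values are made up):

    cmd := exec.CommandContext(ctx, "pg_dumpall", "--globals-only", "-p", "5433", "-U", "postgres")
    // cmd.Args == ["pg_dumpall", "--globals-only", "-p", "5433", "-U", "postgres"]
    cmd.Args = append([]string{cmd.Args[0], "-h", "db.example.com"}, cmd.Args[1:]...)
    // cmd.Args == ["pg_dumpall", "-h", "db.example.com", "--globals-only", "-p", "5433", "-U", "postgres"]
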
@@ -316,11 +316,12 @@ func (p *PostgreSQL) BuildBackupCommand(database, outputFile string, options Bac
 	cmd := []string{"pg_dump"}

 	// Connection parameters
-	if p.cfg.Host != "localhost" {
+	// CRITICAL: Always pass port even for localhost - user may have non-standard port
+	if p.cfg.Host != "localhost" && p.cfg.Host != "127.0.0.1" && p.cfg.Host != "" {
 		cmd = append(cmd, "-h", p.cfg.Host)
-		cmd = append(cmd, "-p", strconv.Itoa(p.cfg.Port))
 		cmd = append(cmd, "--no-password")
 	}
+	cmd = append(cmd, "-p", strconv.Itoa(p.cfg.Port))
 	cmd = append(cmd, "-U", p.cfg.User)

 	// Format and compression
@@ -380,11 +381,12 @@ func (p *PostgreSQL) BuildRestoreCommand(database, inputFile string, options Res
 	cmd := []string{"pg_restore"}

 	// Connection parameters
-	if p.cfg.Host != "localhost" {
+	// CRITICAL: Always pass port even for localhost - user may have non-standard port
+	if p.cfg.Host != "localhost" && p.cfg.Host != "127.0.0.1" && p.cfg.Host != "" {
 		cmd = append(cmd, "-h", p.cfg.Host)
-		cmd = append(cmd, "-p", strconv.Itoa(p.cfg.Port))
 		cmd = append(cmd, "--no-password")
 	}
+	cmd = append(cmd, "-p", strconv.Itoa(p.cfg.Port))
 	cmd = append(cmd, "-U", p.cfg.User)

 	// Parallel jobs (incompatible with --single-transaction per PostgreSQL docs)
@@ -34,6 +34,10 @@ type ProgressCallback func(current, total int64, description string)
 // DatabaseProgressCallback is called with database count progress during cluster restore
 type DatabaseProgressCallback func(done, total int, dbName string)

+// DatabaseProgressWithTimingCallback is called with database progress including timing info
+// Parameters: done count, total count, database name, elapsed time for current restore phase, avg duration per DB
+type DatabaseProgressWithTimingCallback func(done, total int, dbName string, phaseElapsed, avgPerDB time.Duration)
+
 // Engine handles database restore operations
 type Engine struct {
 	cfg *config.Config
@@ -47,6 +51,7 @@ type Engine struct {
 	// TUI progress callback for detailed progress reporting
 	progressCallback ProgressCallback
 	dbProgressCallback DatabaseProgressCallback
+	dbProgressTimingCallback DatabaseProgressWithTimingCallback
 }

 // New creates a new restore engine
@@ -112,6 +117,11 @@ func (e *Engine) SetDatabaseProgressCallback(cb DatabaseProgressCallback) {
 	e.dbProgressCallback = cb
 }

+// SetDatabaseProgressWithTimingCallback sets a callback for database progress with timing info
+func (e *Engine) SetDatabaseProgressWithTimingCallback(cb DatabaseProgressWithTimingCallback) {
+	e.dbProgressTimingCallback = cb
+}
+
 // reportProgress safely calls the progress callback if set
 func (e *Engine) reportProgress(current, total int64, description string) {
 	if e.progressCallback != nil {
@@ -126,6 +136,13 @@ func (e *Engine) reportDatabaseProgress(done, total int, dbName string) {
 	}
 }

+// reportDatabaseProgressWithTiming safely calls the timing-aware callback if set
+func (e *Engine) reportDatabaseProgressWithTiming(done, total int, dbName string, phaseElapsed, avgPerDB time.Duration) {
+	if e.dbProgressTimingCallback != nil {
+		e.dbProgressTimingCallback(done, total, dbName, phaseElapsed, avgPerDB)
+	}
+}
+
 // loggerAdapter adapts our logger to the progress.Logger interface
 type loggerAdapter struct {
 	logger logger.Logger
@@ -425,16 +442,18 @@ func (e *Engine) restorePostgreSQLSQL(ctx context.Context, archivePath, targetDB
 	var cmd []string

 	// For localhost, omit -h to use Unix socket (avoids Ident auth issues)
+	// But always include -p for port (in case of non-standard port)
 	hostArg := ""
+	portArg := fmt.Sprintf("-p %d", e.cfg.Port)
 	if e.cfg.Host != "localhost" && e.cfg.Host != "" {
-		hostArg = fmt.Sprintf("-h %s -p %d", e.cfg.Host, e.cfg.Port)
+		hostArg = fmt.Sprintf("-h %s", e.cfg.Host)
 	}

 	if compressed {
 		// Use ON_ERROR_STOP=1 to fail fast on first error (prevents millions of errors on truncated dumps)
-		psqlCmd := fmt.Sprintf("psql -U %s -d %s -v ON_ERROR_STOP=1", e.cfg.User, targetDB)
+		psqlCmd := fmt.Sprintf("psql %s -U %s -d %s -v ON_ERROR_STOP=1", portArg, e.cfg.User, targetDB)
 		if hostArg != "" {
-			psqlCmd = fmt.Sprintf("psql %s -U %s -d %s -v ON_ERROR_STOP=1", hostArg, e.cfg.User, targetDB)
+			psqlCmd = fmt.Sprintf("psql %s %s -U %s -d %s -v ON_ERROR_STOP=1", hostArg, portArg, e.cfg.User, targetDB)
 		}
 		// Set PGPASSWORD in the bash command for password-less auth
 		cmd = []string{
@@ -455,6 +474,7 @@ func (e *Engine) restorePostgreSQLSQL(ctx context.Context, archivePath, targetDB
 	} else {
 		cmd = []string{
 			"psql",
+			"-p", fmt.Sprintf("%d", e.cfg.Port),
 			"-U", e.cfg.User,
 			"-d", targetDB,
 			"-v", "ON_ERROR_STOP=1",
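
With these changes a localhost restore on a non-standard port produces a command along the lines of "psql -p 5433 -U postgres -d mydb -v ON_ERROR_STOP=1" (no -h, so the Unix socket and peer auth are used), while a remote restore gets "psql -h db.example.com -p 5433 -U postgres -d mydb -v ON_ERROR_STOP=1". Host, port, and database names in this aside are illustrative, not from the diff.
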
@@ -1037,6 +1057,11 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
 	var successCount, failCount int32
 	var mu sync.Mutex // Protect shared resources (progress, logger)

+	// Timing tracking for restore phase progress
+	restorePhaseStart := time.Now()
+	var completedDBTimes []time.Duration // Track duration for each completed DB restore
+	var completedDBTimesMu sync.Mutex
+
 	// Create semaphore to limit concurrency
 	semaphore := make(chan struct{}, parallelism)
 	var wg sync.WaitGroup
@@ -1062,6 +1087,19 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
 				}
 			}()

+			// Check for context cancellation before starting
+			if ctx.Err() != nil {
+				e.log.Warn("Context cancelled - skipping database restore", "file", filename)
+				atomic.AddInt32(&failCount, 1)
+				restoreErrorsMu.Lock()
+				restoreErrors = multierror.Append(restoreErrors, fmt.Errorf("%s: restore skipped (context cancelled)", strings.TrimSuffix(strings.TrimSuffix(filename, ".dump"), ".sql.gz")))
+				restoreErrorsMu.Unlock()
+				return
+			}
+
+			// Track timing for this database restore
+			dbRestoreStart := time.Now()
+
 			// Update estimator progress (thread-safe)
 			mu.Lock()
 			estimator.UpdateProgress(idx)
@@ -1074,12 +1112,26 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {

 			dbProgress := 15 + int(float64(idx)/float64(totalDBs)*85.0)

+			// Calculate average time per DB and report progress with timing
+			completedDBTimesMu.Lock()
+			var avgPerDB time.Duration
+			if len(completedDBTimes) > 0 {
+				var totalDuration time.Duration
+				for _, d := range completedDBTimes {
+					totalDuration += d
+				}
+				avgPerDB = totalDuration / time.Duration(len(completedDBTimes))
+			}
+			phaseElapsed := time.Since(restorePhaseStart)
+			completedDBTimesMu.Unlock()
+
 			mu.Lock()
 			statusMsg := fmt.Sprintf("Restoring database %s (%d/%d)", dbName, idx+1, totalDBs)
 			e.progress.Update(statusMsg)
 			e.log.Info("Restoring database", "name", dbName, "file", dumpFile, "progress", dbProgress)
-			// Report database progress for TUI
+			// Report database progress for TUI (both callbacks)
 			e.reportDatabaseProgress(idx, totalDBs, dbName)
+			e.reportDatabaseProgressWithTiming(idx, totalDBs, dbName, phaseElapsed, avgPerDB)
 			mu.Unlock()

 			// STEP 1: Drop existing database completely (clean slate)
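
A worked example of the averaging (numbers invented for illustration): if three databases completed in 40s, 50s, and 60s, avgPerDB is 50s; with 7 of 10 databases still to go, the ETA the TUI derives from this (see renderDatabaseProgressBarWithTiming later in this diff) is 7 × 50s = 5m50s.
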
@@ -1144,6 +1196,12 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
 				return
 			}

+			// Track completed database restore duration for ETA calculation
+			dbRestoreDuration := time.Since(dbRestoreStart)
+			completedDBTimesMu.Lock()
+			completedDBTimes = append(completedDBTimes, dbRestoreDuration)
+			completedDBTimesMu.Unlock()
+
 			atomic.AddInt32(&successCount, 1)
 		}(dbIndex, entry.Name())
@@ -1156,6 +1214,35 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
 	successCountFinal := int(atomic.LoadInt32(&successCount))
 	failCountFinal := int(atomic.LoadInt32(&failCount))

+	// SANITY CHECK: Verify all databases were accounted for
+	// This catches any goroutine that exited without updating counters
+	accountedFor := successCountFinal + failCountFinal
+	if accountedFor != totalDBs {
+		missingCount := totalDBs - accountedFor
+		e.log.Error("INTERNAL ERROR: Some database restore goroutines did not report status",
+			"expected", totalDBs,
+			"success", successCountFinal,
+			"failed", failCountFinal,
+			"unaccounted", missingCount)
+
+		// Treat unaccounted databases as failures
+		failCountFinal += missingCount
+		restoreErrorsMu.Lock()
+		restoreErrors = multierror.Append(restoreErrors, fmt.Errorf("%d database(s) did not complete (possible goroutine crash or deadlock)", missingCount))
+		restoreErrorsMu.Unlock()
+	}
+
+	// CRITICAL: Check if no databases were restored at all
+	if successCountFinal == 0 {
+		e.progress.Fail(fmt.Sprintf("Cluster restore FAILED: 0 of %d databases restored", totalDBs))
+		operation.Fail("No databases were restored")
+
+		if failCountFinal > 0 && restoreErrors != nil {
+			return fmt.Errorf("cluster restore failed: all %d database(s) failed:\n%s", failCountFinal, restoreErrors.Error())
+		}
+		return fmt.Errorf("cluster restore failed: no databases were restored (0 of %d total). Check PostgreSQL logs for details", totalDBs)
+	}
+
 	if failCountFinal > 0 {
 		// Format multi-error with detailed output
 		restoreErrors.ErrorFormat = func(errs []error) string {
@@ -1375,6 +1462,8 @@ func (e *Engine) extractArchiveShell(ctx context.Context, archivePath, destDir s
 }

 // restoreGlobals restores global objects (roles, tablespaces)
+// Note: psql returns 0 even when some statements fail (e.g., role already exists)
+// We track errors but only fail on FATAL errors that would prevent restore
 func (e *Engine) restoreGlobals(ctx context.Context, globalsFile string) error {
 	args := []string{
 		"-p", fmt.Sprintf("%d", e.cfg.Port),
@@ -1404,6 +1493,8 @@ func (e *Engine) restoreGlobals(ctx context.Context, globalsFile string) error {

 	// Read stderr in chunks in goroutine
 	var lastError string
+	var errorCount int
+	var fatalError bool
 	stderrDone := make(chan struct{})
 	go func() {
 		defer close(stderrDone)
@@ -1412,9 +1503,23 @@ func (e *Engine) restoreGlobals(ctx context.Context, globalsFile string) error {
 			n, err := stderr.Read(buf)
 			if n > 0 {
 				chunk := string(buf[:n])
-				if strings.Contains(chunk, "ERROR") || strings.Contains(chunk, "FATAL") {
+				// Track different error types
+				if strings.Contains(chunk, "FATAL") {
+					fatalError = true
 					lastError = chunk
-					e.log.Warn("Globals restore stderr", "output", chunk)
+					e.log.Error("Globals restore FATAL error", "output", chunk)
+				} else if strings.Contains(chunk, "ERROR") {
+					errorCount++
+					lastError = chunk
+					// Only log first few errors to avoid spam
+					if errorCount <= 5 {
+						// Check if it's an ignorable "already exists" error
+						if strings.Contains(chunk, "already exists") {
+							e.log.Debug("Globals restore: object already exists (expected)", "output", chunk)
+						} else {
+							e.log.Warn("Globals restore error", "output", chunk)
+						}
+					}
 				}
 			}
 			if err != nil {
@@ -1442,10 +1547,23 @@ func (e *Engine) restoreGlobals(ctx context.Context, globalsFile string) error {

 	<-stderrDone

+	// Only fail on actual command errors or FATAL PostgreSQL errors
+	// Regular ERROR messages (like "role already exists") are expected
 	if cmdErr != nil {
 		return fmt.Errorf("failed to restore globals: %w (last error: %s)", cmdErr, lastError)
 	}

+	// If we had FATAL errors, those are real problems
+	if fatalError {
+		return fmt.Errorf("globals restore had FATAL error: %s", lastError)
+	}
+
+	// Log summary if there were errors (but don't fail)
+	if errorCount > 0 {
+		e.log.Info("Globals restore completed with some errors (usually 'already exists' - expected)",
+			"error_count", errorCount)
+	}
+
 	return nil
 }
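
The triage rule these hunks implement, restated as a small pure function (a sketch for clarity, not code from the diff):

    // classifyGlobalsStderr mirrors the decision table above:
    // FATAL -> abort, ERROR + "already exists" -> ignore, other ERROR -> warn.
    func classifyGlobalsStderr(chunk string) string {
        switch {
        case strings.Contains(chunk, "FATAL"):
            return "fatal"
        case strings.Contains(chunk, "ERROR") && strings.Contains(chunk, "already exists"):
            return "ignorable"
        case strings.Contains(chunk, "ERROR"):
            return "warn"
        default:
            return "ok"
        }
    }
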
@@ -1513,6 +1631,7 @@ func (e *Engine) terminateConnections(ctx context.Context, dbName string) error
 }

 // dropDatabaseIfExists drops a database completely (clean slate)
+// Uses PostgreSQL 13+ WITH (FORCE) option to forcefully drop even with active connections
 func (e *Engine) dropDatabaseIfExists(ctx context.Context, dbName string) error {
 	// First terminate all connections
 	if err := e.terminateConnections(ctx, dbName); err != nil {
@@ -1522,28 +1641,69 @@ func (e *Engine) dropDatabaseIfExists(ctx context.Context, dbName string) error
 	// Wait a moment for connections to terminate
 	time.Sleep(500 * time.Millisecond)

-	// Drop the database
+	// Try to revoke new connections (prevents race condition)
+	// This only works if we have the privilege to do so
+	revokeArgs := []string{
+		"-p", fmt.Sprintf("%d", e.cfg.Port),
+		"-U", e.cfg.User,
+		"-d", "postgres",
+		"-c", fmt.Sprintf("REVOKE CONNECT ON DATABASE \"%s\" FROM PUBLIC", dbName),
+	}
+	if e.cfg.Host != "localhost" && e.cfg.Host != "127.0.0.1" && e.cfg.Host != "" {
+		revokeArgs = append([]string{"-h", e.cfg.Host}, revokeArgs...)
+	}
+	revokeCmd := exec.CommandContext(ctx, "psql", revokeArgs...)
+	revokeCmd.Env = append(os.Environ(), fmt.Sprintf("PGPASSWORD=%s", e.cfg.Password))
+	revokeCmd.Run() // Ignore errors - database might not exist
+
+	// Terminate connections again after revoking connect privilege
+	e.terminateConnections(ctx, dbName)
+	time.Sleep(200 * time.Millisecond)
+
+	// Try DROP DATABASE WITH (FORCE) first (PostgreSQL 13+)
+	// This forcefully terminates connections and drops the database atomically
+	forceArgs := []string{
+		"-p", fmt.Sprintf("%d", e.cfg.Port),
+		"-U", e.cfg.User,
+		"-d", "postgres",
+		"-c", fmt.Sprintf("DROP DATABASE IF EXISTS \"%s\" WITH (FORCE)", dbName),
+	}
+	if e.cfg.Host != "localhost" && e.cfg.Host != "127.0.0.1" && e.cfg.Host != "" {
+		forceArgs = append([]string{"-h", e.cfg.Host}, forceArgs...)
+	}
+	forceCmd := exec.CommandContext(ctx, "psql", forceArgs...)
+	forceCmd.Env = append(os.Environ(), fmt.Sprintf("PGPASSWORD=%s", e.cfg.Password))
+
+	output, err := forceCmd.CombinedOutput()
+	if err == nil {
+		e.log.Info("Dropped existing database (with FORCE)", "name", dbName)
+		return nil
+	}
+
+	// If FORCE option failed (PostgreSQL < 13), try regular drop
+	if strings.Contains(string(output), "syntax error") || strings.Contains(string(output), "WITH (FORCE)") {
+		e.log.Debug("WITH (FORCE) not supported, using standard DROP", "name", dbName)
+
 	args := []string{
 		"-p", fmt.Sprintf("%d", e.cfg.Port),
 		"-U", e.cfg.User,
 		"-d", "postgres",
 		"-c", fmt.Sprintf("DROP DATABASE IF EXISTS \"%s\"", dbName),
 	}

 	// Only add -h flag if host is not localhost (to use Unix socket for peer auth)
 	if e.cfg.Host != "localhost" && e.cfg.Host != "127.0.0.1" && e.cfg.Host != "" {
 		args = append([]string{"-h", e.cfg.Host}, args...)
 	}

 	cmd := exec.CommandContext(ctx, "psql", args...)

 	// Always set PGPASSWORD (empty string is fine for peer/ident auth)
 	cmd.Env = append(os.Environ(), fmt.Sprintf("PGPASSWORD=%s", e.cfg.Password))

-	output, err := cmd.CombinedOutput()
+	output, err = cmd.CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("failed to drop database '%s': %w\nOutput: %s", dbName, err, string(output))
 	}
+	} else if err != nil {
+		return fmt.Errorf("failed to drop database '%s': %w\nOutput: %s", dbName, err, string(output))
+	}

 	e.log.Info("Dropped existing database", "name", dbName)
 	return nil
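
Net effect: the drop path now issues, in order, "REVOKE CONNECT ON DATABASE ... FROM PUBLIC" (failures ignored), another round of connection termination via terminateConnections, then "DROP DATABASE IF EXISTS ... WITH (FORCE)", and only falls back to a plain "DROP DATABASE IF EXISTS ..." when the server rejects the FORCE syntax (pre-13 servers).
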
@@ -1584,12 +1744,14 @@ func (e *Engine) ensureMySQLDatabaseExists(ctx context.Context, dbName string) e
 }

 // ensurePostgresDatabaseExists checks if a PostgreSQL database exists and creates it if not
+// It attempts to extract encoding/locale from the dump file to preserve original settings
 func (e *Engine) ensurePostgresDatabaseExists(ctx context.Context, dbName string) error {
 	// Skip creation for postgres and template databases - they should already exist
 	if dbName == "postgres" || dbName == "template0" || dbName == "template1" {
 		e.log.Info("Skipping create for system database (assume exists)", "name", dbName)
 		return nil
 	}

 	// Build psql command with authentication
 	buildPsqlCmd := func(ctx context.Context, database, query string) *exec.Cmd {
 		args := []string{
@@ -1629,14 +1791,31 @@ func (e *Engine) ensurePostgresDatabaseExists(ctx context.Context, dbName string

 	// Database doesn't exist, create it
 	// IMPORTANT: Use template0 to avoid duplicate definition errors from local additions to template1
+	// Also use UTF8 encoding explicitly as it's the most common and safest choice
 	// See PostgreSQL docs: https://www.postgresql.org/docs/current/app-pgrestore.html#APP-PGRESTORE-NOTES
-	e.log.Info("Creating database from template0", "name", dbName)
+	e.log.Info("Creating database from template0 with UTF8 encoding", "name", dbName)
+
+	// Get server's default locale for LC_COLLATE and LC_CTYPE
+	// This ensures compatibility while using the correct encoding
+	localeCmd := buildPsqlCmd(ctx, "postgres", "SHOW lc_collate")
+	localeOutput, _ := localeCmd.CombinedOutput()
+	serverLocale := strings.TrimSpace(string(localeOutput))
+	if serverLocale == "" {
+		serverLocale = "en_US.UTF-8" // Fallback to common default
+	}
+
+	// Build CREATE DATABASE command with encoding and locale
+	// Using ENCODING 'UTF8' explicitly ensures the dump can be restored
+	createSQL := fmt.Sprintf(
+		"CREATE DATABASE \"%s\" WITH TEMPLATE template0 ENCODING 'UTF8' LC_COLLATE '%s' LC_CTYPE '%s'",
+		dbName, serverLocale, serverLocale,
+	)

 	createArgs := []string{
 		"-p", fmt.Sprintf("%d", e.cfg.Port),
 		"-U", e.cfg.User,
 		"-d", "postgres",
-		"-c", fmt.Sprintf("CREATE DATABASE \"%s\" WITH TEMPLATE template0", dbName),
+		"-c", createSQL,
 	}

 	// Only add -h flag if host is not localhost (to use Unix socket for peer auth)
@@ -1651,10 +1830,28 @@ func (e *Engine) ensurePostgresDatabaseExists(ctx context.Context, dbName string

 	output, err = createCmd.CombinedOutput()
 	if err != nil {
-		// Log the error and include the psql output in the returned error to aid debugging
+		// If encoding/locale fails, try simpler CREATE DATABASE
+		e.log.Warn("Database creation with encoding failed, trying simple create", "name", dbName, "error", err)
+
+		simpleArgs := []string{
+			"-p", fmt.Sprintf("%d", e.cfg.Port),
+			"-U", e.cfg.User,
+			"-d", "postgres",
+			"-c", fmt.Sprintf("CREATE DATABASE \"%s\" WITH TEMPLATE template0", dbName),
+		}
+		if e.cfg.Host != "localhost" && e.cfg.Host != "127.0.0.1" && e.cfg.Host != "" {
+			simpleArgs = append([]string{"-h", e.cfg.Host}, simpleArgs...)
+		}
+
+		simpleCmd := exec.CommandContext(ctx, "psql", simpleArgs...)
+		simpleCmd.Env = append(os.Environ(), fmt.Sprintf("PGPASSWORD=%s", e.cfg.Password))
+
+		output, err = simpleCmd.CombinedOutput()
+		if err != nil {
 			e.log.Warn("Database creation failed", "name", dbName, "error", err, "output", string(output))
 			return fmt.Errorf("failed to create database '%s': %w (output: %s)", dbName, err, strings.TrimSpace(string(output)))
+		}
 	}

 	e.log.Info("Successfully created database from template0", "name", dbName)
 	return nil
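
With a server whose lc_collate is en_US.UTF-8 and a dump for a database named "app" (both values illustrative), createSQL comes out as:

    CREATE DATABASE "app" WITH TEMPLATE template0 ENCODING 'UTF8' LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8'
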
@@ -1937,6 +2134,8 @@ type OriginalSettings struct {
 }

 // boostPostgreSQLSettings boosts multiple PostgreSQL settings for large restores
+// NOTE: max_locks_per_transaction requires a PostgreSQL RESTART to take effect!
+// maintenance_work_mem can be changed with pg_reload_conf().
 func (e *Engine) boostPostgreSQLSettings(ctx context.Context, lockBoostValue int) (*OriginalSettings, error) {
 	connStr := e.buildConnString()
 	db, err := sql.Open("pgx", connStr)
@@ -1956,30 +2155,156 @@ func (e *Engine) boostPostgreSQLSettings(ctx context.Context, lockBoostValue int
 	// Get current maintenance_work_mem
 	db.QueryRowContext(ctx, "SHOW maintenance_work_mem").Scan(&original.MaintenanceWorkMem)

-	// Boost max_locks_per_transaction (if not already high enough)
+	// CRITICAL: max_locks_per_transaction requires a PostgreSQL RESTART!
+	// pg_reload_conf() is NOT sufficient for this parameter.
+	needsRestart := false
 	if original.MaxLocks < lockBoostValue {
 		_, err = db.ExecContext(ctx, fmt.Sprintf("ALTER SYSTEM SET max_locks_per_transaction = %d", lockBoostValue))
 		if err != nil {
-			e.log.Warn("Could not boost max_locks_per_transaction", "error", err)
+			e.log.Warn("Could not set max_locks_per_transaction", "error", err)
+		} else {
+			needsRestart = true
+			e.log.Warn("max_locks_per_transaction requires PostgreSQL restart to take effect",
+				"current", original.MaxLocks,
+				"target", lockBoostValue)
 		}
 	}

 	// Boost maintenance_work_mem to 2GB for faster index creation
+	// (this one CAN be applied via pg_reload_conf)
 	_, err = db.ExecContext(ctx, "ALTER SYSTEM SET maintenance_work_mem = '2GB'")
 	if err != nil {
 		e.log.Warn("Could not boost maintenance_work_mem", "error", err)
 	}

-	// Reload config to apply changes (no restart needed for these settings)
+	// Reload config to apply maintenance_work_mem
 	_, err = db.ExecContext(ctx, "SELECT pg_reload_conf()")
 	if err != nil {
 		return original, fmt.Errorf("failed to reload config: %w", err)
 	}

+	// If max_locks_per_transaction needs a restart, try to do it
+	if needsRestart {
+		if restarted := e.tryRestartPostgreSQL(ctx); restarted {
+			e.log.Info("PostgreSQL restarted successfully - max_locks_per_transaction now active")
+			// Wait for PostgreSQL to be ready
+			time.Sleep(3 * time.Second)
+		} else {
+			// Cannot restart - warn user but continue
+			// The setting is written to postgresql.auto.conf and will take effect on next restart
+			e.log.Warn("=" + strings.Repeat("=", 70))
+			e.log.Warn("NOTE: max_locks_per_transaction change requires PostgreSQL restart")
+			e.log.Warn("Current value: " + strconv.Itoa(original.MaxLocks) + ", target: " + strconv.Itoa(lockBoostValue))
+			e.log.Warn("")
+			e.log.Warn("The setting has been saved to postgresql.auto.conf and will take")
+			e.log.Warn("effect on the next PostgreSQL restart. If restore fails with")
+			e.log.Warn("'out of shared memory' errors, ask your DBA to restart PostgreSQL.")
+			e.log.Warn("")
+			e.log.Warn("Continuing with restore - this may succeed if your databases")
+			e.log.Warn("don't have many large objects (BLOBs).")
+			e.log.Warn("=" + strings.Repeat("=", 70))
+			// Continue anyway - might work for small restores or DBs without BLOBs
+		}
+	}
+
 	return original, nil
 }

+// canRestartPostgreSQL checks if we have the ability to restart PostgreSQL
+// Returns false if running in a restricted environment (e.g., su postgres on enterprise systems)
+func (e *Engine) canRestartPostgreSQL() bool {
+	// Check if we're running as postgres user - if so, we likely can't restart
+	// because PostgreSQL is managed by init/systemd, not directly by pg_ctl
+	currentUser := os.Getenv("USER")
+	if currentUser == "" {
+		currentUser = os.Getenv("LOGNAME")
+	}
+
+	// If we're the postgres user, check if we have sudo access
+	if currentUser == "postgres" {
+		// Try a quick sudo check - if this fails, we can't restart
+		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+		defer cancel()
+		cmd := exec.CommandContext(ctx, "sudo", "-n", "true")
+		cmd.Stdin = nil
+		if err := cmd.Run(); err != nil {
+			e.log.Info("Running as postgres user without sudo access - cannot restart PostgreSQL",
+				"user", currentUser,
+				"hint", "Ask system administrator to restart PostgreSQL if needed")
+			return false
+		}
+	}
+
+	return true
+}
+
+// tryRestartPostgreSQL attempts to restart PostgreSQL using various methods
+// Returns true if restart was successful
+// IMPORTANT: Uses short timeouts and non-interactive sudo to avoid blocking on password prompts
+// NOTE: This function will return false immediately if running as postgres without sudo
+func (e *Engine) tryRestartPostgreSQL(ctx context.Context) bool {
+	// First check if we can even attempt a restart
+	if !e.canRestartPostgreSQL() {
+		e.log.Info("Skipping PostgreSQL restart attempt (no privileges)")
+		return false
+	}
+
+	e.progress.Update("Attempting PostgreSQL restart for lock settings...")
+
+	// Use short timeout for each restart attempt (don't block on sudo password prompts)
+	runWithTimeout := func(args ...string) bool {
+		cmdCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		cmd := exec.CommandContext(cmdCtx, args[0], args[1:]...)
+		// Set stdin to /dev/null to prevent sudo from waiting for password
+		cmd.Stdin = nil
+		return cmd.Run() == nil
+	}
+
+	// Method 1: systemctl (most common on modern Linux) - use sudo -n for non-interactive
+	if runWithTimeout("sudo", "-n", "systemctl", "restart", "postgresql") {
+		return true
+	}
+
+	// Method 2: systemctl with version suffix (e.g., postgresql-15)
+	for _, ver := range []string{"17", "16", "15", "14", "13", "12"} {
+		if runWithTimeout("sudo", "-n", "systemctl", "restart", "postgresql-"+ver) {
+			return true
+		}
+	}
+
+	// Method 3: service command (older systems)
+	if runWithTimeout("sudo", "-n", "service", "postgresql", "restart") {
+		return true
+	}
+
+	// Method 4: pg_ctl as postgres user (if we ARE postgres user, no sudo needed)
+	if runWithTimeout("pg_ctl", "restart", "-D", "/var/lib/postgresql/data", "-m", "fast") {
+		return true
+	}
+
+	// Method 5: Try common PGDATA paths with pg_ctl directly (for postgres user)
+	pgdataPaths := []string{
+		"/var/lib/pgsql/data",
+		"/var/lib/pgsql/17/data",
+		"/var/lib/pgsql/16/data",
+		"/var/lib/pgsql/15/data",
+		"/var/lib/postgresql/17/main",
+		"/var/lib/postgresql/16/main",
+		"/var/lib/postgresql/15/main",
+	}
+	for _, pgdata := range pgdataPaths {
+		if runWithTimeout("pg_ctl", "restart", "-D", pgdata, "-m", "fast") {
+			return true
+		}
+	}
+
+	return false
+}
+
 // resetPostgreSQLSettings restores original PostgreSQL settings
+// NOTE: max_locks_per_transaction changes are written but require restart to take effect.
+// We don't restart here since we're done with the restore.
 func (e *Engine) resetPostgreSQLSettings(ctx context.Context, original *OriginalSettings) error {
 	connStr := e.buildConnString()
 	db, err := sql.Open("pgx", connStr)
@@ -1988,25 +2313,28 @@ func (e *Engine) resetPostgreSQLSettings(ctx context.Context, original *Original
 	}
 	defer db.Close()

-	// Reset max_locks_per_transaction
+	// Reset max_locks_per_transaction (will take effect on next restart)
 	if original.MaxLocks == 64 { // Default
 		db.ExecContext(ctx, "ALTER SYSTEM RESET max_locks_per_transaction")
 	} else if original.MaxLocks > 0 {
 		db.ExecContext(ctx, fmt.Sprintf("ALTER SYSTEM SET max_locks_per_transaction = %d", original.MaxLocks))
 	}

-	// Reset maintenance_work_mem
+	// Reset maintenance_work_mem (takes effect immediately with reload)
 	if original.MaintenanceWorkMem == "64MB" { // Default
 		db.ExecContext(ctx, "ALTER SYSTEM RESET maintenance_work_mem")
 	} else if original.MaintenanceWorkMem != "" {
 		db.ExecContext(ctx, fmt.Sprintf("ALTER SYSTEM SET maintenance_work_mem = '%s'", original.MaintenanceWorkMem))
 	}

-	// Reload config
+	// Reload config (only maintenance_work_mem will take effect immediately)
 	_, err = db.ExecContext(ctx, "SELECT pg_reload_conf()")
 	if err != nil {
 		return fmt.Errorf("failed to reload config: %w", err)
 	}

+	e.log.Info("PostgreSQL settings reset queued",
+		"note", "max_locks_per_transaction will revert on next PostgreSQL restart")
+
 	return nil
 }
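
One way to confirm whether the lock setting is live or merely queued (a hedged aside, not part of this diff): PostgreSQL exposes a pending_restart column in pg_settings.

    // Sketch: check whether max_locks_per_transaction still awaits a restart.
    var pending bool
    err := db.QueryRowContext(ctx,
        "SELECT pending_restart FROM pg_settings WHERE name = 'max_locks_per_transaction'",
    ).Scan(&pending)
    if err == nil && pending {
        // The value in postgresql.auto.conf differs from the running value.
    }
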
@@ -201,10 +201,19 @@ func (e *Engine) checkPostgreSQL(ctx context.Context, result *PreflightResult) {
 		result.PostgreSQL.IsSuperuser = isSuperuser
 	}

-	// Add info/warnings
+	// CRITICAL: max_locks_per_transaction requires PostgreSQL RESTART to change!
+	// Warn users loudly about this - it's the #1 cause of "out of shared memory" errors
 	if result.PostgreSQL.MaxLocksPerTransaction < 256 {
-		e.log.Info("PostgreSQL max_locks_per_transaction is low - will auto-boost",
-			"current", result.PostgreSQL.MaxLocksPerTransaction)
+		e.log.Warn("PostgreSQL max_locks_per_transaction is LOW",
+			"current", result.PostgreSQL.MaxLocksPerTransaction,
+			"recommended", "256+",
+			"note", "REQUIRES PostgreSQL restart to change!")
+
+		result.Warnings = append(result.Warnings,
+			fmt.Sprintf("max_locks_per_transaction=%d is low (recommend 256+). "+
+				"This setting requires PostgreSQL RESTART to change. "+
+				"BLOB-heavy databases may fail with 'out of shared memory' error.",
+				result.PostgreSQL.MaxLocksPerTransaction))
 	}

 	// Parse shared_buffers and warn if very low
internal/tui/backup_exec.go (207 changed lines, Executable file → Normal file)
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"strings"
+	"sync"
 	"time"

 	tea "github.com/charmbracelet/bubbletea"
@@ -33,6 +34,56 @@ type BackupExecutionModel struct {
 	startTime time.Time
 	details []string
 	spinnerFrame int
+
+	// Database count progress (for cluster backup)
+	dbTotal int
+	dbDone int
+	dbName string // Current database being backed up
 }

+// sharedBackupProgressState holds progress state that can be safely accessed from callbacks
+type sharedBackupProgressState struct {
+	mu sync.Mutex
+	dbTotal int
+	dbDone int
+	dbName string
+	hasUpdate bool
+}
+
+// Package-level shared progress state for backup operations
+var (
+	currentBackupProgressMu sync.Mutex
+	currentBackupProgressState *sharedBackupProgressState
+)
+
+func setCurrentBackupProgress(state *sharedBackupProgressState) {
+	currentBackupProgressMu.Lock()
+	defer currentBackupProgressMu.Unlock()
+	currentBackupProgressState = state
+}
+
+func clearCurrentBackupProgress() {
+	currentBackupProgressMu.Lock()
+	defer currentBackupProgressMu.Unlock()
+	currentBackupProgressState = nil
+}
+
+func getCurrentBackupProgress() (dbTotal, dbDone int, dbName string, hasUpdate bool) {
+	currentBackupProgressMu.Lock()
+	defer currentBackupProgressMu.Unlock()
+
+	if currentBackupProgressState == nil {
+		return 0, 0, "", false
+	}
+
+	currentBackupProgressState.mu.Lock()
+	defer currentBackupProgressState.mu.Unlock()
+
+	hasUpdate = currentBackupProgressState.hasUpdate
+	currentBackupProgressState.hasUpdate = false
+
+	return currentBackupProgressState.dbTotal, currentBackupProgressState.dbDone,
+		currentBackupProgressState.dbName, hasUpdate
+}
+
 func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model, ctx context.Context, backupType, dbName string, ratio int) BackupExecutionModel {
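
The package-level state exists because the backup engine's callbacks run on a worker goroutine and cannot push bubbletea messages directly; the TUI polls the state on its tick instead. A minimal sketch of such a tick command (backupTickCmd itself is not shown in this diff, so its shape and the message type are assumptions):

    // Hypothetical tick command: fires roughly 10x per second;
    // Update() then calls getCurrentBackupProgress() on each tick.
    func backupTickCmdSketch() tea.Cmd {
        return tea.Tick(100*time.Millisecond, func(t time.Time) tea.Msg {
            return backupTickMsg(t) // assumed message type
        })
    }
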
@@ -55,7 +106,6 @@ func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model,
 }

 func (m BackupExecutionModel) Init() tea.Cmd {
-	// TUI handles all display through View() - no progress callbacks needed
 	return tea.Batch(
 		executeBackupWithTUIProgress(m.ctx, m.config, m.logger, m.backupType, m.databaseName, m.ratio),
 		backupTickCmd(),
@@ -91,6 +141,11 @@ func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config,

 	start := time.Now()

+	// Setup shared progress state for TUI polling
+	progressState := &sharedBackupProgressState{}
+	setCurrentBackupProgress(progressState)
+	defer clearCurrentBackupProgress()
+
 	dbClient, err := database.New(cfg, log)
 	if err != nil {
 		return backupCompleteMsg{
@@ -110,6 +165,16 @@ func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config,
 	// Pass nil as indicator - TUI itself handles all display, no stdout printing
 	engine := backup.NewSilent(cfg, log, dbClient, nil)

+	// Set database progress callback for cluster backups
+	engine.SetDatabaseProgressCallback(func(done, total int, currentDB string) {
+		progressState.mu.Lock()
+		progressState.dbDone = done
+		progressState.dbTotal = total
+		progressState.dbName = currentDB
+		progressState.hasUpdate = true
+		progressState.mu.Unlock()
+	})
+
 	var backupErr error
 	switch backupType {
 	case "single":
@@ -157,10 +222,21 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		// Increment spinner frame for smooth animation
 		m.spinnerFrame = (m.spinnerFrame + 1) % len(spinnerFrames)

-		// Update status based on elapsed time to show progress
+		// Poll for database progress updates from callbacks
+		dbTotal, dbDone, dbName, hasUpdate := getCurrentBackupProgress()
+		if hasUpdate {
+			m.dbTotal = dbTotal
+			m.dbDone = dbDone
+			m.dbName = dbName
+		}
+
+		// Update status based on progress and elapsed time
 		elapsedSec := int(time.Since(m.startTime).Seconds())

-		if elapsedSec < 2 {
+		if m.dbTotal > 0 && m.dbDone > 0 {
+			// We have real progress from cluster backup
+			m.status = fmt.Sprintf("Backing up database: %s", m.dbName)
+		} else if elapsedSec < 2 {
 			m.status = "Initializing backup..."
 		} else if elapsedSec < 5 {
 			if m.backupType == "cluster" {
@@ -234,6 +310,34 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	return m, nil
 }

+// renderBackupDatabaseProgressBar renders a progress bar for database count progress
+func renderBackupDatabaseProgressBar(done, total int, dbName string, width int) string {
+	if total == 0 {
+		return ""
+	}
+
+	// Calculate progress percentage
+	percent := float64(done) / float64(total)
+	if percent > 1.0 {
+		percent = 1.0
+	}
+
+	// Calculate filled width
+	barWidth := width - 20 // Leave room for label and percentage
+	if barWidth < 10 {
+		barWidth = 10
+	}
+	filled := int(float64(barWidth) * percent)
+	if filled > barWidth {
+		filled = barWidth
+	}
+
+	// Build progress bar
+	bar := strings.Repeat("█", filled) + strings.Repeat("░", barWidth-filled)
+
+	return fmt.Sprintf(" Database: [%s] %d/%d", bar, done, total)
+}
+
 func (m BackupExecutionModel) View() string {
 	var s strings.Builder
 	s.Grow(512) // Pre-allocate estimated capacity for better performance
@@ -255,31 +359,104 @@ func (m BackupExecutionModel) View() string {
 	s.WriteString(fmt.Sprintf(" %-10s %s\n", "Duration:", time.Since(m.startTime).Round(time.Second)))
 	s.WriteString("\n")

-	// Status with spinner
+	// Status display
 	if !m.done {
+		// Show database progress bar if we have progress data (cluster backup)
+		if m.dbTotal > 0 && m.dbDone > 0 {
+			// Show progress bar instead of spinner when we have real progress
+			progressBar := renderBackupDatabaseProgressBar(m.dbDone, m.dbTotal, m.dbName, 50)
+			s.WriteString(progressBar + "\n")
+			s.WriteString(fmt.Sprintf(" %s\n", m.status))
+		} else {
+			// Show spinner during initial phases
 			if m.cancelling {
 				s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
 			} else {
 				s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
 			}
+		}

 		if !m.cancelling {
 			s.WriteString("\n [KEY] Press Ctrl+C or ESC to cancel\n")
 		}
 	} else {
 		s.WriteString(fmt.Sprintf(" %s\n\n", m.status))

 		// Show completion summary with detailed stats
 		if m.err != nil {
-			s.WriteString(fmt.Sprintf(" [FAIL] Error: %v\n", m.err))
-		} else if m.result != "" {
-			// Parse and display result cleanly
-			lines := strings.Split(m.result, "\n")
-			for _, line := range lines {
-				line = strings.TrimSpace(line)
-				if line != "" {
-					s.WriteString("  " + line + "\n")
-				}
-			}
+			s.WriteString("\n")
+			s.WriteString(errorStyle.Render(" ╔══════════════════════════════════════════════════════════╗"))
+			s.WriteString("\n")
+			s.WriteString(errorStyle.Render(" ║ [FAIL] BACKUP FAILED ║"))
+			s.WriteString("\n")
+			s.WriteString(errorStyle.Render(" ╚══════════════════════════════════════════════════════════╝"))
+			s.WriteString("\n\n")
+			s.WriteString(errorStyle.Render(fmt.Sprintf(" Error: %v", m.err)))
+			s.WriteString("\n")
+		} else {
+			s.WriteString("\n")
+			s.WriteString(successStyle.Render(" ╔══════════════════════════════════════════════════════════╗"))
+			s.WriteString("\n")
+			s.WriteString(successStyle.Render(" ║ [OK] BACKUP COMPLETED SUCCESSFULLY ║"))
+			s.WriteString("\n")
+			s.WriteString(successStyle.Render(" ╚══════════════════════════════════════════════════════════╝"))
+			s.WriteString("\n\n")
+
+			// Summary section
+			s.WriteString(infoStyle.Render(" ─── Summary ─────────────────────────────────────────────"))
+			s.WriteString("\n\n")
+
+			// Backup type specific info
+			switch m.backupType {
+			case "cluster":
+				s.WriteString(" Type: Cluster Backup\n")
+				if m.dbTotal > 0 {
+					s.WriteString(fmt.Sprintf(" Databases: %d backed up\n", m.dbTotal))
+				}
+			case "single":
+				s.WriteString(" Type: Single Database Backup\n")
+				s.WriteString(fmt.Sprintf(" Database: %s\n", m.databaseName))
+			case "sample":
+				s.WriteString(" Type: Sample Backup\n")
+				s.WriteString(fmt.Sprintf(" Database: %s\n", m.databaseName))
+				s.WriteString(fmt.Sprintf(" Sample Ratio: %d\n", m.ratio))
+			}
+
+			s.WriteString("\n")
+
+			// Timing section
+			s.WriteString(infoStyle.Render(" ─── Timing ──────────────────────────────────────────────"))
+			s.WriteString("\n\n")
+
+			elapsed := time.Since(m.startTime)
+			s.WriteString(fmt.Sprintf(" Total Time: %s\n", formatBackupDuration(elapsed)))
+
+			if m.backupType == "cluster" && m.dbTotal > 0 {
+				avgPerDB := elapsed / time.Duration(m.dbTotal)
+				s.WriteString(fmt.Sprintf(" Avg per DB: %s\n", formatBackupDuration(avgPerDB)))
+			}
+
+			s.WriteString("\n")
+			s.WriteString(infoStyle.Render(" ─────────────────────────────────────────────────────────"))
+			s.WriteString("\n")
 		}

-		s.WriteString("\n [KEY] Press Enter or ESC to return to menu\n")
+
+		s.WriteString("\n")
+		s.WriteString(" [KEY] Press Enter or ESC to return to menu\n")
 	}

 	return s.String()
 }

+// formatBackupDuration formats duration in human readable format
+func formatBackupDuration(d time.Duration) string {
+	if d < time.Minute {
+		return fmt.Sprintf("%.1fs", d.Seconds())
+	}
+	if d < time.Hour {
+		minutes := int(d.Minutes())
+		seconds := int(d.Seconds()) % 60
+		return fmt.Sprintf("%dm %ds", minutes, seconds)
+	}
+	hours := int(d.Hours())
+	minutes := int(d.Minutes()) % 60
+	return fmt.Sprintf("%dh %dm", hours, minutes)
+}
@@ -57,6 +57,10 @@ type RestoreExecutionModel struct {
 	dbTotal int
 	dbDone int

+	// Timing info for database restore phase (ETA calculation)
+	dbPhaseElapsed time.Duration // Elapsed time since restore phase started
+	dbAvgPerDB time.Duration // Average time per database restore
+
 	// Results
 	done bool
 	cancelling bool // True when user has requested cancellation
@@ -136,6 +140,10 @@ type sharedProgressState struct {
 	dbTotal int
 	dbDone int

+	// Timing info for database restore phase
+	dbPhaseElapsed time.Duration // Elapsed time since restore phase started
+	dbAvgPerDB time.Duration // Average time per database restore
+
 	// Rolling window for speed calculation
 	speedSamples []restoreSpeedSample
 }
@@ -163,12 +171,12 @@ func clearCurrentRestoreProgress() {
 	currentRestoreProgressState = nil
 }

-func getCurrentRestoreProgress() (bytesTotal, bytesDone int64, description string, hasUpdate bool, dbTotal, dbDone int, speed float64) {
+func getCurrentRestoreProgress() (bytesTotal, bytesDone int64, description string, hasUpdate bool, dbTotal, dbDone int, speed float64, dbPhaseElapsed, dbAvgPerDB time.Duration) {
 	currentRestoreProgressMu.Lock()
 	defer currentRestoreProgressMu.Unlock()

 	if currentRestoreProgressState == nil {
-		return 0, 0, "", false, 0, 0, 0
+		return 0, 0, "", false, 0, 0, 0, 0, 0
 	}

 	currentRestoreProgressState.mu.Lock()
@@ -179,7 +187,8 @@ func getCurrentRestoreProgress() (bytesTotal, bytesDone int64, description strin

 	return currentRestoreProgressState.bytesTotal, currentRestoreProgressState.bytesDone,
 		currentRestoreProgressState.description, currentRestoreProgressState.hasUpdate,
-		currentRestoreProgressState.dbTotal, currentRestoreProgressState.dbDone, speed
+		currentRestoreProgressState.dbTotal, currentRestoreProgressState.dbDone, speed,
+		currentRestoreProgressState.dbPhaseElapsed, currentRestoreProgressState.dbAvgPerDB
 }

 // calculateRollingSpeed calculates speed from recent samples (last 5 seconds)
@@ -304,6 +313,21 @@ func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config
 		progressState.bytesDone = 0
 	})

+	// Set up timing-aware database progress callback for cluster restore ETA
+	engine.SetDatabaseProgressWithTimingCallback(func(done, total int, dbName string, phaseElapsed, avgPerDB time.Duration) {
+		progressState.mu.Lock()
+		defer progressState.mu.Unlock()
+		progressState.dbDone = done
+		progressState.dbTotal = total
+		progressState.description = fmt.Sprintf("Restoring %s", dbName)
+		progressState.dbPhaseElapsed = phaseElapsed
+		progressState.dbAvgPerDB = avgPerDB
+		progressState.hasUpdate = true
+		// Clear byte progress when switching to db progress
+		progressState.bytesTotal = 0
+		progressState.bytesDone = 0
+	})
+
 	// Store progress state in a package-level variable for the ticker to access
 	// This is a workaround because tea messages can't be sent from callbacks
 	setCurrentRestoreProgress(progressState)
@@ -357,7 +381,7 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		m.elapsed = time.Since(m.startTime)

 		// Poll shared progress state for real-time updates
-		bytesTotal, bytesDone, description, hasUpdate, dbTotal, dbDone, speed := getCurrentRestoreProgress()
+		bytesTotal, bytesDone, description, hasUpdate, dbTotal, dbDone, speed, dbPhaseElapsed, dbAvgPerDB := getCurrentRestoreProgress()
 		if hasUpdate && bytesTotal > 0 {
 			m.bytesTotal = bytesTotal
 			m.bytesDone = bytesDone
@@ -370,9 +394,11 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			m.phase = "Extracting"
 			m.progress = int((bytesDone * 100) / bytesTotal)
 		} else if hasUpdate && dbTotal > 0 {
-			// Database count progress for cluster restore
+			// Database count progress for cluster restore with timing
 			m.dbTotal = dbTotal
 			m.dbDone = dbDone
+			m.dbPhaseElapsed = dbPhaseElapsed
+			m.dbAvgPerDB = dbAvgPerDB
 			m.showBytes = false
 			m.status = fmt.Sprintf("Restoring database %d of %d...", dbDone+1, dbTotal)
 			m.phase = "Restore"
@@ -518,21 +544,70 @@ func (m RestoreExecutionModel) View() string {
 	s.WriteString("\n")

 	if m.done {
-		// Show result
+		// Show result with comprehensive summary
 		if m.err != nil {
-			s.WriteString(errorStyle.Render("[FAIL] Restore Failed"))
+			s.WriteString(errorStyle.Render("╔══════════════════════════════════════════════════════════════╗"))
+			s.WriteString("\n")
+			s.WriteString(errorStyle.Render("║ [FAIL] RESTORE FAILED ║"))
+			s.WriteString("\n")
+			s.WriteString(errorStyle.Render("╚══════════════════════════════════════════════════════════════╝"))
+			s.WriteString("\n\n")
+			s.WriteString(errorStyle.Render(fmt.Sprintf(" Error: %v", m.err)))
+			s.WriteString("\n")
 		} else {
-			s.WriteString(successStyle.Render("[OK] Restore Completed Successfully"))
+			s.WriteString(successStyle.Render("╔══════════════════════════════════════════════════════════════╗"))
+			s.WriteString("\n")
+			s.WriteString(successStyle.Render("║ [OK] RESTORE COMPLETED SUCCESSFULLY ║"))
+			s.WriteString("\n")
+			s.WriteString(successStyle.Render("╚══════════════════════════════════════════════════════════════╝"))
+			s.WriteString("\n\n")
 			s.WriteString(successStyle.Render(m.result))
+
+			// Summary section
+			s.WriteString(infoStyle.Render(" ─── Summary ───────────────────────────────────────────────"))
+			s.WriteString("\n\n")
+
+			// Archive info
+			s.WriteString(fmt.Sprintf(" Archive: %s\n", m.archive.Name))
+			if m.archive.Size > 0 {
+				s.WriteString(fmt.Sprintf(" Archive Size: %s\n", FormatBytes(m.archive.Size)))
+			}
+
+			// Restore type specific info
+			if m.restoreType == "restore-cluster" {
+				s.WriteString(fmt.Sprintf(" Type: Cluster Restore\n"))
+				if m.dbTotal > 0 {
+					s.WriteString(fmt.Sprintf(" Databases: %d restored\n", m.dbTotal))
+				}
+				if m.cleanClusterFirst && len(m.existingDBs) > 0 {
+					s.WriteString(fmt.Sprintf(" Cleaned: %d existing database(s) dropped\n", len(m.existingDBs)))
+				}
+			} else {
+				s.WriteString(fmt.Sprintf(" Type: Single Database Restore\n"))
+				s.WriteString(fmt.Sprintf(" Target DB: %s\n", m.targetDB))
+			}
+
+			s.WriteString("\n")
 		}

-		s.WriteString(fmt.Sprintf("\nElapsed Time: %s\n", formatDuration(m.elapsed)))
+		// Timing section
+		s.WriteString(infoStyle.Render(" ─── Timing ────────────────────────────────────────────────"))
+		s.WriteString("\n\n")
+		s.WriteString(fmt.Sprintf(" Total Time: %s\n", formatDuration(m.elapsed)))
+
+		// Calculate and show throughput if we have size info
+		if m.archive.Size > 0 && m.elapsed.Seconds() > 0 {
+			throughput := float64(m.archive.Size) / m.elapsed.Seconds()
+			s.WriteString(fmt.Sprintf(" Throughput: %s/s (average)\n", FormatBytes(int64(throughput))))
+		}
+
+		if m.dbTotal > 0 && m.err == nil {
+			avgPerDB := m.elapsed / time.Duration(m.dbTotal)
+			s.WriteString(fmt.Sprintf(" Avg per DB: %s\n", formatDuration(avgPerDB)))
+		}
+
+		s.WriteString("\n")
+		s.WriteString(infoStyle.Render(" ───────────────────────────────────────────────────────────"))
+		s.WriteString("\n\n")
+		s.WriteString(infoStyle.Render(" [KEYS] Press Enter to continue"))
 	} else {
 		// Show progress
@@ -549,13 +624,13 @@ func (m RestoreExecutionModel) View() string {
 			s.WriteString(renderDetailedProgressBarWithSpeed(m.bytesDone, m.bytesTotal, m.speed))
 			s.WriteString("\n\n")
 		} else if m.dbTotal > 0 {
-			// Database count progress for cluster restore
+			// Database count progress for cluster restore with timing
 			spinner := m.spinnerFrames[m.spinnerFrame]
 			s.WriteString(fmt.Sprintf("Status: %s %s\n", spinner, m.status))
 			s.WriteString("\n")

-			// Show database progress bar
-			s.WriteString(renderDatabaseProgressBar(m.dbDone, m.dbTotal))
+			// Show database progress bar with timing and ETA
+			s.WriteString(renderDatabaseProgressBarWithTiming(m.dbDone, m.dbTotal, m.dbPhaseElapsed, m.dbAvgPerDB))
 			s.WriteString("\n\n")
 		} else {
 			// Show status with rotating spinner (for phases without detailed progress)
@@ -678,6 +753,55 @@ func renderDatabaseProgressBar(done, total int) string {
 	return s.String()
 }

+// renderDatabaseProgressBarWithTiming renders a progress bar for database count with timing and ETA
+func renderDatabaseProgressBarWithTiming(done, total int, phaseElapsed, avgPerDB time.Duration) string {
+	var s strings.Builder
+
+	// Calculate percentage
+	percent := 0
+	if total > 0 {
+		percent = (done * 100) / total
+		if percent > 100 {
+			percent = 100
+		}
+	}
+
+	// Render progress bar
+	width := 30
+	filled := (percent * width) / 100
+	barFilled := strings.Repeat("█", filled)
+	barEmpty := strings.Repeat("░", width-filled)
+
+	s.WriteString(successStyle.Render("["))
+	s.WriteString(successStyle.Render(barFilled))
+	s.WriteString(infoStyle.Render(barEmpty))
+	s.WriteString(successStyle.Render("]"))
+
+	// Count and percentage
+	s.WriteString(fmt.Sprintf(" %3d%% %d / %d databases", percent, done, total))
+
+	// Timing and ETA
+	if phaseElapsed > 0 {
+		s.WriteString(fmt.Sprintf(" [%s", FormatDurationShort(phaseElapsed)))
+
+		// Calculate ETA based on average time per database
+		if avgPerDB > 0 && done < total {
+			remainingDBs := total - done
+			eta := time.Duration(remainingDBs) * avgPerDB
+			s.WriteString(fmt.Sprintf(" / ETA: %s", FormatDurationShort(eta)))
+		} else if done > 0 && done < total {
+			// Fallback: estimate ETA from overall elapsed time
+			avgElapsed := phaseElapsed / time.Duration(done)
+			remainingDBs := total - done
+			eta := time.Duration(remainingDBs) * avgElapsed
+			s.WriteString(fmt.Sprintf(" / ETA: ~%s", FormatDurationShort(eta)))
+		}
+		s.WriteString("]")
+	}
+
+	return s.String()
+}
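
Rendered output looks roughly like this for 3 of 10 databases, 2m30s into the phase, averaging 50s per database (values illustrative):

    [█████████░░░░░░░░░░░░░░░░░░░░░]  30% 3 / 10 databases [2m30s / ETA: 5m50s]
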

 // formatDuration formats duration in human readable format
 func formatDuration(d time.Duration) string {
 	if d < time.Minute {