fix: re-detect databases at execution time for cluster cleanup

- Detection in preview may fail or return stale results
- Re-detect user databases at execution time when cleanup is enabled
- Fall back to the preview list if re-detection fails
- Ensures the databases that actually exist are dropped, not just those detected earlier
2026-01-17 17:00:28 +01:00
parent 9396c8e605
commit 29e089fe3b
2 changed files with 36 additions and 20 deletions


@@ -4,8 +4,8 @@ This directory contains pre-compiled binaries for the DB Backup Tool across mult
 ## Build Information
 - **Version**: 3.42.50
-- **Build Time**: 2026-01-17_15:45:53_UTC
-- **Git Commit**: e363e19
+- **Build Time**: 2026-01-17_15:54:39_UTC
+- **Git Commit**: 9396c8e
 ## Recent Updates (v1.1.0)
 - ✅ Fixed TUI progress display with line-by-line output


@@ -273,26 +273,42 @@ func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config
     defer dbClient.Close()
     // STEP 1: Clean cluster if requested (drop all existing user databases)
-    if restoreType == "restore-cluster" && cleanClusterFirst && len(existingDBs) > 0 {
-        log.Info("Dropping existing user databases before cluster restore", "count", len(existingDBs))
-        // Drop databases using command-line psql (no connection required)
-        // This matches how cluster restore works - uses CLI tools, not database connections
-        droppedCount := 0
-        for _, dbName := range existingDBs {
-            // Create timeout context for each database drop (5 minutes per DB - large DBs take time)
-            dropCtx, dropCancel := context.WithTimeout(ctx, 5*time.Minute)
-            if err := dropDatabaseCLI(dropCtx, cfg, dbName); err != nil {
-                log.Warn("Failed to drop database", "name", dbName, "error", err)
-                // Continue with other databases
-            } else {
-                droppedCount++
-                log.Info("Dropped database", "name", dbName)
-            }
-            dropCancel() // Clean up context
+    if restoreType == "restore-cluster" && cleanClusterFirst {
+        // Re-detect databases at execution time to get current state
+        // The preview list may be stale or detection may have failed earlier
+        safety := restore.NewSafety(cfg, log)
+        currentDBs, err := safety.ListUserDatabases(ctx)
+        if err != nil {
+            log.Warn("Failed to list databases for cleanup, using preview list", "error", err)
+            currentDBs = existingDBs // Fall back to preview list
+        } else if len(currentDBs) > 0 {
+            log.Info("Re-detected user databases for cleanup", "count", len(currentDBs), "databases", currentDBs)
+            existingDBs = currentDBs // Update with fresh list
         }
-        log.Info("Cluster cleanup completed", "dropped", droppedCount, "total", len(existingDBs))
+        if len(existingDBs) > 0 {
+            log.Info("Dropping existing user databases before cluster restore", "count", len(existingDBs))
+            // Drop databases using command-line psql (no connection required)
+            // This matches how cluster restore works - uses CLI tools, not database connections
+            droppedCount := 0
+            for _, dbName := range existingDBs {
+                // Create timeout context for each database drop (5 minutes per DB - large DBs take time)
+                dropCtx, dropCancel := context.WithTimeout(ctx, 5*time.Minute)
+                if err := dropDatabaseCLI(dropCtx, cfg, dbName); err != nil {
+                    log.Warn("Failed to drop database", "name", dbName, "error", err)
+                    // Continue with other databases
+                } else {
+                    droppedCount++
+                    log.Info("Dropped database", "name", dbName)
+                }
+                dropCancel() // Clean up context
+            }
+            log.Info("Cluster cleanup completed", "dropped", droppedCount, "total", len(existingDBs))
+        } else {
+            log.Info("No user databases to clean up")
+        }
     }
     // STEP 2: Create restore engine with silent progress (no stdout interference with TUI)
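The fix hinges on restore.Safety.ListUserDatabases, which the hunk calls but does not define. As a rough illustration of what execution-time re-detection typically involves against PostgreSQL, here is a self-contained sketch; the standalone function name, the explicit host/port/user parameters, and the exact query are assumptions, not the tool's actual code — only the CLI-over-psql approach is taken from the diff's comments.

package sketch

import (
    "context"
    "fmt"
    "os/exec"
    "strings"
)

// Sketch only: list non-template databases via psql, one name per line,
// excluding the built-in "postgres" maintenance database.
func listUserDatabases(ctx context.Context, host, port, user string) ([]string, error) {
    const query = "SELECT datname FROM pg_database WHERE NOT datistemplate AND datname <> 'postgres';"
    cmd := exec.CommandContext(ctx, "psql",
        "-h", host, "-p", port, "-U", user,
        "-d", "postgres", // connect to the maintenance DB, never a target DB
        "-At", "-c", query) // -A unaligned, -t tuples only: bare names
    out, err := cmd.Output()
    if err != nil {
        return nil, fmt.Errorf("listing databases: %w", err)
    }
    var dbs []string
    for _, name := range strings.Split(strings.TrimSpace(string(out)), "\n") {
        if name != "" {
            dbs = append(dbs, name)
        }
    }
    return dbs, nil
}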
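dropDatabaseCLI is likewise invoked but not shown in this hunk. A plausible shape for such a helper, continuing the sketch file above and assuming PostgreSQL 13+ so that WITH (FORCE) can terminate lingering connections; the real helper takes the tool's cfg rather than discrete parameters.

// Sketch only: drop one database via psql so no Go-side connection to the
// target is ever held. %q is adequate identifier quoting for a sketch;
// production code should escape embedded double quotes per SQL rules.
func dropDatabaseCLI(ctx context.Context, host, port, user, dbName string) error {
    stmt := fmt.Sprintf("DROP DATABASE IF EXISTS %q WITH (FORCE);", dbName)
    cmd := exec.CommandContext(ctx, "psql",
        "-h", host, "-p", port, "-U", user,
        "-d", "postgres", // never connect to the database being dropped
        "-v", "ON_ERROR_STOP=1",
        "-c", stmt)
    if out, err := cmd.CombinedOutput(); err != nil {
        return fmt.Errorf("dropping %s: %w (%s)", dbName, err, strings.TrimSpace(string(out)))
    }
    return nil
}

Wrapping each call in its own context.WithTimeout, as the diff does, keeps one stuck drop from stalling the rest of the cleanup loop: exec.CommandContext kills the psql process once that per-database deadline expires.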