fix: dynamic timeouts for large archives + use WorkDir for disk checks
- CheckDiskSpace now uses GetEffectiveWorkDir() instead of BackupDir
- Dynamic timeout calculation based on file size (sketched below):
  - diagnoseClusterArchive: 5 + (GB/3) min, max 60 min
  - verifyWithPgRestore: 5 + (GB/5) min, max 30 min
  - DiagnoseClusterDumps: 10 + (GB/3) min, max 120 min
  - TUI safety checks: 10 + (GB/5) min, max 120 min
- Timeout vs corruption differentiation (no false CORRUPTED on timeout)
- Streaming tar listing to avoid OOM on large archives

For 119GB archives: ~45 min timeout instead of a 5 min false positive
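To make the timeout figures concrete, here is a minimal Go sketch of the formula above, assuming whole-gigabyte integer arithmetic; dynamicTimeout and its parameter names are hypothetical, not this repository's actual helper:

package main

import (
	"fmt"
	"time"
)

// dynamicTimeout sketches the commit's formula: baseMin minutes plus one
// minute per divisorGB gigabytes of archive size, capped at maxMin.
// Hypothetical name and signature; the real helper may differ.
func dynamicTimeout(sizeBytes, baseMin, divisorGB, maxMin int64) time.Duration {
	gb := sizeBytes / (1 << 30) // whole gigabytes
	minutes := baseMin + gb/divisorGB
	if minutes > maxMin {
		minutes = maxMin
	}
	return time.Duration(minutes) * time.Minute
}

func main() {
	// diagnoseClusterArchive parameters (5 + GB/3, max 60) for a 119GB archive:
	fmt.Println(dynamicTimeout(119<<30, 5, 3, 60)) // 44m0s, the ~45 min cited above
}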
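The streaming tar listing follows the same idea: read one header at a time instead of buffering the whole listing in memory, so memory stays flat even for 100GB+ archives. A sketch using the standard archive/tar reader; streamTarNames is an illustrative name, not the repository's function:

package sketch

import (
	"archive/tar"
	"io"
)

// streamTarNames visits each entry name as it is read from the stream,
// so memory use does not grow with archive size. Illustrative name;
// the repository's listing code may differ.
func streamTarNames(r io.Reader, visit func(name string) error) error {
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		if err := visit(hdr.Name); err != nil {
			return err
		}
	}
}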
@@ -229,8 +229,14 @@ func containsSQLKeywords(content string) bool {
 }
 
 // CheckDiskSpace verifies sufficient disk space for restore
+// Uses the effective work directory (WorkDir if set, otherwise BackupDir) since
+// that's where extraction actually happens for large databases
 func (s *Safety) CheckDiskSpace(archivePath string, multiplier float64) error {
-	return s.CheckDiskSpaceAt(archivePath, s.cfg.BackupDir, multiplier)
+	checkDir := s.cfg.GetEffectiveWorkDir()
+	if checkDir == "" {
+		checkDir = s.cfg.BackupDir
+	}
+	return s.CheckDiskSpaceAt(archivePath, checkDir, multiplier)
 }
 
 // CheckDiskSpaceAt verifies sufficient disk space at a specific directory
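For context, a disk-space check of the shape CheckDiskSpaceAt is called with can be sketched on Linux with syscall.Statfs, comparing available bytes at the target directory against the archive size scaled by the multiplier. This is an assumption-laden stand-in, not the repository's implementation:

package sketch

import (
	"fmt"
	"os"
	"syscall"
)

// checkDiskSpaceAt is a hypothetical stand-in for the CheckDiskSpaceAt the
// diff calls: it compares free space at dir against the archive size times
// multiplier. Linux-specific via syscall.Statfs.
func checkDiskSpaceAt(archivePath, dir string, multiplier float64) error {
	fi, err := os.Stat(archivePath)
	if err != nil {
		return err
	}
	var st syscall.Statfs_t
	if err := syscall.Statfs(dir, &st); err != nil {
		return err
	}
	free := st.Bavail * uint64(st.Bsize) // bytes available to unprivileged users
	need := uint64(float64(fi.Size()) * multiplier)
	if free < need {
		return fmt.Errorf("insufficient space in %s: need %d bytes, have %d", dir, need, free)
	}
	return nil
}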