v5.4.5: Fix disk space estimation for cluster archives
- Use 1.2x multiplier for cluster .tar.gz (pre-compressed dumps)
- Use 5x multiplier for single .sql.gz files (was 7x)
- New CheckSystemMemoryWithType() for archive-aware estimation
- 119GB archive now estimates ~143GB instead of ~833GB
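As a quick sanity check of the numbers in the message above, a minimal, self-contained sketch; the multipliers come from this commit, but the helper function below is illustrative rather than the project's actual code:

```go
package main

import "fmt"

// estimateUncompressedGB mirrors the idea behind the fix: cluster .tar.gz
// archives contain already-compressed .dump files and barely expand, while
// a single .sql.gz expands roughly 5x once decompressed.
func estimateUncompressedGB(archiveSizeGB float64, isClusterArchive bool) float64 {
	if isClusterArchive {
		return archiveSizeGB * 1.2
	}
	return archiveSizeGB * 5.0
}

func main() {
	const archiveGB = 119.0
	fmt.Printf("old estimate (x7):   %.0fGB\n", archiveGB*7)                             // ~833GB
	fmt.Printf("new estimate (x1.2): %.0fGB\n", estimateUncompressedGB(archiveGB, true)) // ~143GB
}
```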
CHANGELOG.md
@@ -5,6 +5,35 @@ All notable changes to dbbackup will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [5.4.5] - 2026-02-02

### Fixed

- **Accurate Disk Space Estimation for Cluster Archives** (see the sketch below)
  - Fixed WARNING showing 836GB for a 119GB archive - the wrong compression multiplier was being used
  - Cluster archives (.tar.gz) contain pre-compressed .dump files → now use a 1.2x multiplier
  - Single SQL files (.sql.gz) still use a 5x multiplier (was 7x)
  - New `CheckSystemMemoryWithType(size, isClusterArchive)` method for archive-aware estimates
  - A 119GB cluster archive now correctly estimates ~143GB instead of ~833GB
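The sketch referenced above shows the distinction the new method relies on. The extension-based classification is an assumption for illustration; the changelog only guarantees that cluster .tar.gz archives get the 1.2x multiplier and single .sql.gz files get 5x:

```go
package main

import (
	"fmt"
	"strings"
)

// classify is a hypothetical helper: dbbackup's real detection logic may
// differ, but it illustrates which multiplier each archive type receives.
func classify(path string) (isClusterArchive bool, multiplier float64) {
	if strings.HasSuffix(path, ".tar.gz") {
		return true, 1.2 // pre-compressed .dump files inside the tarball
	}
	return false, 5.0 // gzipped SQL expands heavily on restore
}

func main() {
	for _, p := range []string{"cluster_20260202.tar.gz", "appdb.sql.gz"} {
		cluster, mult := classify(p)
		fmt.Printf("%-25s cluster=%-5v multiplier=%.1f\n", p, cluster, mult)
	}
}
```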

## [5.4.4] - 2026-02-02

### Fixed

- **TUI Header Separator Fix** - Capped separator length at 40 chars to prevent line overflow on wide terminals
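A minimal sketch of the capping idea; the 40-character limit comes from the entry above, while the helper name and rendering details are assumptions:

```go
package main

import (
	"fmt"
	"strings"
)

// renderSeparator caps the separator at 40 characters so very wide
// terminals no longer produce an overly long header line. Hypothetical helper.
func renderSeparator(termWidth int) string {
	const maxLen = 40
	if termWidth > maxLen {
		termWidth = maxLen
	}
	return strings.Repeat("─", termWidth)
}

func main() {
	fmt.Println(renderSeparator(120)) // still only 40 characters
}
```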

## [5.4.3] - 2026-02-02

### Fixed

- **Bulletproof SIGINT Handling** - Zero zombie processes guaranteed (see the sketch after this entry)
  - All external commands now use `cleanup.SafeCommand()` with process group isolation
  - `KillCommandGroup()` sends signals to the entire process group (-pgid)
  - No more orphaned pg_restore/pg_dump/psql/pigz processes on Ctrl+C
  - 16 files updated with proper signal handling
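The entries above describe the behaviour at a high level; the sketch below shows the generic Unix pattern they rely on (start the child in its own process group, then signal the negative PGID). It is an assumption that `cleanup.SafeCommand()` and `KillCommandGroup()` wrap something along these lines; the real implementation may differ:

```go
package main

import (
	"os/exec"
	"syscall"
)

// startInGroup launches cmd in its own process group so a later signal can
// reach the command and every child it spawns (pg_restore, psql, pigz, ...).
// Generic sketch only, not the actual cleanup.SafeCommand implementation.
func startInGroup(cmd *exec.Cmd) error {
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	return cmd.Start()
}

// killGroup signals the whole process group: a negative PID targets the
// group rather than just the direct child.
func killGroup(cmd *exec.Cmd, sig syscall.Signal) error {
	pgid, err := syscall.Getpgid(cmd.Process.Pid)
	if err != nil {
		return err
	}
	return syscall.Kill(-pgid, sig)
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := startInGroup(cmd); err != nil {
		return
	}
	_ = killGroup(cmd, syscall.SIGTERM) // e.g. triggered by Ctrl+C handling
	_ = cmd.Wait()
}
```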

- **Eliminated External gzip Process** - The `zgrep` command was spawning `gzip -cdfq` (sketch below)
  - Replaced with in-process pgzip decompression in `preflight.go`
  - `estimateBlobsInSQL()` now uses pure Go pgzip.NewReader
  - Zero external gzip processes during restore
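For illustration, a minimal stand-in for the kind of scan `estimateBlobsInSQL()` now performs: the archive is decompressed entirely in-process with `pgzip.NewReader`, so no external gzip is spawned. The file path and the `lo_create` marker are placeholders, not taken from the actual code:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"github.com/klauspost/pgzip"
)

// countMatches decompresses a .sql.gz in-process with pgzip and counts
// lines containing a marker string - a simplified stand-in for the real scan.
func countMatches(path, marker string) (int, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	zr, err := pgzip.NewReader(f) // pure Go, parallel decompression
	if err != nil {
		return 0, err
	}
	defer zr.Close()

	count := 0
	scanner := bufio.NewScanner(zr)
	scanner.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // allow long SQL lines
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), marker) {
			count++
		}
	}
	return count, scanner.Err()
}

func main() {
	n, err := countMatches("backup.sql.gz", "lo_create") // placeholders
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("estimated large objects:", n)
}
```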

## [5.1.22] - 2026-02-01

### Added

@@ -1371,7 +1371,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string, preExtr
 	if statErr == nil && archiveStats != nil {
 		backupSizeBytes = archiveStats.Size()
 	}
-	memCheck := guard.CheckSystemMemory(backupSizeBytes)
+	memCheck := guard.CheckSystemMemoryWithType(backupSizeBytes, true) // true = cluster archive with pre-compressed dumps
 	if memCheck != nil {
 		if memCheck.Critical {
 			e.log.Error("🚨 CRITICAL MEMORY WARNING", "error", memCheck.Recommendation)
@@ -358,6 +358,14 @@ func (g *LargeDBGuard) WarnUser(strategy *RestoreStrategy, silentMode bool) {
 // CheckSystemMemory validates system has enough memory for restore
 func (g *LargeDBGuard) CheckSystemMemory(backupSizeBytes int64) *MemoryCheck {
+	return g.CheckSystemMemoryWithType(backupSizeBytes, false)
+}
+
+// CheckSystemMemoryWithType validates system memory with archive type awareness
+// isClusterArchive: true for .tar.gz cluster backups (contain pre-compressed .dump files)
+//
+//	false for single .sql.gz files (compressed SQL that expands significantly)
+func (g *LargeDBGuard) CheckSystemMemoryWithType(backupSizeBytes int64, isClusterArchive bool) *MemoryCheck {
 	check := &MemoryCheck{
 		BackupSizeGB: float64(backupSizeBytes) / (1024 * 1024 * 1024),
 	}
@@ -374,8 +382,18 @@ func (g *LargeDBGuard) CheckSystemMemory(backupSizeBytes int64) *MemoryCheck {
 	check.SwapTotalGB = float64(memInfo.SwapTotal) / (1024 * 1024 * 1024)
 	check.SwapFreeGB = float64(memInfo.SwapFree) / (1024 * 1024 * 1024)
 
-	// Estimate uncompressed size (typical compression ratio 5:1 to 10:1)
-	estimatedUncompressedGB := check.BackupSizeGB * 7 // Conservative estimate
+	// Estimate uncompressed size based on archive type:
+	// - Cluster archives (.tar.gz): contain pre-compressed .dump files, ratio ~1.2x
+	// - Single SQL files (.sql.gz): compressed SQL expands significantly, ratio ~5-7x
+	var compressionMultiplier float64
+	if isClusterArchive {
+		compressionMultiplier = 1.2 // tar.gz with already-compressed .dump files
+		g.log.Debug("Using cluster archive compression ratio", "multiplier", compressionMultiplier)
+	} else {
+		compressionMultiplier = 5.0 // Conservative for gzipped SQL (was 7, reduced to 5)
+		g.log.Debug("Using single file compression ratio", "multiplier", compressionMultiplier)
+	}
+	estimatedUncompressedGB := check.BackupSizeGB * compressionMultiplier
 
 	// Memory requirements
 	// - PostgreSQL needs ~2-4GB for shared_buffers