From f033b02cecb65188d53299536d5229ae95fd8224 Mon Sep 17 00:00:00 2001 From: Alexander Renz Date: Sat, 13 Dec 2025 21:55:39 +0100 Subject: [PATCH] fix(build): move EstimateBackupSize to platform-independent file Fixes Windows, OpenBSD, and NetBSD builds by extracting EstimateBackupSize from disk_check.go (which has build tags excluding those platforms) to a new estimate.go file. --- internal/checks/disk_check.go | 25 ------------- internal/checks/estimate.go | 26 +++++++++++++ internal/engine/binlog/file_target.go | 26 ++++++------- internal/engine/binlog/s3_target.go | 26 ++++++------- internal/engine/binlog/streamer.go | 32 ++++++++-------- internal/engine/binlog/streamer_test.go | 20 +++++----- internal/engine/selector_test.go | 8 ++-- internal/engine/snapshot/btrfs.go | 30 +++++++-------- internal/engine/snapshot_engine.go | 8 ++-- internal/engine/streaming.go | 50 ++++++++++++------------- 10 files changed, 126 insertions(+), 125 deletions(-) create mode 100644 internal/checks/estimate.go diff --git a/internal/checks/disk_check.go b/internal/checks/disk_check.go index 6aa1e94..721914d 100755 --- a/internal/checks/disk_check.go +++ b/internal/checks/disk_check.go @@ -109,28 +109,3 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string { return msg } - -// EstimateBackupSize estimates backup size based on database size -func EstimateBackupSize(databaseSize uint64, compressionLevel int) uint64 { - // Typical compression ratios: - // Level 0 (no compression): 1.0x - // Level 1-3 (fast): 0.4-0.6x - // Level 4-6 (balanced): 0.3-0.4x - // Level 7-9 (best): 0.2-0.3x - - var compressionRatio float64 - if compressionLevel == 0 { - compressionRatio = 1.0 - } else if compressionLevel <= 3 { - compressionRatio = 0.5 - } else if compressionLevel <= 6 { - compressionRatio = 0.35 - } else { - compressionRatio = 0.25 - } - - estimated := uint64(float64(databaseSize) * compressionRatio) - - // Add 10% buffer for metadata, indexes, etc. - return uint64(float64(estimated) * 1.1) -} diff --git a/internal/checks/estimate.go b/internal/checks/estimate.go new file mode 100644 index 0000000..a6bf517 --- /dev/null +++ b/internal/checks/estimate.go @@ -0,0 +1,26 @@ +package checks + +// EstimateBackupSize estimates backup size based on database size +func EstimateBackupSize(databaseSize uint64, compressionLevel int) uint64 { + // Typical compression ratios: + // Level 0 (no compression): 1.0x + // Level 1-3 (fast): 0.4-0.6x + // Level 4-6 (balanced): 0.3-0.4x + // Level 7-9 (best): 0.2-0.3x + + var compressionRatio float64 + if compressionLevel == 0 { + compressionRatio = 1.0 + } else if compressionLevel <= 3 { + compressionRatio = 0.5 + } else if compressionLevel <= 6 { + compressionRatio = 0.35 + } else { + compressionRatio = 0.25 + } + + estimated := uint64(float64(databaseSize) * compressionRatio) + + // Add 10% buffer for metadata, indexes, etc. 
+ return uint64(float64(estimated) * 1.1) +} diff --git a/internal/engine/binlog/file_target.go b/internal/engine/binlog/file_target.go index ef99ead..5d3dfda 100644 --- a/internal/engine/binlog/file_target.go +++ b/internal/engine/binlog/file_target.go @@ -16,12 +16,12 @@ type FileTarget struct { basePath string rotateSize int64 - mu sync.Mutex - current *os.File - written int64 - fileNum int - healthy bool - lastErr error + mu sync.Mutex + current *os.File + written int64 + fileNum int + healthy bool + lastErr error } // NewFileTarget creates a new file target @@ -165,13 +165,13 @@ type CompressedFileTarget struct { basePath string rotateSize int64 - mu sync.Mutex - file *os.File - gzWriter *gzip.Writer - written int64 - fileNum int - healthy bool - lastErr error + mu sync.Mutex + file *os.File + gzWriter *gzip.Writer + written int64 + fileNum int + healthy bool + lastErr error } // NewCompressedFileTarget creates a gzip-compressed file target diff --git a/internal/engine/binlog/s3_target.go b/internal/engine/binlog/s3_target.go index 55e4d65..f2fcc30 100644 --- a/internal/engine/binlog/s3_target.go +++ b/internal/engine/binlog/s3_target.go @@ -22,17 +22,17 @@ type S3Target struct { region string partSize int64 - mu sync.Mutex - buffer *bytes.Buffer - bufferSize int - currentKey string - uploadID string - parts []types.CompletedPart - partNumber int32 - fileNum int - healthy bool - lastErr error - lastWrite time.Time + mu sync.Mutex + buffer *bytes.Buffer + bufferSize int + currentKey string + uploadID string + parts []types.CompletedPart + partNumber int32 + fileNum int + healthy bool + lastErr error + lastWrite time.Time } // NewS3Target creates a new S3 target @@ -204,8 +204,8 @@ func (s *S3Target) Healthy() bool { // S3StreamingTarget supports larger files with resumable uploads type S3StreamingTarget struct { *S3Target - rotateSize int64 - currentSize int64 + rotateSize int64 + currentSize int64 } // NewS3StreamingTarget creates an S3 target with file rotation diff --git a/internal/engine/binlog/streamer.go b/internal/engine/binlog/streamer.go index f9ab134..2511d00 100644 --- a/internal/engine/binlog/streamer.go +++ b/internal/engine/binlog/streamer.go @@ -106,11 +106,11 @@ type Filter struct { // StreamerState holds the current state of the streamer type StreamerState struct { - Position Position `json:"position"` - EventCount uint64 `json:"event_count"` - ByteCount uint64 `json:"byte_count"` - LastUpdate time.Time `json:"last_update"` - StartTime time.Time `json:"start_time"` + Position Position `json:"position"` + EventCount uint64 `json:"event_count"` + ByteCount uint64 `json:"byte_count"` + LastUpdate time.Time `json:"last_update"` + StartTime time.Time `json:"start_time"` TargetStatus []TargetStatus `json:"targets"` } @@ -125,17 +125,17 @@ type TargetStatus struct { // Event represents a parsed binlog event type Event struct { - Type string `json:"type"` // "write", "update", "delete", "query", "gtid", etc. 
- Timestamp time.Time `json:"timestamp"` - Database string `json:"database,omitempty"` - Table string `json:"table,omitempty"` - Position Position `json:"position"` - GTID string `json:"gtid,omitempty"` - Query string `json:"query,omitempty"` // For query events - Rows []map[string]any `json:"rows,omitempty"` // For row events - OldRows []map[string]any `json:"old_rows,omitempty"` // For update events - RawData []byte `json:"-"` // Raw binlog data for replay - Extra map[string]any `json:"extra,omitempty"` + Type string `json:"type"` // "write", "update", "delete", "query", "gtid", etc. + Timestamp time.Time `json:"timestamp"` + Database string `json:"database,omitempty"` + Table string `json:"table,omitempty"` + Position Position `json:"position"` + GTID string `json:"gtid,omitempty"` + Query string `json:"query,omitempty"` // For query events + Rows []map[string]any `json:"rows,omitempty"` // For row events + OldRows []map[string]any `json:"old_rows,omitempty"` // For update events + RawData []byte `json:"-"` // Raw binlog data for replay + Extra map[string]any `json:"extra,omitempty"` } // Target interface for binlog output destinations diff --git a/internal/engine/binlog/streamer_test.go b/internal/engine/binlog/streamer_test.go index 8c679e5..2bb36fc 100644 --- a/internal/engine/binlog/streamer_test.go +++ b/internal/engine/binlog/streamer_test.go @@ -77,18 +77,18 @@ func TestEvent(t *testing.T) { func TestConfig(t *testing.T) { cfg := Config{ - Host: "localhost", - Port: 3306, - User: "repl", - Password: "secret", - ServerID: 99999, - Flavor: "mysql", - BatchMaxEvents: 1000, - BatchMaxBytes: 10 * 1024 * 1024, - BatchMaxWait: time.Second, + Host: "localhost", + Port: 3306, + User: "repl", + Password: "secret", + ServerID: 99999, + Flavor: "mysql", + BatchMaxEvents: 1000, + BatchMaxBytes: 10 * 1024 * 1024, + BatchMaxWait: time.Second, CheckpointEnabled: true, CheckpointFile: "/var/lib/dbbackup/checkpoint", - UseGTID: true, + UseGTID: true, } if cfg.Host != "localhost" { diff --git a/internal/engine/selector_test.go b/internal/engine/selector_test.go index dcdee8c..a154001 100644 --- a/internal/engine/selector_test.go +++ b/internal/engine/selector_test.go @@ -12,7 +12,7 @@ func TestSelectorConfig(t *testing.T) { User: "root", DataDir: "/var/lib/mysql", CloneMinVersion: "8.0.17", - CloneMinSize: 1024 * 1024 * 1024, // 1GB + CloneMinSize: 1024 * 1024 * 1024, // 1GB SnapshotMinSize: 10 * 1024 * 1024 * 1024, // 10GB PreferClone: true, AllowMysqldump: true, @@ -111,9 +111,9 @@ func TestSelectionReason(t *testing.T) { func TestEngineScoring(t *testing.T) { // Test that scores are calculated correctly tests := []struct { - name string - info DatabaseInfo - expectedBest string + name string + info DatabaseInfo + expectedBest string }{ { name: "large DB with clone plugin", diff --git a/internal/engine/snapshot/btrfs.go b/internal/engine/snapshot/btrfs.go index 3c25572..e0fc2de 100644 --- a/internal/engine/snapshot/btrfs.go +++ b/internal/engine/snapshot/btrfs.go @@ -110,12 +110,12 @@ func (b *BtrfsBackend) CreateSnapshot(ctx context.Context, opts SnapshotOptions) } return &Snapshot{ - ID: fullPath, - Backend: "btrfs", - Source: b.config.Subvolume, - Name: snapName, + ID: fullPath, + Backend: "btrfs", + Source: b.config.Subvolume, + Name: snapName, MountPoint: fullPath, // Btrfs snapshots are immediately accessible - CreatedAt: time.Now(), + CreatedAt: time.Now(), Metadata: map[string]string{ "subvolume": b.config.Subvolume, "snapshot_path": snapPath, @@ -279,12 +279,12 @@ func (b *BtrfsBackend) 
ListSnapshots(ctx context.Context) ([]*Snapshot, error) { } snapshots = append(snapshots, &Snapshot{ - ID: fullPath, - Backend: "btrfs", - Name: name, - Source: b.config.Subvolume, + ID: fullPath, + Backend: "btrfs", + Name: name, + Source: b.config.Subvolume, MountPoint: fullPath, - CreatedAt: createdAt, + CreatedAt: createdAt, Metadata: map[string]string{ "subvolume": b.config.Subvolume, }, @@ -322,12 +322,12 @@ func (b *BtrfsBackend) listSnapshotsFromDir(ctx context.Context, snapPath string } snapshots = append(snapshots, &Snapshot{ - ID: fullPath, - Backend: "btrfs", - Name: entry.Name(), - Source: b.config.Subvolume, + ID: fullPath, + Backend: "btrfs", + Name: entry.Name(), + Source: b.config.Subvolume, MountPoint: fullPath, - CreatedAt: createdAt, + CreatedAt: createdAt, Metadata: map[string]string{ "subvolume": b.config.Subvolume, }, diff --git a/internal/engine/snapshot_engine.go b/internal/engine/snapshot_engine.go index 2db418a..e28824b 100644 --- a/internal/engine/snapshot_engine.go +++ b/internal/engine/snapshot_engine.go @@ -378,10 +378,10 @@ func (e *SnapshotEngine) streamSnapshot(ctx context.Context, sourcePath, destFil // Report progress if progressFunc != nil && totalFiles > 0 { progressFunc(&Progress{ - Stage: "STREAMING", - Percent: 30 + float64(fileCount)/float64(totalFiles)*60, - BytesDone: countWriter.count, - Message: fmt.Sprintf("Processed %d/%d files (%s)", fileCount, totalFiles, formatBytes(countWriter.count)), + Stage: "STREAMING", + Percent: 30 + float64(fileCount)/float64(totalFiles)*60, + BytesDone: countWriter.count, + Message: fmt.Sprintf("Processed %d/%d files (%s)", fileCount, totalFiles, formatBytes(countWriter.count)), }) } } diff --git a/internal/engine/streaming.go b/internal/engine/streaming.go index 9968559..ef74646 100644 --- a/internal/engine/streaming.go +++ b/internal/engine/streaming.go @@ -13,10 +13,10 @@ import ( // StreamingBackupEngine wraps a backup engine with streaming capability type StreamingBackupEngine struct { - engine BackupEngine - cloudCfg parallel.Config - log logger.Logger - + engine BackupEngine + cloudCfg parallel.Config + log logger.Logger + mu sync.Mutex streamer *parallel.CloudStreamer pipe *io.PipeWriter @@ -28,19 +28,19 @@ type StreamingBackupEngine struct { // StreamingConfig holds streaming configuration type StreamingConfig struct { // Cloud configuration - Bucket string - Key string - Region string - Endpoint string - + Bucket string + Key string + Region string + Endpoint string + // Performance PartSize int64 WorkerCount int - + // Security - Encryption string - KMSKeyID string - + Encryption string + KMSKeyID string + // Progress callback OnProgress func(progress parallel.Progress) } @@ -56,7 +56,7 @@ func NewStreamingBackupEngine(engine BackupEngine, cfg StreamingConfig, log logg cloudCfg.Key = cfg.Key cloudCfg.Region = cfg.Region cloudCfg.Endpoint = cfg.Endpoint - + if cfg.PartSize > 0 { cloudCfg.PartSize = cfg.PartSize } @@ -199,23 +199,23 @@ type DirectBackupConfig struct { // Database DBType string DSN string - + // Cloud - CloudURI string // s3://bucket/path or gs://bucket/path - Region string - Endpoint string - + CloudURI string // s3://bucket/path or gs://bucket/path + Region string + Endpoint string + // Engine selection - PreferredEngine string // clone, snapshot, dump - + PreferredEngine string // clone, snapshot, dump + // Performance PartSize int64 WorkerCount int - + // Options - Compression bool - Encryption string - EncryptionKey string + Compression bool + Encryption string + EncryptionKey string 
} // Backup performs a direct backup to cloud
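
Background on the failure this patch fixes: disk_check.go is guarded by a build
constraint that excludes Windows, OpenBSD, and NetBSD, and the constraint line
itself sits outside the diff context shown above. As a sketch (the exact tag
expression in the repository may differ), the file header looks roughly like:

    //go:build !windows && !openbsd && !netbsd

    package checks

Under such a constraint the whole file is skipped on the excluded platforms, so
any symbol defined only there, including EstimateBackupSize before this change,
is undefined for those builds and every caller fails to compile. The new
estimate.go carries no build constraint, which is what restores the Windows,
OpenBSD, and NetBSD builds.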
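
Because EstimateBackupSize is pure arithmetic, the relocated helper can now be
exercised on every target platform. A minimal companion test, hypothetical and
not part of this patch, that pins the documented heuristic (full size plus a
10% buffer at level 0, and a strictly smaller estimate for each higher
compression bracket) could sit next to estimate.go:

    package checks

    import "testing"

    // Illustrative sketch only: checks the behaviour documented in
    // estimate.go, i.e. level 0 keeps the full database size plus a 10%
    // metadata buffer, and each higher compression bracket (1-3, 4-6, 7-9)
    // predicts a smaller artifact.
    func TestEstimateBackupSizeHeuristic(t *testing.T) {
        const dbSize = uint64(100 << 30) // 100 GiB source database

        uncompressed := EstimateBackupSize(dbSize, 0)
        if uncompressed <= dbSize {
            t.Fatalf("level 0 should add a buffer: got %d for input %d", uncompressed, dbSize)
        }

        prev := uncompressed
        for _, level := range []int{1, 4, 7} { // one level per ratio bracket
            got := EstimateBackupSize(dbSize, level)
            if got >= prev {
                t.Errorf("level %d: expected an estimate below %d, got %d", level, prev, got)
            }
            prev = got
        }
    }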