fix(build): move EstimateBackupSize to platform-independent file

Fixes Windows, OpenBSD, and NetBSD builds by extracting
EstimateBackupSize from disk_check.go (which has build tags
excluding those platforms) to a new estimate.go file.
commit f033b02cec (parent 573f2776d7)
Date: 2025-12-13 21:55:39 +01:00
10 changed files with 126 additions and 125 deletions
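
Why the builds break: disk_check.go is guarded by build tags, so on Windows, OpenBSD, and NetBSD the file is skipped entirely and any symbol defined only there, including EstimateBackupSize, does not exist for callers on those platforms. A minimal sketch of the shape of such a constraint (illustrative; the exact tag expression in disk_check.go is not shown in this commit):

// disk_check.go is compiled only where its platform-specific disk checks apply.
// Hypothetical constraint for illustration; the real expression may differ.
//go:build !windows && !openbsd && !netbsd

package checks

Since the new estimate.go carries no build constraint, EstimateBackupSize is compiled for every GOOS, and cross-compiles such as GOOS=windows GOARCH=amd64 go build ./... should succeed again.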

disk_check.go

@@ -109,28 +109,3 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
	return msg
}

// EstimateBackupSize estimates backup size based on database size
func EstimateBackupSize(databaseSize uint64, compressionLevel int) uint64 {
	// Typical compression ratios:
	// Level 0 (no compression): 1.0x
	// Level 1-3 (fast): 0.4-0.6x
	// Level 4-6 (balanced): 0.3-0.4x
	// Level 7-9 (best): 0.2-0.3x
	var compressionRatio float64
	if compressionLevel == 0 {
		compressionRatio = 1.0
	} else if compressionLevel <= 3 {
		compressionRatio = 0.5
	} else if compressionLevel <= 6 {
		compressionRatio = 0.35
	} else {
		compressionRatio = 0.25
	}

	estimated := uint64(float64(databaseSize) * compressionRatio)

	// Add 10% buffer for metadata, indexes, etc.
	return uint64(float64(estimated) * 1.1)
}

estimate.go (new file)

@@ -0,0 +1,26 @@
package checks

// EstimateBackupSize estimates backup size based on database size
func EstimateBackupSize(databaseSize uint64, compressionLevel int) uint64 {
	// Typical compression ratios:
	// Level 0 (no compression): 1.0x
	// Level 1-3 (fast): 0.4-0.6x
	// Level 4-6 (balanced): 0.3-0.4x
	// Level 7-9 (best): 0.2-0.3x
	var compressionRatio float64
	if compressionLevel == 0 {
		compressionRatio = 1.0
	} else if compressionLevel <= 3 {
		compressionRatio = 0.5
	} else if compressionLevel <= 6 {
		compressionRatio = 0.35
	} else {
		compressionRatio = 0.25
	}

	estimated := uint64(float64(databaseSize) * compressionRatio)

	// Add 10% buffer for metadata, indexes, etc.
	return uint64(float64(estimated) * 1.1)
}
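
For a quick sense of the numbers: at compression level 6 the function applies a 0.35 ratio and then a 10% buffer, so a 10 GiB database is estimated at about 10 GiB * 0.35 * 1.1, roughly 3.85 GiB. A small test sketch (illustrative only, not part of this commit) that could sit next to estimate.go in package checks:

package checks

import "testing"

// TestEstimateBackupSizeExample is an illustrative sketch, not part of the commit.
// It exercises the level-6 path: databaseSize * 0.35, then a 10% metadata buffer.
func TestEstimateBackupSizeExample(t *testing.T) {
	const tenGiB = uint64(10) << 30

	got := EstimateBackupSize(tenGiB, 6)

	// Expect roughly 3.85 GiB; allow slack for float truncation in the implementation.
	lo := uint64(3.8 * float64(1<<30))
	hi := uint64(3.9 * float64(1<<30))
	if got < lo || got > hi {
		t.Fatalf("EstimateBackupSize(10 GiB, 6) = %d, want about 3.85 GiB", got)
	}
}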


@@ -16,12 +16,12 @@ type FileTarget struct {
	basePath   string
	rotateSize int64

	mu      sync.Mutex
	current *os.File
	written int64
	fileNum int
	healthy bool
	lastErr error
}

// NewFileTarget creates a new file target
@@ -165,13 +165,13 @@ type CompressedFileTarget struct {
	basePath   string
	rotateSize int64

	mu       sync.Mutex
	file     *os.File
	gzWriter *gzip.Writer
	written  int64
	fileNum  int
	healthy  bool
	lastErr  error
}

// NewCompressedFileTarget creates a gzip-compressed file target


@@ -22,17 +22,17 @@ type S3Target struct {
	region   string
	partSize int64

	mu         sync.Mutex
	buffer     *bytes.Buffer
	bufferSize int
	currentKey string
	uploadID   string
	parts      []types.CompletedPart
	partNumber int32
	fileNum    int
	healthy    bool
	lastErr    error
	lastWrite  time.Time
}

// NewS3Target creates a new S3 target
@@ -204,8 +204,8 @@ func (s *S3Target) Healthy() bool {
// S3StreamingTarget supports larger files with resumable uploads
type S3StreamingTarget struct {
	*S3Target

	rotateSize  int64
	currentSize int64
}

// NewS3StreamingTarget creates an S3 target with file rotation


@@ -106,11 +106,11 @@ type Filter struct {
// StreamerState holds the current state of the streamer
type StreamerState struct {
	Position     Position       `json:"position"`
	EventCount   uint64         `json:"event_count"`
	ByteCount    uint64         `json:"byte_count"`
	LastUpdate   time.Time      `json:"last_update"`
	StartTime    time.Time      `json:"start_time"`
	TargetStatus []TargetStatus `json:"targets"`
}
@@ -125,17 +125,17 @@ type TargetStatus struct {
// Event represents a parsed binlog event
type Event struct {
	Type      string           `json:"type"` // "write", "update", "delete", "query", "gtid", etc.
	Timestamp time.Time        `json:"timestamp"`
	Database  string           `json:"database,omitempty"`
	Table     string           `json:"table,omitempty"`
	Position  Position         `json:"position"`
	GTID      string           `json:"gtid,omitempty"`
	Query     string           `json:"query,omitempty"`    // For query events
	Rows      []map[string]any `json:"rows,omitempty"`     // For row events
	OldRows   []map[string]any `json:"old_rows,omitempty"` // For update events
	RawData   []byte           `json:"-"`                  // Raw binlog data for replay
	Extra     map[string]any   `json:"extra,omitempty"`
}

// Target interface for binlog output destinations


@@ -77,18 +77,18 @@ func TestEvent(t *testing.T) {
func TestConfig(t *testing.T) {
	cfg := Config{
		Host:              "localhost",
		Port:              3306,
		User:              "repl",
		Password:          "secret",
		ServerID:          99999,
		Flavor:            "mysql",
		BatchMaxEvents:    1000,
		BatchMaxBytes:     10 * 1024 * 1024,
		BatchMaxWait:      time.Second,
		CheckpointEnabled: true,
		CheckpointFile:    "/var/lib/dbbackup/checkpoint",
		UseGTID:           true,
	}

	if cfg.Host != "localhost" {


@@ -12,7 +12,7 @@ func TestSelectorConfig(t *testing.T) {
		User:            "root",
		DataDir:         "/var/lib/mysql",
		CloneMinVersion: "8.0.17",
		CloneMinSize:    1024 * 1024 * 1024,      // 1GB
		SnapshotMinSize: 10 * 1024 * 1024 * 1024, // 10GB
		PreferClone:     true,
		AllowMysqldump:  true,
@@ -111,9 +111,9 @@ func TestSelectionReason(t *testing.T) {
func TestEngineScoring(t *testing.T) {
	// Test that scores are calculated correctly
	tests := []struct {
		name         string
		info         DatabaseInfo
		expectedBest string
	}{
		{
			name: "large DB with clone plugin",


@@ -110,12 +110,12 @@ func (b *BtrfsBackend) CreateSnapshot(ctx context.Context, opts SnapshotOptions)
	}

	return &Snapshot{
		ID:         fullPath,
		Backend:    "btrfs",
		Source:     b.config.Subvolume,
		Name:       snapName,
		MountPoint: fullPath, // Btrfs snapshots are immediately accessible
		CreatedAt:  time.Now(),
		Metadata: map[string]string{
			"subvolume":     b.config.Subvolume,
			"snapshot_path": snapPath,
@@ -279,12 +279,12 @@ func (b *BtrfsBackend) ListSnapshots(ctx context.Context) ([]*Snapshot, error) {
		}

		snapshots = append(snapshots, &Snapshot{
			ID:         fullPath,
			Backend:    "btrfs",
			Name:       name,
			Source:     b.config.Subvolume,
			MountPoint: fullPath,
			CreatedAt:  createdAt,
			Metadata: map[string]string{
				"subvolume": b.config.Subvolume,
			},
@@ -322,12 +322,12 @@ func (b *BtrfsBackend) listSnapshotsFromDir(ctx context.Context, snapPath string
		}

		snapshots = append(snapshots, &Snapshot{
			ID:         fullPath,
			Backend:    "btrfs",
			Name:       entry.Name(),
			Source:     b.config.Subvolume,
			MountPoint: fullPath,
			CreatedAt:  createdAt,
			Metadata: map[string]string{
				"subvolume": b.config.Subvolume,
			},


@@ -378,10 +378,10 @@ func (e *SnapshotEngine) streamSnapshot(ctx context.Context, sourcePath, destFil
		// Report progress
		if progressFunc != nil && totalFiles > 0 {
			progressFunc(&Progress{
				Stage:     "STREAMING",
				Percent:   30 + float64(fileCount)/float64(totalFiles)*60,
				BytesDone: countWriter.count,
				Message:   fmt.Sprintf("Processed %d/%d files (%s)", fileCount, totalFiles, formatBytes(countWriter.count)),
			})
		}
	}


@@ -13,9 +13,9 @@ import (
// StreamingBackupEngine wraps a backup engine with streaming capability
type StreamingBackupEngine struct {
	engine   BackupEngine
	cloudCfg parallel.Config
	log      logger.Logger

	mu       sync.Mutex
	streamer *parallel.CloudStreamer
@@ -28,18 +28,18 @@ type StreamingBackupEngine struct {
// StreamingConfig holds streaming configuration
type StreamingConfig struct {
	// Cloud configuration
	Bucket   string
	Key      string
	Region   string
	Endpoint string

	// Performance
	PartSize    int64
	WorkerCount int

	// Security
	Encryption string
	KMSKeyID   string

	// Progress callback
	OnProgress func(progress parallel.Progress)
@@ -201,21 +201,21 @@ type DirectBackupConfig struct {
	DSN string

	// Cloud
	CloudURI string // s3://bucket/path or gs://bucket/path
	Region   string
	Endpoint string

	// Engine selection
	PreferredEngine string // clone, snapshot, dump

	// Performance
	PartSize    int64
	WorkerCount int

	// Options
	Compression   bool
	Encryption    string
	EncryptionKey string
}

// Backup performs a direct backup to cloud