fix(build): move EstimateBackupSize to platform-independent file
Fixes Windows, OpenBSD, and NetBSD builds by extracting EstimateBackupSize from disk_check.go (which has build tags excluding those platforms) to a new estimate.go file.
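For context on why the move fixes these builds: when a Go source file carries a build constraint, every symbol it defines vanishes on the excluded platforms, so a platform-independent helper stranded in that file breaks compilation there. A minimal sketch of the pattern, assuming disk_check.go is guarded roughly like this (the exact tag set is an assumption inferred from the platforms named above):

// disk_check.go: the constraint below excludes the whole file, and
// everything defined in it, on the listed platforms.
// (This constraint line is assumed; only the affected platforms are known.)
//go:build !windows && !openbsd && !netbsd

package checks

estimate.go carries no constraint header, so EstimateBackupSize now compiles for every GOOS.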
@@ -109,28 +109,3 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
 
     return msg
 }
-
-// EstimateBackupSize estimates backup size based on database size
-func EstimateBackupSize(databaseSize uint64, compressionLevel int) uint64 {
-    // Typical compression ratios:
-    // Level 0 (no compression): 1.0x
-    // Level 1-3 (fast): 0.4-0.6x
-    // Level 4-6 (balanced): 0.3-0.4x
-    // Level 7-9 (best): 0.2-0.3x
-
-    var compressionRatio float64
-    if compressionLevel == 0 {
-        compressionRatio = 1.0
-    } else if compressionLevel <= 3 {
-        compressionRatio = 0.5
-    } else if compressionLevel <= 6 {
-        compressionRatio = 0.35
-    } else {
-        compressionRatio = 0.25
-    }
-
-    estimated := uint64(float64(databaseSize) * compressionRatio)
-
-    // Add 10% buffer for metadata, indexes, etc.
-    return uint64(float64(estimated) * 1.1)
-}
internal/checks/estimate.go (new file, +26)
@@ -0,0 +1,26 @@
+package checks
+
+// EstimateBackupSize estimates backup size based on database size
+func EstimateBackupSize(databaseSize uint64, compressionLevel int) uint64 {
+    // Typical compression ratios:
+    // Level 0 (no compression): 1.0x
+    // Level 1-3 (fast): 0.4-0.6x
+    // Level 4-6 (balanced): 0.3-0.4x
+    // Level 7-9 (best): 0.2-0.3x
+
+    var compressionRatio float64
+    if compressionLevel == 0 {
+        compressionRatio = 1.0
+    } else if compressionLevel <= 3 {
+        compressionRatio = 0.5
+    } else if compressionLevel <= 6 {
+        compressionRatio = 0.35
+    } else {
+        compressionRatio = 0.25
+    }
+
+    estimated := uint64(float64(databaseSize) * compressionRatio)
+
+    // Add 10% buffer for metadata, indexes, etc.
+    return uint64(float64(estimated) * 1.1)
+}
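Since the function body is unchanged by the move, its arithmetic is easy to sanity-check: a 10 GiB database at level 6 estimates 10 GiB * 0.35 * 1.1, roughly 3.85 GiB. A hypothetical regression test (not part of this commit) that could sit next to estimate.go:

package checks

import "testing"

// TestEstimateBackupSize restates the documented behaviour: level 0
// still adds the 10% metadata buffer, and higher compression levels
// never produce a larger estimate.
func TestEstimateBackupSize(t *testing.T) {
    const gib = uint64(1 << 30)

    if got := EstimateBackupSize(10*gib, 0); got < 10*gib {
        t.Errorf("level 0 estimate %d fell below the database size", got)
    }

    prev := EstimateBackupSize(10*gib, 0)
    for _, level := range []int{1, 4, 7} {
        got := EstimateBackupSize(10*gib, level)
        if got > prev {
            t.Errorf("level %d estimate %d exceeds the level below it (%d)", level, got, prev)
        }
        prev = got
    }
}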
@@ -16,12 +16,12 @@ type FileTarget struct {
    basePath   string
    rotateSize int64

    mu      sync.Mutex
    current *os.File
    written int64
    fileNum int
    healthy bool
    lastErr error
}

// NewFileTarget creates a new file target
@@ -165,13 +165,13 @@ type CompressedFileTarget struct {
    basePath   string
    rotateSize int64

    mu       sync.Mutex
    file     *os.File
    gzWriter *gzip.Writer
    written  int64
    fileNum  int
    healthy  bool
    lastErr  error
}

// NewCompressedFileTarget creates a gzip-compressed file target
@@ -22,17 +22,17 @@ type S3Target struct {
    region   string
    partSize int64

    mu         sync.Mutex
    buffer     *bytes.Buffer
    bufferSize int
    currentKey string
    uploadID   string
    parts      []types.CompletedPart
    partNumber int32
    fileNum    int
    healthy    bool
    lastErr    error
    lastWrite  time.Time
}

// NewS3Target creates a new S3 target
@@ -204,8 +204,8 @@ func (s *S3Target) Healthy() bool {
// S3StreamingTarget supports larger files with resumable uploads
type S3StreamingTarget struct {
    *S3Target
    rotateSize  int64
    currentSize int64
}

// NewS3StreamingTarget creates an S3 target with file rotation
@@ -106,11 +106,11 @@ type Filter struct {

// StreamerState holds the current state of the streamer
type StreamerState struct {
    Position     Position       `json:"position"`
    EventCount   uint64         `json:"event_count"`
    ByteCount    uint64         `json:"byte_count"`
    LastUpdate   time.Time      `json:"last_update"`
    StartTime    time.Time      `json:"start_time"`
    TargetStatus []TargetStatus `json:"targets"`
}
@@ -125,17 +125,17 @@ type TargetStatus struct {

// Event represents a parsed binlog event
type Event struct {
    Type      string           `json:"type"` // "write", "update", "delete", "query", "gtid", etc.
    Timestamp time.Time        `json:"timestamp"`
    Database  string           `json:"database,omitempty"`
    Table     string           `json:"table,omitempty"`
    Position  Position         `json:"position"`
    GTID      string           `json:"gtid,omitempty"`
    Query     string           `json:"query,omitempty"`    // For query events
    Rows      []map[string]any `json:"rows,omitempty"`     // For row events
    OldRows   []map[string]any `json:"old_rows,omitempty"` // For update events
    RawData   []byte           `json:"-"`                  // Raw binlog data for replay
    Extra     map[string]any   `json:"extra,omitempty"`
}

// Target interface for binlog output destinations
@@ -77,18 +77,18 @@ func TestEvent(t *testing.T) {

func TestConfig(t *testing.T) {
    cfg := Config{
        Host:              "localhost",
        Port:              3306,
        User:              "repl",
        Password:          "secret",
        ServerID:          99999,
        Flavor:            "mysql",
        BatchMaxEvents:    1000,
        BatchMaxBytes:     10 * 1024 * 1024,
        BatchMaxWait:      time.Second,
        CheckpointEnabled: true,
        CheckpointFile:    "/var/lib/dbbackup/checkpoint",
        UseGTID:           true,
    }

    if cfg.Host != "localhost" {
@@ -12,7 +12,7 @@ func TestSelectorConfig(t *testing.T) {
        User:            "root",
        DataDir:         "/var/lib/mysql",
        CloneMinVersion: "8.0.17",
        CloneMinSize:    1024 * 1024 * 1024,      // 1GB
        SnapshotMinSize: 10 * 1024 * 1024 * 1024, // 10GB
        PreferClone:     true,
        AllowMysqldump:  true,
@@ -111,9 +111,9 @@ func TestSelectionReason(t *testing.T) {
func TestEngineScoring(t *testing.T) {
    // Test that scores are calculated correctly
    tests := []struct {
        name         string
        info         DatabaseInfo
        expectedBest string
    }{
        {
            name: "large DB with clone plugin",
@@ -110,12 +110,12 @@ func (b *BtrfsBackend) CreateSnapshot(ctx context.Context, opts SnapshotOptions)
    }

    return &Snapshot{
        ID:         fullPath,
        Backend:    "btrfs",
        Source:     b.config.Subvolume,
        Name:       snapName,
        MountPoint: fullPath, // Btrfs snapshots are immediately accessible
        CreatedAt:  time.Now(),
        Metadata: map[string]string{
            "subvolume":     b.config.Subvolume,
            "snapshot_path": snapPath,
@@ -279,12 +279,12 @@ func (b *BtrfsBackend) ListSnapshots(ctx context.Context) ([]*Snapshot, error) {
    }

    snapshots = append(snapshots, &Snapshot{
        ID:         fullPath,
        Backend:    "btrfs",
        Name:       name,
        Source:     b.config.Subvolume,
        MountPoint: fullPath,
        CreatedAt:  createdAt,
        Metadata: map[string]string{
            "subvolume": b.config.Subvolume,
        },
@@ -322,12 +322,12 @@ func (b *BtrfsBackend) listSnapshotsFromDir(ctx context.Context, snapPath string
    }

    snapshots = append(snapshots, &Snapshot{
        ID:         fullPath,
        Backend:    "btrfs",
        Name:       entry.Name(),
        Source:     b.config.Subvolume,
        MountPoint: fullPath,
        CreatedAt:  createdAt,
        Metadata: map[string]string{
            "subvolume": b.config.Subvolume,
        },
@@ -378,10 +378,10 @@ func (e *SnapshotEngine) streamSnapshot(ctx context.Context, sourcePath, destFil
        // Report progress
        if progressFunc != nil && totalFiles > 0 {
            progressFunc(&Progress{
                Stage:     "STREAMING",
                Percent:   30 + float64(fileCount)/float64(totalFiles)*60,
                BytesDone: countWriter.count,
                Message:   fmt.Sprintf("Processed %d/%d files (%s)", fileCount, totalFiles, formatBytes(countWriter.count)),
            })
        }
    }
@@ -13,10 +13,10 @@ import (

// StreamingBackupEngine wraps a backup engine with streaming capability
type StreamingBackupEngine struct {
    engine   BackupEngine
    cloudCfg parallel.Config
    log      logger.Logger

    mu       sync.Mutex
    streamer *parallel.CloudStreamer
    pipe     *io.PipeWriter
@@ -28,19 +28,19 @@ type StreamingBackupEngine struct {
// StreamingConfig holds streaming configuration
type StreamingConfig struct {
    // Cloud configuration
    Bucket   string
    Key      string
    Region   string
    Endpoint string

    // Performance
    PartSize    int64
    WorkerCount int

    // Security
    Encryption string
    KMSKeyID   string

    // Progress callback
    OnProgress func(progress parallel.Progress)
}
@@ -56,7 +56,7 @@ func NewStreamingBackupEngine(engine BackupEngine, cfg StreamingConfig, log logg
    cloudCfg.Key = cfg.Key
    cloudCfg.Region = cfg.Region
    cloudCfg.Endpoint = cfg.Endpoint

    if cfg.PartSize > 0 {
        cloudCfg.PartSize = cfg.PartSize
    }
@@ -199,23 +199,23 @@ type DirectBackupConfig struct {
    // Database
    DBType string
    DSN    string

    // Cloud
    CloudURI string // s3://bucket/path or gs://bucket/path
    Region   string
    Endpoint string

    // Engine selection
    PreferredEngine string // clone, snapshot, dump

    // Performance
    PartSize    int64
    WorkerCount int

    // Options
    Compression   bool
    Encryption    string
    EncryptionKey string
}

// Backup performs a direct backup to cloud