Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 9e98d6fb8d | |
| | 56bb128fdb | |

CHANGELOG.md (36)
@@ -5,6 +5,42 @@ All notable changes to dbbackup will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [4.2.4] - 2026-01-30

### Fixed - Comprehensive Ctrl+C Support Across All Operations

- **System-wide context-aware file operations**
  - All long-running I/O operations now respond to Ctrl+C (a wiring sketch follows this entry)
  - Added `CopyWithContext()` to cloud package for S3/Azure/GCS transfers
  - Partial files are cleaned up on cancellation

- **Fixed components:**
  - `internal/restore/extract.go`: Single DB extraction from cluster
  - `internal/wal/compression.go`: WAL file compression/decompression
  - `internal/restore/engine.go`: SQL restore streaming (2 paths)
  - `internal/backup/engine.go`: pg_dump/mysqldump streaming (3 paths)
  - `internal/cloud/s3.go`: S3 download interruption
  - `internal/cloud/azure.go`: Azure Blob download interruption
  - `internal/cloud/gcs.go`: GCS upload/download interruption
  - `internal/drill/engine.go`: DR drill decompression

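For orientation, here is a minimal, self-contained sketch of the pattern this release describes: a SIGINT-derived context, a dump process streaming through a compressor in a goroutine, and a cancellation-aware copy in between. It is not code from this repository; the standard library `gzip` stands in for pgzip, `sh -c echo` stands in for pg_dump/mysqldump, `cancellableCopy` abridges the `CopyWithContext` shown in the hunks below, and whether dbbackup wires Ctrl+C via `signal.NotifyContext` specifically is not shown in this diff.

```go
package main

import (
	"compress/gzip" // stand-in for github.com/klauspost/pgzip
	"context"
	"fmt"
	"io"
	"os"
	"os/exec"
	"os/signal"
)

// cancellableCopy abridges the CopyWithContext added in this change set:
// copy in chunks and stop as soon as ctx is cancelled.
func cancellableCopy(ctx context.Context, dst io.Writer, src io.Reader) error {
	buf := make([]byte, 1<<20) // 1MB chunks, same granularity the changelog describes
	for {
		if err := ctx.Err(); err != nil {
			return err
		}
		n, err := src.Read(buf)
		if n > 0 {
			if _, werr := dst.Write(buf[:n]); werr != nil {
				return werr
			}
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	// Ctrl+C (SIGINT) cancels ctx; every copy that checks ctx.Done() stops early.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	// Hypothetical dump command; the real engine runs pg_dump or mysqldump.
	cmd := exec.CommandContext(ctx, "sh", "-c", "echo demo dump output")
	pipe, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}

	out, err := os.Create("demo.sql.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	gzWriter := gzip.NewWriter(out)
	defer gzWriter.Close()

	if err := cmd.Start(); err != nil {
		panic(err)
	}

	// Compress the dump stream in the background.
	copyDone := make(chan error, 1)
	go func() {
		copyDone <- cancellableCopy(ctx, gzWriter, pipe)
	}()

	// Waiting on copyDone is enough: on Ctrl+C the copy returns ctx.Err().
	if copyErr := <-copyDone; copyErr != nil {
		fmt.Println("copy interrupted or failed:", copyErr)
	}
	_ = cmd.Wait()
}
```
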
## [4.2.3] - 2026-01-30

### Fixed - Cluster Restore Performance & Ctrl+C Handling

- **Removed redundant gzip validation in cluster restore** (a caller-side sketch follows this entry)
  - `ValidateAndExtractCluster()` no longer calls `ValidateArchive()` internally
  - Previously validation happened 2x before extraction (caller + internal)
  - Eliminates duplicate gzip header reads on large archives
  - Reduces cluster restore startup time

- **Fixed Ctrl+C not working during extraction**
  - Added `CopyWithContext()` function for context-aware file copying
  - Extraction now checks for cancellation every 1MB of data
  - Ctrl+C immediately interrupts large file extractions
  - Partial files are cleaned up on cancellation
  - Applies to both `ExtractTarGzParallel` and `extractArchiveWithProgress`

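A minimal sketch of the calling pattern this entry implies: the caller validates the archive once, then hands it to `ValidateAndExtractCluster()`, which no longer re-reads the gzip header. Only the two method names and the "caller cleans up the returned directory" contract come from this diff; the `Safety` stub bodies, `restoreCluster`, and `main` are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"os"
)

// Safety is a stub standing in for the project's safety type; the method
// bodies below are placeholders, only the names are taken from this diff.
type Safety struct{}

func (s *Safety) ValidateArchive(path string) error { return nil }

func (s *Safety) ValidateAndExtractCluster(ctx context.Context, path string) (string, error) {
	return os.MkdirTemp("", "cluster-extract-")
}

// restoreCluster shows the validate-once-then-extract order the 4.2.3 entry describes.
func restoreCluster(ctx context.Context, safety *Safety, archivePath string) error {
	// Validate exactly once, up front (previously this also happened inside extraction).
	if err := safety.ValidateArchive(archivePath); err != nil {
		return fmt.Errorf("archive validation failed: %w", err)
	}

	// ValidateAndExtractCluster no longer repeats the gzip header check internally.
	extractedDir, err := safety.ValidateAndExtractCluster(ctx, archivePath)
	if err != nil {
		return err
	}
	defer os.RemoveAll(extractedDir) // caller owns cleanup of the temp directory

	// ... restore from extractedDir ...
	return nil
}

func main() {
	_ = restoreCluster(context.Background(), &Safety{}, "cluster.tar.gz")
}
```
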
## [4.2.2] - 2026-01-30

### Fixed - Complete pgzip Migration (Backup Side)

@@ -760,7 +760,7 @@ func (e *Engine) executeMySQLWithProgressAndCompression(ctx context.Context, cmd
	// Copy mysqldump output through pgzip in a goroutine
	copyDone := make(chan error, 1)
	go func() {
-		_, err := io.Copy(gzWriter, pipe)
+		_, err := fs.CopyWithContext(ctx, gzWriter, pipe)
		copyDone <- err
	}()

@@ -839,7 +839,7 @@ func (e *Engine) executeMySQLWithCompression(ctx context.Context, cmdArgs []stri
	// Copy mysqldump output through pgzip in a goroutine
	copyDone := make(chan error, 1)
	go func() {
-		_, err := io.Copy(gzWriter, pipe)
+		_, err := fs.CopyWithContext(ctx, gzWriter, pipe)
		copyDone <- err
	}()

@@ -1497,7 +1497,7 @@ func (e *Engine) executeWithStreamingCompression(ctx context.Context, cmdArgs []
	// Copy from pg_dump stdout to pgzip writer in a goroutine
	copyDone := make(chan error, 1)
	go func() {
-		_, copyErr := io.Copy(gzWriter, dumpStdout)
+		_, copyErr := fs.CopyWithContext(ctx, gzWriter, dumpStdout)
		copyDone <- copyErr
	}()

@@ -312,8 +312,8 @@ func (a *AzureBackend) Download(ctx context.Context, remotePath, localPath strin
	// Wrap reader with progress tracking
	reader := NewProgressReader(resp.Body, fileSize, progress)

-	// Copy with progress
-	_, err = io.Copy(file, reader)
+	// Copy with progress and context awareness
+	_, err = CopyWithContext(ctx, file, reader)
	if err != nil {
		return fmt.Errorf("failed to write file: %w", err)
	}

@@ -128,8 +128,8 @@ func (g *GCSBackend) Upload(ctx context.Context, localPath, remotePath string, p
		reader = NewThrottledReader(ctx, reader, g.config.BandwidthLimit)
	}

-	// Upload with progress tracking
-	_, err = io.Copy(writer, reader)
+	// Upload with progress tracking and context awareness
+	_, err = CopyWithContext(ctx, writer, reader)
	if err != nil {
		writer.Close()
		return fmt.Errorf("failed to upload object: %w", err)

@@ -191,8 +191,8 @@ func (g *GCSBackend) Download(ctx context.Context, remotePath, localPath string,
	// Wrap reader with progress tracking
	progressReader := NewProgressReader(reader, fileSize, progress)

-	// Copy with progress
-	_, err = io.Copy(file, progressReader)
+	// Copy with progress and context awareness
+	_, err = CopyWithContext(ctx, file, progressReader)
	if err != nil {
		return fmt.Errorf("failed to write file: %w", err)
	}

@@ -170,3 +170,39 @@ func (pr *ProgressReader) Read(p []byte) (int, error) {

	return n, err
}

// CopyWithContext copies data from src to dst while checking for context cancellation.
// This allows Ctrl+C to interrupt large file transfers instead of blocking until complete.
// Checks context every 1MB of data copied for responsive interruption.
func CopyWithContext(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	buf := make([]byte, 1024*1024) // 1MB buffer - check context every 1MB
	var written int64
	for {
		// Check for cancellation before each read
		select {
		case <-ctx.Done():
			return written, ctx.Err()
		default:
		}

		nr, readErr := src.Read(buf)
		if nr > 0 {
			nw, writeErr := dst.Write(buf[:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if writeErr != nil {
				return written, writeErr
			}
			if nr != nw {
				return written, io.ErrShortWrite
			}
		}
		if readErr != nil {
			if readErr == io.EOF {
				return written, nil
			}
			return written, readErr
		}
	}
}

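The cancellation semantics of the function above can be pinned down with a small test. This is a hypothetical test fragment, not part of the diff; it assumes placement in the same cloud package, with `context`, `io`, `strings`, and `testing` imported.

```go
// Hypothetical test exercising the CopyWithContext defined above.
func TestCopyWithContextCancellation(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate Ctrl+C arriving before/while copying

	n, err := CopyWithContext(ctx, io.Discard, strings.NewReader("never copied"))
	if err != context.Canceled {
		t.Fatalf("expected context.Canceled, got %v", err)
	}
	if n != 0 {
		t.Fatalf("expected 0 bytes written, got %d", n)
	}
}
```

On cancellation the function returns the byte count written so far plus `ctx.Err()`; removing the partial output file is left to the caller, which is exactly what the `os.Remove` calls in the surrounding hunks do.
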
@@ -256,7 +256,7 @@ func (s *S3Backend) Download(ctx context.Context, remotePath, localPath string,
		reader = NewProgressReader(result.Body, size, progress)
	}

-	_, err = io.Copy(outFile, reader)
+	_, err = CopyWithContext(ctx, outFile, reader)
	if err != nil {
		return fmt.Errorf("failed to write file: %w", err)
	}

@@ -4,12 +4,12 @@ package drill

import (
	"context"
	"fmt"
-	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

+	"dbbackup/internal/fs"
	"dbbackup/internal/logger"

	"github.com/klauspost/pgzip"

@@ -267,7 +267,9 @@ func (e *Engine) decompressWithPgzip(srcPath string) (string, error) {
	}
	defer dstFile.Close()

-	if _, err := io.Copy(dstFile, gz); err != nil {
+	// Use context.Background() since decompressWithPgzip doesn't take context
+	// The parent restoreBackup function handles context cancellation
+	if _, err := fs.CopyWithContext(context.Background(), dstFile, gz); err != nil {
		os.Remove(dstPath)
		return "", fmt.Errorf("decompression failed: %w", err)
	}

@@ -14,6 +14,42 @@ import (
	"github.com/klauspost/pgzip"
)

// CopyWithContext copies data from src to dst while checking for context cancellation.
// This allows Ctrl+C to interrupt large file extractions instead of blocking until complete.
// Checks context every 1MB of data copied for responsive interruption.
func CopyWithContext(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	buf := make([]byte, 1024*1024) // 1MB buffer - check context every 1MB
	var written int64
	for {
		// Check for cancellation before each read
		select {
		case <-ctx.Done():
			return written, ctx.Err()
		default:
		}

		nr, readErr := src.Read(buf)
		if nr > 0 {
			nw, writeErr := dst.Write(buf[:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if writeErr != nil {
				return written, writeErr
			}
			if nr != nw {
				return written, io.ErrShortWrite
			}
		}
		if readErr != nil {
			if readErr == io.EOF {
				return written, nil
			}
			return written, readErr
		}
	}
}

// ParallelGzipWriter wraps pgzip.Writer for streaming compression
type ParallelGzipWriter struct {
	*pgzip.Writer

@@ -134,11 +170,13 @@ func ExtractTarGzParallel(ctx context.Context, archivePath, destDir string, prog
			return fmt.Errorf("cannot create file %s: %w", targetPath, err)
		}

-		// Copy with size limit to prevent zip bombs
-		written, err := io.Copy(outFile, tarReader)
+		// Copy with context awareness to allow Ctrl+C interruption during large file extraction
+		written, err := CopyWithContext(ctx, outFile, tarReader)
		outFile.Close()

		if err != nil {
			// Clean up partial file on error
			os.Remove(targetPath)
			return fmt.Errorf("error writing %s: %w", targetPath, err)
		}

@@ -743,7 +743,7 @@ func (e *Engine) executeRestoreWithDecompression(ctx context.Context, archivePat
	// Stream decompressed data to restore command in goroutine
	copyDone := make(chan error, 1)
	go func() {
-		_, copyErr := io.Copy(stdin, gz)
+		_, copyErr := fs.CopyWithContext(ctx, stdin, gz)
		stdin.Close()
		copyDone <- copyErr
	}()

@@ -853,7 +853,7 @@ func (e *Engine) executeRestoreWithPgzipStream(ctx context.Context, archivePath,
	// Stream decompressed data to restore command in goroutine
	copyDone := make(chan error, 1)
	go func() {
-		_, copyErr := io.Copy(stdin, gz)
+		_, copyErr := fs.CopyWithContext(ctx, stdin, gz)
		stdin.Close()
		copyDone <- copyErr
	}()

@@ -1907,20 +1907,24 @@ func (e *Engine) extractArchiveWithProgress(ctx context.Context, archivePath, de
			return fmt.Errorf("failed to create file %s: %w", targetPath, err)
		}

-		// Copy file contents - use buffered I/O for turbo mode (32KB buffer)
+		// Copy file contents with context awareness for Ctrl+C interruption
+		// Use buffered I/O for turbo mode (32KB buffer)
		if e.cfg.BufferedIO {
			bufferedWriter := bufio.NewWriterSize(outFile, 32*1024) // 32KB buffer for faster writes
-			if _, err := io.Copy(bufferedWriter, tarReader); err != nil {
+			if _, err := fs.CopyWithContext(ctx, bufferedWriter, tarReader); err != nil {
				outFile.Close()
				os.Remove(targetPath) // Clean up partial file
				return fmt.Errorf("failed to write file %s: %w", targetPath, err)
			}
			if err := bufferedWriter.Flush(); err != nil {
				outFile.Close()
				os.Remove(targetPath)
				return fmt.Errorf("failed to flush buffer for %s: %w", targetPath, err)
			}
		} else {
-			if _, err := io.Copy(outFile, tarReader); err != nil {
+			if _, err := fs.CopyWithContext(ctx, outFile, tarReader); err != nil {
				outFile.Close()
				os.Remove(targetPath) // Clean up partial file
				return fmt.Errorf("failed to write file %s: %w", targetPath, err)
			}
		}

@@ -10,6 +10,7 @@ import (
	"sort"
	"strings"

+	"dbbackup/internal/fs"
	"dbbackup/internal/logger"
	"dbbackup/internal/progress"

@@ -180,10 +181,11 @@ func ExtractDatabaseFromCluster(ctx context.Context, archivePath, dbName, output
			prog.Update(fmt.Sprintf("Extracting: %s", filename))
		}

-		written, err := io.Copy(outFile, tarReader)
+		written, err := fs.CopyWithContext(ctx, outFile, tarReader)
		outFile.Close()
		if err != nil {
			close(stopTicker)
			os.Remove(extractedPath) // Clean up partial file
			return "", fmt.Errorf("extraction failed: %w", err)
		}

@@ -309,10 +311,11 @@ func ExtractMultipleDatabasesFromCluster(ctx context.Context, archivePath string
			prog.Update(fmt.Sprintf("Extracting: %s (%d/%d)", dbName, len(extractedPaths)+1, len(dbNames)))
		}

-		written, err := io.Copy(outFile, tarReader)
+		written, err := fs.CopyWithContext(ctx, outFile, tarReader)
		outFile.Close()
		if err != nil {
			close(stopTicker)
			os.Remove(extractedPath) // Clean up partial file
			return nil, fmt.Errorf("extraction failed for %s: %w", dbName, err)
		}

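The two hunks above modify only the copy step inside a larger tar-walking loop. For readers unfamiliar with that loop, here is a generic, self-contained illustration of extracting one member from a .tar.gz archive; it is not the project's implementation, it uses the standard library `gzip` instead of pgzip, and the file names in `main` are placeholders. The comment marks where the project plugs in `fs.CopyWithContext` and the partial-file cleanup shown above.

```go
package main

import (
	"archive/tar"
	"compress/gzip" // the project uses pgzip; stdlib gzip keeps the sketch self-contained
	"fmt"
	"io"
	"os"
)

// extractOne pulls a single member out of a .tar.gz archive.
func extractOne(archivePath, memberName, outPath string) error {
	f, err := os.Open(archivePath)
	if err != nil {
		return err
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		return err
	}
	defer gz.Close()

	tarReader := tar.NewReader(gz)
	for {
		hdr, err := tarReader.Next()
		if err == io.EOF {
			return fmt.Errorf("%s not found in %s", memberName, archivePath)
		}
		if err != nil {
			return err
		}
		if hdr.Name != memberName {
			continue
		}
		out, err := os.Create(outPath)
		if err != nil {
			return err
		}
		// The real code uses fs.CopyWithContext here so Ctrl+C can interrupt the copy.
		_, copyErr := io.Copy(out, tarReader)
		out.Close()
		if copyErr != nil {
			os.Remove(outPath) // clean up the partial file, as the hunks above do
			return copyErr
		}
		return nil
	}
}

func main() {
	if err := extractOne("cluster.tar.gz", "mydb.sql", "mydb.sql"); err != nil {
		fmt.Println(err)
	}
}
```
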
@@ -262,11 +262,11 @@ func containsSQLKeywords(content string) bool {
// ValidateAndExtractCluster performs validation and pre-extraction for cluster restore
// Returns path to extracted directory (in temp location) to avoid double-extraction
// Caller must clean up the returned directory with os.RemoveAll() when done
+// NOTE: Caller should call ValidateArchive() before this function if validation is needed
+// This avoids redundant gzip header reads which can be slow on large archives
func (s *Safety) ValidateAndExtractCluster(ctx context.Context, archivePath string) (extractedDir string, err error) {
-	// First validate archive integrity (fast stream check)
-	if err := s.ValidateArchive(archivePath); err != nil {
-		return "", fmt.Errorf("archive validation failed: %w", err)
-	}
+	// Skip redundant validation here - caller already validated via ValidateArchive()
+	// Opening gzip multiple times is expensive on large archives

	// Create temp directory for extraction in configured WorkDir
	workDir := s.cfg.GetEffectiveWorkDir()

@@ -1,14 +1,16 @@
package wal

import (
+	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"

-	"github.com/klauspost/pgzip"
+	"dbbackup/internal/fs"
	"dbbackup/internal/logger"

+	"github.com/klauspost/pgzip"
)

// Compressor handles WAL file compression

@@ -26,6 +28,11 @@ func NewCompressor(log logger.Logger) *Compressor {
// CompressWALFile compresses a WAL file using parallel gzip (pgzip)
// Returns the path to the compressed file and the compressed size
func (c *Compressor) CompressWALFile(sourcePath, destPath string, level int) (int64, error) {
+	return c.CompressWALFileContext(context.Background(), sourcePath, destPath, level)
+}
+
+// CompressWALFileContext compresses a WAL file with context for cancellation support
+func (c *Compressor) CompressWALFileContext(ctx context.Context, sourcePath, destPath string, level int) (int64, error) {
	c.log.Debug("Compressing WAL file", "source", sourcePath, "dest", destPath, "level", level)

	// Open source file

@@ -56,8 +63,8 @@ func (c *Compressor) CompressWALFile(sourcePath, destPath string, level int) (in
	}
	defer gzWriter.Close()

-	// Copy and compress
-	_, err = io.Copy(gzWriter, srcFile)
+	// Copy and compress with context support
+	_, err = fs.CopyWithContext(ctx, gzWriter, srcFile)
	if err != nil {
		return 0, fmt.Errorf("compression failed: %w", err)
	}

@@ -91,6 +98,11 @@ func (c *Compressor) CompressWALFile(sourcePath, destPath string, level int) (in

// DecompressWALFile decompresses a gzipped WAL file
func (c *Compressor) DecompressWALFile(sourcePath, destPath string) (int64, error) {
+	return c.DecompressWALFileContext(context.Background(), sourcePath, destPath)
+}
+
+// DecompressWALFileContext decompresses a gzipped WAL file with context for cancellation
+func (c *Compressor) DecompressWALFileContext(ctx context.Context, sourcePath, destPath string) (int64, error) {
	c.log.Debug("Decompressing WAL file", "source", sourcePath, "dest", destPath)

	// Open compressed source file

@@ -114,9 +126,10 @@ func (c *Compressor) DecompressWALFile(sourcePath, destPath string) (int64, erro
	}
	defer dstFile.Close()

-	// Decompress
-	written, err := io.Copy(dstFile, gzReader)
+	// Decompress with context support
+	written, err := fs.CopyWithContext(ctx, dstFile, gzReader)
	if err != nil {
		os.Remove(destPath) // Clean up partial file
		return 0, fmt.Errorf("decompression failed: %w", err)
	}

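The WAL hunks keep the old entry points as thin wrappers over new `*Context` variants, so existing callers keep working while new callers can pass a cancellable context. As a hypothetical caller-side sketch (placed in the wal package; only `NewCompressor` and `CompressWALFileContext` come from this diff, the paths, timeout, and compression level 6 are illustrative):

```go
// archiveWALSegment compresses a WAL segment but gives up if it takes too long.
func archiveWALSegment(ctx context.Context, log logger.Logger, segPath, destPath string) (int64, error) {
	// Bound the work; cancellation (Ctrl+C or timeout) stops the copy mid-stream.
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	c := NewCompressor(log)
	size, err := c.CompressWALFileContext(ctx, segPath, destPath, 6)
	if err != nil {
		// The destination may be partial on cancellation; treat it as unusable.
		return 0, err
	}
	return size, nil
}
```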