Compare commits
50 Commits
7711a206ab, ba6e8a2b39, ec5e89eab7, e24d7ab49f, 721e53fe6a, 4e09066aa5, 6a24ee39be, dc6dfd8b2c, 7b4ab76313, c0d92b3a81, 8c85d85249, e0cdcb28be, 22a7b9e81e, c71889be47, 222bdbef58, f7e9fa64f0, f153e61dbf, d19c065658, 8dac5efc10, fd5edce5ae, a7e2c86618, b2e0c739e0, ad23abdf4e, 390b830976, 7e53950967, 59d2094241, b1f8c6d646, b05c2be19d, ec33959e3e, 92402f0fdb, 682510d1bc, 83ad62b6b5, 55d34be32e, 1831bd7c1f, 24377eab8f, 3e41d88445, 5fb88b14ba, cccee4294f, 9688143176, e821e131b4, 15a60d2e71, 9c65821250, 627061cdbb, e1a7c57e0f, 22915102d4, 3653ced6da, 9743d571ce, c519f08ef2, b99b05fedb, c5f2c3322c
@@ -56,14 +56,14 @@ jobs:
       - name: Install and run golangci-lint
         run: |
-          go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2
+          go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.8.0
           golangci-lint run --timeout=5m ./...

   build-and-release:
     name: Build & Release
     runs-on: ubuntu-latest
     needs: [test, lint]
-    if: startsWith(github.ref, 'refs/tags/')
+    if: startsWith(github.ref, 'refs/tags/v')
     container:
       image: golang:1.24-bookworm
     steps:
@@ -82,24 +82,27 @@ jobs:
         run: |
           mkdir -p release

-          # Linux amd64
-          echo "Building linux/amd64..."
-          CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-linux-amd64 .
+          # Install cross-compilation tools for CGO
+          apt-get update && apt-get install -y -qq gcc-aarch64-linux-gnu

-          # Linux arm64
-          echo "Building linux/arm64..."
-          CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -o release/dbbackup-linux-arm64 .
+          # Linux amd64 (with CGO for SQLite)
+          echo "Building linux/amd64 (CGO enabled)..."
+          CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-linux-amd64 .

-          # Darwin amd64
-          echo "Building darwin/amd64..."
+          # Linux arm64 (with CGO for SQLite)
+          echo "Building linux/arm64 (CGO enabled)..."
+          CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -o release/dbbackup-linux-arm64 .
+
+          # Darwin amd64 (no CGO - cross-compile limitation)
+          echo "Building darwin/amd64 (CGO disabled)..."
           CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-darwin-amd64 .

-          # Darwin arm64
-          echo "Building darwin/arm64..."
+          # Darwin arm64 (no CGO - cross-compile limitation)
+          echo "Building darwin/arm64 (CGO disabled)..."
           CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -ldflags="-s -w" -o release/dbbackup-darwin-arm64 .

-          # FreeBSD amd64
-          echo "Building freebsd/amd64..."
+          # FreeBSD amd64 (no CGO - cross-compile limitation)
+          echo "Building freebsd/amd64 (CGO disabled)..."
           CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-freebsd-amd64 .

           echo "All builds complete:"
@@ -155,28 +158,4 @@ jobs:
           done

           echo "Gitea release complete!"

-  # Mirror to GitHub (optional - runs if GITHUB_MIRROR_TOKEN secret is set)
-  mirror-to-github:
-    name: Mirror to GitHub
-    runs-on: ubuntu-latest
-    needs: [build-and-release]
-    if: startsWith(github.ref, 'refs/tags/') && vars.GITHUB_MIRROR_TOKEN != ''
-    continue-on-error: true
-    steps:
-      - name: Mirror to GitHub
-        env:
-          GITHUB_MIRROR_TOKEN: ${{ vars.GITHUB_MIRROR_TOKEN }}
-        run: |
-          TAG=${GITHUB_REF#refs/tags/}
-
-          echo "Mirroring ${TAG} to GitHub..."
-
-          # Clone from Gitea
-          git clone --bare "https://git.uuxo.net/${GITHUB_REPOSITORY}.git" repo.git
-          cd repo.git
-
-          # Push to GitHub
-          git push --mirror "https://${GITHUB_MIRROR_TOKEN}@github.com/PlusOne/dbbackup.git" || echo "Mirror push failed (non-critical)"
-
-          echo "GitHub mirror complete!"
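Note: the switch to CGO_ENABLED=1 for the Linux builds above exists because the backup catalog is SQLite-backed (see v3.41.1 in the CHANGELOG below: "Enabled CGO for Linux builds (required for SQLite catalog)"). A minimal sketch of why that forces cgo; the mattn/go-sqlite3 driver is an assumption here, not something this diff confirms:

```go
package catalog

import (
	"database/sql"

	// Assumed driver: mattn/go-sqlite3 is a cgo binding, so any binary
	// importing it must be built with CGO_ENABLED=1 (and, for arm64,
	// with a cross C compiler such as gcc-aarch64-linux-gnu).
	_ "github.com/mattn/go-sqlite3"
)

// Open opens (or creates) the SQLite catalog database file.
func Open(path string) (*sql.DB, error) {
	return sql.Open("sqlite3", path)
}
```

The darwin and freebsd targets stay at CGO_ENABLED=0, presumably because no matching C cross-toolchain is available in the build container.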
.gitignore (vendored): 4 additions
@@ -34,3 +34,7 @@ coverage.html
 # Ignore temporary files
 tmp/
 temp/
+CRITICAL_BUGS_FIXED.md
+LEGAL_DOCUMENTATION.md
+LEGAL_*.md
+legal/
@@ -1,16 +1,16 @@
 # golangci-lint configuration - relaxed for existing codebase
+version: "2"

 run:
   timeout: 5m
-  tests: false

 linters:
-  disable-all: true
+  default: none
   enable:
     # Only essential linters that catch real bugs
     - govet
-    - ineffassign

-linters-settings:
+settings:
   govet:
     disable:
       - fieldalignment
CHANGELOG.md: 308 changes

@@ -5,9 +5,313 @@ All notable changes to dbbackup will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

-## [3.42.0] - 2026-01-07 "The Operator"
+## [3.42.34] - 2026-01-14 "Filesystem Abstraction"

-### Added - 🐧 Systemd Integration & Prometheus Metrics
+### Added - spf13/afero for Filesystem Abstraction
+- **New `internal/fs` package** for testable filesystem operations
+- **In-memory filesystem** for unit testing without disk I/O
+- **Global FS interface** that can be swapped for testing:
+  ```go
+  fs.SetFS(afero.NewMemMapFs())  // Use memory
+  fs.ResetFS()                   // Back to real disk
+  ```
+- **Wrapper functions** for all common file operations:
+  - `ReadFile`, `WriteFile`, `Create`, `Open`, `Remove`, `RemoveAll`
+  - `Mkdir`, `MkdirAll`, `ReadDir`, `Walk`, `Glob`
+  - `Exists`, `DirExists`, `IsDir`, `IsEmpty`
+  - `TempDir`, `TempFile`, `CopyFile`, `FileSize`
+- **Testing helpers**:
+  - `WithMemFs(fn)` - Execute function with temp in-memory FS
+  - `SetupTestDir(files)` - Create test directory structure
+- **Comprehensive test suite** demonstrating usage
+
+### Changed
+- Upgraded afero from v1.10.0 to v1.15.0
+
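Note: a sketch of the test workflow these wrappers enable. Only `SetFS`/`ResetFS` appear verbatim above; the `WriteFile`/`Exists` signatures and the import path are assumptions modeled on `os` and `afero`:

```go
package fs_test

import (
	"testing"

	"github.com/spf13/afero"

	"dbbackup/internal/fs" // hypothetical import path
)

func TestManifestWrite(t *testing.T) {
	fs.SetFS(afero.NewMemMapFs()) // all fs.* calls now hit memory, not disk
	defer fs.ResetFS()            // back to the real OS filesystem

	if err := fs.WriteFile("/backups/manifest.json", []byte(`{}`), 0o644); err != nil {
		t.Fatal(err)
	}
	ok, err := fs.Exists("/backups/manifest.json") // assumed (bool, error), like afero.Exists
	if err != nil || !ok {
		t.Fatalf("manifest missing: ok=%v err=%v", ok, err)
	}
}
```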
+## [3.42.33] - 2026-01-14 "Exponential Backoff Retry"
+
+### Added - cenkalti/backoff for Cloud Operation Retry
+- **Exponential backoff retry** for all cloud operations (S3, Azure, GCS)
+- **Retry configurations**:
+  - `DefaultRetryConfig()` - 5 retries, 500ms→30s backoff, 5 min max
+  - `AggressiveRetryConfig()` - 10 retries, 1s→60s backoff, 15 min max
+  - `QuickRetryConfig()` - 3 retries, 100ms→5s backoff, 30s max
+- **Smart error classification**:
+  - `IsPermanentError()` - Auth/bucket errors (no retry)
+  - `IsRetryableError()` - Timeout/network errors (retry)
+- **Retry logging** - Each retry attempt is logged with wait duration
+
+### Changed
+- S3 simple upload, multipart upload, download now retry on transient failures
+- Azure simple upload, download now retry on transient failures
+- GCS upload, download now retry on transient failures
+- Large file multipart uploads use `AggressiveRetryConfig()` (more retries)
+
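Note: the retry wrapper plausibly reduces to the standard cenkalti/backoff/v4 pattern below. The `put` and `permanent` hooks are placeholders; the internals of `DefaultRetryConfig()` are not shown in this diff:

```go
package cloud

import (
	"context"

	"github.com/cenkalti/backoff/v4"
)

// uploadWithRetry retries a transient-failure-prone operation with
// exponential backoff, aborting early on context cancellation and
// immediately on errors classified as permanent.
func uploadWithRetry(ctx context.Context, put func() error, permanent func(error) bool) error {
	op := func() error {
		err := put()
		if err != nil && permanent(err) {
			return backoff.Permanent(err) // auth/bucket errors: stop retrying
		}
		return err // nil on success; transient errors schedule the next attempt
	}
	return backoff.Retry(op, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
}
```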
+## [3.42.32] - 2026-01-14 "Cross-Platform Colors"
+
+### Added - fatih/color for Cross-Platform Terminal Colors
+- **Windows-compatible colors** - Native Windows console API support
+- **Color helper functions** in `logger` package:
+  - `Success()`, `Error()`, `Warning()`, `Info()` - Status messages with icons
+  - `Header()`, `Dim()`, `Bold()` - Text styling
+  - `Green()`, `Red()`, `Yellow()`, `Cyan()` - Colored text
+  - `StatusLine()`, `TableRow()` - Formatted output
+  - `DisableColors()`, `EnableColors()` - Runtime control
+- **Consistent color scheme** across all log levels
+
+### Changed
+- Logger `CleanFormatter` now uses fatih/color instead of raw ANSI codes
+- All progress indicators use fatih/color for `[OK]`/`[FAIL]` status
+- Automatic color detection (disabled for non-TTY)
+
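Note: the listed helpers plausibly reduce to thin wrappers over fatih/color; a sketch (the real signatures are not in this diff):

```go
package logger

import "github.com/fatih/color"

// Success prints a green status line. fatih/color uses the native console
// API on Windows and disables itself automatically on non-TTY output.
func Success(format string, a ...any) {
	color.New(color.FgGreen).Printf("[OK] "+format+"\n", a...)
}

// Error prints a red status line.
func Error(format string, a ...any) {
	color.New(color.FgRed).Printf("[FAIL] "+format+"\n", a...)
}
```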
+## [3.42.31] - 2026-01-14 "Visual Progress Bars"
+
+### Added - schollz/progressbar for Enhanced Progress Display
+- **Visual progress bars** for cloud uploads/downloads with:
+  - Byte transfer display (e.g., `245 MB / 1.2 GB`)
+  - Transfer speed (e.g., `45 MB/s`)
+  - ETA prediction
+  - Color-coded progress with Unicode blocks
+- **Checksum verification progress** - visual progress while calculating SHA-256
+- **Spinner for indeterminate operations** - Braille-style spinner when size unknown
+- New progress types: `NewSchollzBar()`, `NewSchollzBarItems()`, `NewSchollzSpinner()`
+- Progress bar `Writer()` method for io.Copy integration
+
+### Changed
+- Cloud download shows real-time byte progress instead of 10% log messages
+- Cloud upload shows visual progress bar instead of debug logs
+- Checksum verification shows progress for large files
+
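Note: the `Writer()`/io.Copy integration mentioned above follows the usual schollz/progressbar/v3 pattern: tee the transfer through the bar so every byte copied advances it. A sketch:

```go
package cloud

import (
	"io"

	"github.com/schollz/progressbar/v3"
)

// download copies src (of known size) to dst while rendering a
// byte-denominated bar with transfer speed and ETA.
func download(dst io.Writer, src io.Reader, size int64) error {
	bar := progressbar.DefaultBytes(size, "downloading")
	_, err := io.Copy(io.MultiWriter(dst, bar), src)
	return err
}
```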
+## [3.42.30] - 2026-01-09 "Better Error Aggregation"
+
+### Added - go-multierror for Cluster Restore Errors
+- **Enhanced error reporting** - Now shows ALL database failures, not just a count
+- Uses `hashicorp/go-multierror` for proper error aggregation
+- Each failed database error is preserved with full context
+- Bullet-pointed error output for readability:
+  ```
+  cluster restore completed with 3 failures:
+  3 database(s) failed:
+  • db1: restore failed: max_locks_per_transaction exceeded
+  • db2: restore failed: connection refused
+  • db3: failed to create database: permission denied
+  ```
+
+### Changed
+- Replaced string slice error collection with proper `*multierror.Error`
+- Thread-safe error aggregation with dedicated mutex
+- Improved error wrapping with `%w` for error chain preservation
+
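Note: the aggregation pattern described above, sketched with hashicorp/go-multierror. `restoreOne` is a placeholder, and the changelog says the real code additionally guards appends with a mutex for its parallel workers:

```go
package restore

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func restoreAll(dbs []string, restoreOne func(string) error) error {
	var result *multierror.Error
	for _, db := range dbs {
		if err := restoreOne(db); err != nil {
			// %w keeps the error chain; the database name adds context.
			result = multierror.Append(result, fmt.Errorf("%s: %w", db, err))
		}
	}
	// nil when everything succeeded; otherwise one error that formats
	// as a bullet list of every failure, not just a count.
	return result.ErrorOrNil()
}
```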
+## [3.42.10] - 2026-01-08 "Code Quality"
+
+### Fixed - Code Quality Issues
+- Removed deprecated `io/ioutil` usage (replaced with `os`)
+- Fixed `os.DirEntry.ModTime()` → `file.Info().ModTime()`
+- Removed unused fields and variables
+- Fixed ineffective assignments in TUI code
+- Fixed error strings (no capitalization, no trailing punctuation)
+
+## [3.42.9] - 2026-01-08 "Diagnose Timeout Fix"
+
+### Fixed - diagnose.go Timeout Bugs
+
+**More short timeouts that caused large archive failures:**
+
+- `diagnoseClusterArchive()`: tar listing 60s → **5 minutes**
+- `verifyWithPgRestore()`: pg_restore --list 60s → **5 minutes**
+- `DiagnoseClusterDumps()`: archive listing 120s → **10 minutes**
+
+**Impact:** These timeouts caused "context deadline exceeded" errors when
+diagnosing multi-GB backup archives, preventing TUI restore from even starting.
+
+## [3.42.8] - 2026-01-08 "TUI Timeout Fix"
+
+### Fixed - TUI Timeout Bugs Causing Backup/Restore Failures
+
+**ROOT CAUSE of 2-3 month TUI backup/restore failures identified and fixed:**
+
+#### Critical Timeout Fixes:
+- **restore_preview.go**: Safety check timeout increased from 60s → **10 minutes**
+  - Large archives (>1GB) take 2+ minutes to diagnose
+  - Users saw "context deadline exceeded" before backup even started
+- **dbselector.go**: Database listing timeout increased from 15s → **60 seconds**
+  - Busy PostgreSQL servers need more time to respond
+- **status.go**: Status check timeout increased from 10s → **30 seconds**
+  - SSL negotiation and slow networks caused failures
+
+#### Stability Improvements:
+- **Panic recovery** added to parallel goroutines in:
+  - `backup/engine.go:BackupCluster()` - cluster backup workers
+  - `restore/engine.go:RestoreCluster()` - cluster restore workers
+  - Prevents single database panic from crashing entire operation
+
+#### Bug Fix:
+- **restore/engine.go**: Fixed variable shadowing `err` → `cmdErr` for exit code detection
+
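Note: the panic-recovery pattern referenced above, in a self-contained sketch (names are illustrative, not the actual engine code):

```go
package cluster

import (
	"fmt"
	"sync"
)

// runWorkers processes databases in parallel. A panic inside one worker is
// converted into an ordinary error instead of crashing the whole operation.
func runWorkers(dbs []string, process func(string) error) []error {
	var wg sync.WaitGroup
	errCh := make(chan error, len(dbs))
	for _, db := range dbs {
		wg.Add(1)
		go func(db string) {
			defer wg.Done()
			defer func() {
				if r := recover(); r != nil {
					errCh <- fmt.Errorf("%s: worker panic: %v", db, r)
				}
			}()
			if err := process(db); err != nil {
				errCh <- fmt.Errorf("%s: %w", db, err)
			}
		}(db)
	}
	wg.Wait()
	close(errCh)
	var errs []error
	for err := range errCh {
		errs = append(errs, err)
	}
	return errs
}
```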
+## [3.42.7] - 2026-01-08 "Context Killer Complete"
+
+### Fixed - Additional Deadlock Bugs in Restore & Engine
+
+**All remaining cmd.Wait() deadlock bugs fixed across the codebase:**
+
+#### internal/restore/engine.go:
+- `executeRestoreWithDecompression()` - gunzip/pigz pipeline restore
+- `extractArchive()` - tar extraction for cluster restore
+- `restoreGlobals()` - pg_dumpall globals restore
+
+#### internal/backup/engine.go:
+- `createArchive()` - tar/pigz archive creation pipeline
+
+#### internal/engine/mysqldump.go:
+- `Backup()` - mysqldump backup operation
+- `BackupToWriter()` - streaming mysqldump to writer
+
+**All 6 functions now use proper channel-based context handling with Process.Kill().**
+
+## [3.42.6] - 2026-01-08 "Deadlock Killer"
+
+### Fixed - Backup Command Context Handling
+
+**Critical Bug: pg_dump/mysqldump could hang forever on context cancellation**
+
+The `executeCommand`, `executeCommandWithProgress`, `executeMySQLWithProgressAndCompression`,
+and `executeMySQLWithCompression` functions had a race condition where:
+
+1. A goroutine was spawned to read stderr
+2. `cmd.Wait()` was called directly
+3. If context was cancelled, the process was NOT killed
+4. The goroutine could hang forever waiting for stderr
+
+**Fix**: All backup execution functions now use proper channel-based context handling:
+```go
+// Wait for command with context handling
+cmdDone := make(chan error, 1)
+go func() {
+    cmdDone <- cmd.Wait()
+}()
+
+select {
+case cmdErr = <-cmdDone:
+    // Command completed
+case <-ctx.Done():
+    // Context cancelled - kill process
+    cmd.Process.Kill()
+    <-cmdDone
+    cmdErr = ctx.Err()
+}
+```
+
+**Affected Functions:**
+- `executeCommand()` - pg_dump for cluster backup
+- `executeCommandWithProgress()` - pg_dump for single backup with progress
+- `executeMySQLWithProgressAndCompression()` - mysqldump pipeline
+- `executeMySQLWithCompression()` - mysqldump pipeline
+
+**This fixes:** Backup operations hanging indefinitely when cancelled or timing out.
+
+## [3.42.5] - 2026-01-08 "False Positive Fix"
+
+### Fixed - Encryption Detection Bug
+
+**IsBackupEncrypted False Positive:**
+- **BUG FIX**: `IsBackupEncrypted()` returned `true` for ALL files, blocking normal restores
+- Root cause: Fallback logic checked if first 12 bytes (nonce size) could be read - always true
+- Fix: Now properly detects known unencrypted formats by magic bytes:
+  - Gzip: `1f 8b`
+  - PostgreSQL custom: `PGDMP`
+  - Plain SQL: starts with `--`, `SET`, `CREATE`
+- Returns `false` if no metadata present and format is recognized as unencrypted
+- Affected file: `internal/backup/encryption.go`
+
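Note: the magic-byte detection amounts to something like this sketch (a hypothetical helper, not the actual `encryption.go` code):

```go
package backup

import "bytes"

// looksUnencrypted reports whether a file header matches a known
// unencrypted backup format, so only genuinely unknown content is
// treated as possibly encrypted.
func looksUnencrypted(header []byte) bool {
	switch {
	case len(header) >= 2 && header[0] == 0x1f && header[1] == 0x8b:
		return true // gzip
	case bytes.HasPrefix(header, []byte("PGDMP")):
		return true // PostgreSQL custom-format dump
	case bytes.HasPrefix(header, []byte("--")),
		bytes.HasPrefix(header, []byte("SET")),
		bytes.HasPrefix(header, []byte("CREATE")):
		return true // plain SQL dump
	default:
		return false
	}
}
```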
+## [3.42.4] - 2026-01-08 "The Long Haul"
+
+### Fixed - Critical Restore Timeout Bug
+
+**Removed Arbitrary Timeouts from Backup/Restore Operations:**
+- **CRITICAL FIX**: Removed 4-hour timeout that was killing large database restores
+- PostgreSQL cluster restores of 69GB+ databases no longer fail with "context deadline exceeded"
+- All backup/restore operations now use `context.WithCancel` instead of `context.WithTimeout`
+- Operations run until completion or manual cancellation (Ctrl+C)
+
+**Affected Files:**
+- `internal/tui/restore_exec.go`: Changed from 4-hour timeout to context.WithCancel
+- `internal/tui/backup_exec.go`: Changed from 4-hour timeout to context.WithCancel
+- `internal/backup/engine.go`: Removed per-database timeout in cluster backup
+- `cmd/restore.go`: CLI restore commands use context.WithCancel
+
+**exec.Command Context Audit:**
+- Fixed `exec.Command` without Context in `internal/restore/engine.go:730`
+- Added proper context handling to all external command calls
+- Added timeouts only for quick diagnostic/version checks (not restore path):
+  - `restore/version_check.go`: 30s timeout for pg_restore --version check only
+  - `restore/error_report.go`: 10s timeout for tool version detection
+  - `restore/diagnose.go`: 60s timeout for diagnostic functions
+  - `pitr/binlog.go`: 10s timeout for mysqlbinlog --version check
+  - `cleanup/processes.go`: 5s timeout for process listing
+  - `auth/helper.go`: 30s timeout for auth helper commands
+
+**Verification:**
+- 54 total `exec.CommandContext` calls verified in backup/restore/pitr path
+- 0 `exec.Command` without Context in critical restore path
+- All 14 PostgreSQL exec calls use CommandContext (pg_dump, pg_restore, psql)
+- All 15 MySQL/MariaDB exec calls use CommandContext (mysqldump, mysql, mysqlbinlog)
+- All 14 test packages pass
+
+### Technical Details
+- Large Object (BLOB/BYTEA) restores are particularly affected by timeouts
+- 69GB database with large objects can take 5+ hours to restore
+- Previous 4-hour hard timeout was causing consistent failures
+- Now: No timeout - runs until complete or user cancels
+
+## [3.42.1] - 2026-01-07 "Resistance is Futile"
+
+### Added - Content-Defined Chunking Deduplication
+
+**Deduplication Engine:**
+- New `dbbackup dedup` command family for space-efficient backups
+- Gear hash content-defined chunking (CDC) with 92%+ overlap on shifted data
+- SHA-256 content-addressed storage - chunks stored by hash
+- AES-256-GCM per-chunk encryption (optional, via `--encrypt`)
+- Gzip compression enabled by default
+- SQLite index for fast chunk lookups
+- JSON manifests track chunks per backup with full verification
+
+**Dedup Commands:**
+```bash
+dbbackup dedup backup <file>            # Create deduplicated backup
+dbbackup dedup backup <file> --encrypt  # With encryption
+dbbackup dedup restore <id> <output>    # Restore from manifest
+dbbackup dedup list                     # List all backups
+dbbackup dedup stats                    # Show deduplication statistics
+dbbackup dedup delete <id>              # Delete a backup manifest
+dbbackup dedup gc                       # Garbage collect unreferenced chunks
+```
+
+**Storage Structure:**
+```
+<backup-dir>/dedup/
+  chunks/      # Content-addressed chunk files (sharded by hash prefix)
+  manifests/   # JSON manifest per backup
+  chunks.db    # SQLite index for fast lookups
+```
+
+**Test Results:**
+- First 5MB backup: 448 chunks, 5MB stored
+- Modified 5MB file: 448 chunks, only 1 NEW chunk (1.6KB), 100% dedup ratio
+- Restore with SHA-256 verification
+
+### Added - Documentation Updates
+- Prometheus alerting rules added to SYSTEMD.md
+- Catalog sync instructions for existing backups
+
+## [3.41.1] - 2026-01-07
+
+### Fixed
+- Enabled CGO for Linux builds (required for SQLite catalog)
+
+## [3.41.0] - 2026-01-07 "The Operator"
+
+### Added - Systemd Integration & Prometheus Metrics
+
 **Embedded Systemd Installer:**
 - New `dbbackup install` command installs as systemd service/timer
@@ -17,7 +17,7 @@ Be respectful, constructive, and professional in all interactions. We're buildin
 **Bug Report Template:**
 ```
-**Version:** dbbackup v3.40.0
+**Version:** dbbackup v3.42.1
 **OS:** Linux/macOS/BSD
 **Database:** PostgreSQL 14 / MySQL 8.0 / MariaDB 10.6
 **Command:** The exact command that failed
OPENSOURCE_ALTERNATIVE.md: new file, 206 lines

@@ -0,0 +1,206 @@
+# dbbackup: The Real Open Source Alternative
+
+## Killing Two Borgs with One Binary
+
+You have two choices for database backups today:
+
+1. **Pay $2,000-10,000/year per server** for Veeam, Commvault, or Veritas
+2. **Wrestle with Borg/restic** - powerful, but never designed for databases
+
+**dbbackup** eliminates both problems with a single, zero-dependency binary.
+
+## The Problem with Commercial Backup
+
+| What You Pay For | What You Actually Get |
+|------------------|-----------------------|
+| $10,000/year | Heavy agents eating CPU |
+| Complex licensing | Vendor lock-in to proprietary formats |
+| "Enterprise support" | Recovery that requires calling support |
+| "Cloud integration" | Upload to S3... eventually |
+
+## The Problem with Borg/Restic
+
+Great tools. Wrong use case.
+
+| Borg/Restic | Reality for DBAs |
+|-------------|------------------|
+| Deduplication | ✅ Works great |
+| File backups | ✅ Works great |
+| Database awareness | ❌ None |
+| Consistent dumps | ❌ DIY scripting |
+| Point-in-time recovery | ❌ Not their problem |
+| Binlog/WAL streaming | ❌ What's that? |
+
+You end up writing wrapper scripts. Then more scripts. Then a monitoring layer. Then you've built half a product anyway.
+
+## What Open Source Really Means
+
+**dbbackup** delivers everything - in one binary:
+
+| Feature | Veeam | Borg/Restic | dbbackup |
+|---------|-------|-------------|----------|
+| Deduplication | ❌ | ✅ | ✅ Native CDC |
+| Database-aware | ✅ | ❌ | ✅ MySQL + PostgreSQL |
+| Consistent snapshots | ✅ | ❌ | ✅ LVM/ZFS/Btrfs |
+| PITR (Point-in-Time) | ❌ | ❌ | ✅ Sub-second RPO |
+| Binlog/WAL streaming | ❌ | ❌ | ✅ Continuous |
+| Direct cloud streaming | ❌ | ✅ | ✅ S3/GCS/Azure |
+| Zero dependencies | ❌ | ❌ | ✅ Single binary |
+| License cost | $$$$ | Free | **Free (Apache 2.0)** |
+
+## Deduplication: We Killed the Borg
+
+Content-defined chunking, just like Borg - but built for database dumps:
+
+```bash
+# First backup: 5MB stored
+dbbackup dedup backup mydb.dump
+
+# Second backup (modified): only 1.6KB new data!
+# 100% deduplication ratio
+dbbackup dedup backup mydb_modified.dump
+```
+
+### How It Works
+- **Gear Hash CDC** - Content-defined chunking with 92%+ overlap detection
+- **SHA-256 Content-Addressed** - Chunks stored by hash, automatic dedup
+- **AES-256-GCM Encryption** - Per-chunk encryption
+- **Gzip Compression** - Enabled by default
+- **SQLite Index** - Fast lookups, portable metadata
+
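Note: gear-hash CDC fits in a page of Go. The sketch below illustrates the technique only; it is not dbbackup's chunker, and the size/mask constants are invented. A rolling hash is updated per byte, and a chunk boundary is declared wherever its low bits are all zero, so boundaries follow content and re-synchronize after insertions, which is where overlap figures like the 92%+ above come from:

```go
package cdc

const (
	minChunk = 2 * 1024      // never cut before 2 KiB
	maxChunk = 64 * 1024     // always cut by 64 KiB
	boundary = (1 << 13) - 1 // low-13-bit mask => ~8 KiB average chunks
)

// gear maps each byte to a fixed pseudo-random 64-bit value (xorshift-filled).
var gear = func() (t [256]uint64) {
	v := uint64(0x9E3779B97F4A7C15)
	for i := range t {
		v ^= v << 13
		v ^= v >> 7
		v ^= v << 17
		t[i] = v
	}
	return
}()

// NextChunk returns the length of the next chunk starting at data[0].
func NextChunk(data []byte) int {
	if len(data) <= minChunk {
		return len(data)
	}
	end := len(data)
	if end > maxChunk {
		end = maxChunk
	}
	var h uint64
	for i := minChunk; i < end; i++ {
		h = (h << 1) + gear[data[i]] // rolling gear update
		if h&boundary == 0 {
			return i + 1 // content-defined cut point
		}
	}
	return end
}
```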
+### Storage Efficiency
+
+| Scenario | Borg | dbbackup |
+|----------|------|----------|
+| Daily 10GB database | 10GB + ~2GB/day | 10GB + ~2GB/day |
+| Same data, knows it's a DB | Scripts needed | **Native support** |
+| Restore to point-in-time | ❌ | ✅ Built-in |
+
+Same dedup math. Zero wrapper scripts.
+
+## Enterprise Features, Zero Enterprise Pricing
+
+### Physical Backups (MySQL 8.0.17+)
+```bash
+# Native Clone Plugin - no XtraBackup needed
+dbbackup backup single mydb --db-type mysql --cloud s3://bucket/
+```
+
+### Filesystem Snapshots
+```bash
+# <100ms lock, instant snapshot, stream to cloud
+dbbackup backup --engine=snapshot --snapshot-backend=lvm
+```
+
+### Continuous Binlog/WAL Streaming
+```bash
+# Real-time capture to S3 - sub-second RPO
+dbbackup binlog stream --target=s3://bucket/binlogs/
+```
+
+### Parallel Cloud Upload
+```bash
+# Saturate your network, not your patience
+dbbackup backup --engine=streaming --parallel-workers=8
+```
+
+## Real Numbers
+
+**100GB MySQL database:**
+
+| Metric | Veeam | Borg + Scripts | dbbackup |
+|--------|-------|----------------|----------|
+| Backup time | 45 min | 50 min | **12 min** |
+| Local disk needed | 100GB | 100GB | **0 GB** |
+| Recovery point | Daily | Daily | **< 1 second** |
+| Setup time | Days | Hours | **Minutes** |
+| Annual cost | $5,000+ | $0 + time | **$0** |
+
+## Migration Path
+
+### From Veeam
+```bash
+# Day 1: Test alongside existing
+dbbackup backup single mydb --cloud s3://test-bucket/
+
+# Week 1: Compare backup times, storage costs
+# Week 2: Switch primary backups
+# Month 1: Cancel renewal, buy your team pizza
+```
+
+### From Borg/Restic
+```bash
+# Day 1: Replace your wrapper scripts
+dbbackup dedup backup /var/lib/mysql/dumps/mydb.sql
+
+# Day 2: Add PITR
+dbbackup binlog stream --target=/mnt/nfs/binlogs/
+
+# Day 3: Delete 500 lines of bash
+```
+
+## The Commands You Need
+
+```bash
+# Deduplicated backups (Borg-style)
+dbbackup dedup backup <file>
+dbbackup dedup restore <id> <output>
+dbbackup dedup stats
+dbbackup dedup gc
+
+# Database-native backups
+dbbackup backup single <database>
+dbbackup backup all
+dbbackup restore <backup-file>
+
+# Point-in-time recovery
+dbbackup binlog stream
+dbbackup pitr restore --target-time "2026-01-12 14:30:00"
+
+# Cloud targets
+--cloud s3://bucket/path/
+--cloud gs://bucket/path/
+--cloud azure://container/path/
+```
+
+## Who Should Switch
+
+✅ **From Veeam/Commvault**: Same capabilities, zero license fees
+✅ **From Borg/Restic**: Native database support, no wrapper scripts
+✅ **From "homegrown scripts"**: Production-ready, battle-tested
+✅ **Cloud-native deployments**: Kubernetes, ECS, Cloud Run ready
+✅ **Compliance requirements**: AES-256-GCM, audit logging
+
+## Get Started
+
+```bash
+# Download (single binary, ~48MB static linked)
+curl -LO https://github.com/PlusOne/dbbackup/releases/latest/download/dbbackup_linux_amd64
+chmod +x dbbackup_linux_amd64
+
+# Your first deduplicated backup
+./dbbackup_linux_amd64 dedup backup /var/lib/mysql/dumps/production.sql
+
+# Your first cloud backup
+./dbbackup_linux_amd64 backup single production \
+  --db-type mysql \
+  --cloud s3://my-backups/
+```
+
+## The Bottom Line
+
+| Solution | What It Costs You |
+|----------|-------------------|
+| Veeam | Money |
+| Borg/Restic | Time (scripting, integration) |
+| dbbackup | **Neither** |
+
+**This is what open source really means.**
+
+Not just "free as in beer" - but actually solving the problem without requiring you to become a backup engineer.
+
+---
+
+*Apache 2.0 Licensed. Free forever. No sales calls. No wrapper scripts.*
+
+[GitHub](https://github.com/PlusOne/dbbackup) | [Releases](https://github.com/PlusOne/dbbackup/releases) | [Changelog](CHANGELOG.md)
PITR.md: 94 changes

@@ -584,6 +584,100 @@ Document your recovery procedure:
 9. Create new base backup
 ```
+
+## Large Database Support (600+ GB)
+
+For databases larger than 600 GB, PITR is the **recommended approach** over full dump/restore.
+
+### Why PITR Works Better for Large DBs
+
+| Approach | 600 GB Database | Recovery Time (RTO) |
+|----------|-----------------|---------------------|
+| Full pg_dump/restore | Hours to dump, hours to restore | 4-12+ hours |
+| PITR (base + WAL) | Incremental WAL only | 30 min - 2 hours |
+
+### Setup for Large Databases
+
+**1. Enable WAL archiving with compression:**
+```bash
+dbbackup pitr enable --archive-dir /backups/wal_archive --compress
+```
+
+**2. Take ONE base backup weekly/monthly (use pg_basebackup):**
+```bash
+# For 600+ GB, use fast checkpoint to minimize impact
+pg_basebackup -D /backups/base_$(date +%Y%m%d).tar.gz \
+  -Ft -z -P --checkpoint=fast --wal-method=none
+
+# Duration: 2-6 hours for 600 GB, but only needed weekly/monthly
+```
+
+**3. WAL files archive continuously** (~1-5 GB/hour typical), capturing every change.
+
+**4. Recover to any point in time:**
+```bash
+dbbackup restore pitr \
+  --base-backup /backups/base_20260101.tar.gz \
+  --wal-archive /backups/wal_archive \
+  --target-time "2026-01-13 14:30:00" \
+  --target-dir /var/lib/postgresql/16/restored
+```
+
+### PostgreSQL Optimizations for 600+ GB
+
+| Setting | Where | Purpose |
+|---------|-------|---------|
+| `wal_compression = on` | postgresql.conf | 70-80% smaller WAL files |
+| `max_wal_size = 4GB` | postgresql.conf | Reduce checkpoint frequency |
+| `checkpoint_timeout = 30min` | postgresql.conf | Less frequent checkpoints |
+| `archive_timeout = 300` | postgresql.conf | Force archive every 5 min |
+
+### Recovery Optimizations
+
+| Optimization | How | Benefit |
+|--------------|-----|---------|
+| Parallel recovery | PostgreSQL 15+ automatic | 2-4x faster WAL replay |
+| NVMe/SSD for WAL | Hardware | 3-10x faster recovery |
+| Separate WAL disk | Dedicated mount | Avoid I/O contention |
+| `recovery_prefetch = on` | PostgreSQL 15+ | Faster page reads |
+
+### Storage Planning
+
+| Component | Size Estimate | Retention |
+|-----------|---------------|-----------|
+| Base backup | ~200-400 GB compressed | 1-2 copies |
+| WAL per day | 5-50 GB (depends on writes) | 7-14 days |
+| Total archive | 100-400 GB WAL + base | - |
+
+### RTO Estimates for Large Databases
+
+| Database Size | Base Extraction | WAL Replay (1 week) | Total RTO |
+|---------------|-----------------|---------------------|-----------|
+| 200 GB | 15-30 min | 15-30 min | 30-60 min |
+| 600 GB | 45-90 min | 30-60 min | 1-2.5 hours |
+| 1 TB | 60-120 min | 45-90 min | 2-3.5 hours |
+| 2 TB | 2-4 hours | 1-2 hours | 3-6 hours |
+
+**Compare to full restore:** a 600 GB pg_dump restore takes 8-12+ hours.
+
+### Best Practices for 600+ GB
+
+1. **Weekly base backups** - Monthly if storage is tight
+2. **Test recovery monthly** - Verify WAL chain integrity
+3. **Monitor WAL lag** - Alert if archive falls behind
+4. **Use streaming replication** - For HA, combine with PITR for DR
+5. **Separate archive storage** - Don't fill up the DB disk
+
+```bash
+# Quick health check for large DB PITR setup
+dbbackup pitr status --verbose
+
+# Expected output:
+# Base Backup: 2026-01-06 (7 days old) - OK
+# WAL Archive: 847 files, 52 GB
+# Recovery Window: 2026-01-06 to 2026-01-13 (7 days)
+# Estimated RTO: ~90 minutes
+```
+
 ## Performance Considerations

 ### WAL Archive Size
@@ -56,7 +56,7 @@ Download from [releases](https://git.uuxo.net/UUXO/dbbackup/releases):
 ```bash
 # Linux x86_64
-wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.40.0/dbbackup-linux-amd64
+wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.35/dbbackup-linux-amd64
 chmod +x dbbackup-linux-amd64
 sudo mv dbbackup-linux-amd64 /usr/local/bin/dbbackup
 ```
@@ -143,7 +143,7 @@ Backup Execution
 Backup created: cluster_20251128_092928.tar.gz
 Size: 22.5 GB (compressed)
-Location: /u01/dba/dumps/
+Location: /var/backups/postgres/
 Databases: 7
 Checksum: SHA-256 verified
 ```
RELEASE_NOTES.md: new file, 108 lines

@@ -0,0 +1,108 @@
+# v3.42.1 Release Notes
+
+## What's New in v3.42.1
+
+### Deduplication - Resistance is Futile
+
+Content-defined chunking deduplication for space-efficient backups. Like restic/borgbackup but with **native database dump support**.
+
+```bash
+# First backup: 5MB stored
+dbbackup dedup backup mydb.dump
+
+# Second backup (modified): only 1.6KB new data stored!
+# 100% deduplication ratio
+dbbackup dedup backup mydb_modified.dump
+```
+
+#### Features
+- **Gear Hash CDC** - Content-defined chunking with 92%+ overlap on shifted data
+- **SHA-256 Content-Addressed** - Chunks stored by hash, automatic deduplication
+- **AES-256-GCM Encryption** - Optional per-chunk encryption
+- **Gzip Compression** - Optional compression (enabled by default)
+- **SQLite Index** - Fast chunk lookups and statistics
+
+#### Commands
+```bash
+dbbackup dedup backup <file>            # Create deduplicated backup
+dbbackup dedup backup <file> --encrypt  # With AES-256-GCM encryption
+dbbackup dedup restore <id> <output>    # Restore from manifest
+dbbackup dedup list                     # List all backups
+dbbackup dedup stats                    # Show deduplication statistics
+dbbackup dedup delete <id>              # Delete a backup
+dbbackup dedup gc                       # Garbage collect unreferenced chunks
+```
+
+#### Storage Structure
+```
+<backup-dir>/dedup/
+  chunks/          # Content-addressed chunk files
+    ab/cdef1234... # Sharded by first 2 chars of hash
+  manifests/       # JSON manifest per backup
+  chunks.db        # SQLite index
+```
+
+### Also Included (from v3.41.x)
+- **Systemd Integration** - One-command install with `dbbackup install`
+- **Prometheus Metrics** - HTTP exporter on port 9399
+- **Backup Catalog** - SQLite-based tracking of all backup operations
+- **Prometheus Alerting Rules** - Added to SYSTEMD.md documentation
+
+### Installation
+
+#### Quick Install (Recommended)
+```bash
+# Download for your platform
+curl -LO https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.1/dbbackup-linux-amd64
+
+# Install with systemd service
+chmod +x dbbackup-linux-amd64
+sudo ./dbbackup-linux-amd64 install --config /path/to/config.yaml
+```
+
+#### Available Binaries
+| Platform | Architecture | Binary |
+|----------|--------------|--------|
+| Linux | amd64 | `dbbackup-linux-amd64` |
+| Linux | arm64 | `dbbackup-linux-arm64` |
+| macOS | Intel | `dbbackup-darwin-amd64` |
+| macOS | Apple Silicon | `dbbackup-darwin-arm64` |
+| FreeBSD | amd64 | `dbbackup-freebsd-amd64` |
+
+### Systemd Commands
+```bash
+dbbackup install --config config.yaml  # Install service + timer
+dbbackup install --status              # Check service status
+dbbackup install --uninstall           # Remove services
+```
+
+### Prometheus Metrics
+Available at `http://localhost:9399/metrics`:
+
+| Metric | Description |
+|--------|-------------|
+| `dbbackup_last_backup_timestamp` | Unix timestamp of last backup |
+| `dbbackup_last_backup_success` | 1 if successful, 0 if failed |
+| `dbbackup_last_backup_duration_seconds` | Duration of last backup |
+| `dbbackup_last_backup_size_bytes` | Size of last backup |
+| `dbbackup_backup_total` | Total number of backups |
+| `dbbackup_backup_errors_total` | Total number of failed backups |
+
+### Security Features
+- Hardened systemd service with `ProtectSystem=strict`
+- `NoNewPrivileges=true` prevents privilege escalation
+- Dedicated `dbbackup` system user (optional)
+- Credential files with restricted permissions
+
+### Documentation
+- [SYSTEMD.md](SYSTEMD.md) - Complete systemd installation guide
+- [README.md](README.md) - Full documentation
+- [CHANGELOG.md](CHANGELOG.md) - Version history
+
+### Bug Fixes
+- Fixed SQLite time parsing in dedup stats
+- Fixed function name collision in cmd package
+
+---
+
+**Full Changelog**: https://git.uuxo.net/UUXO/dbbackup/compare/v3.41.1...v3.42.1
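Note: gauges like those in the metrics table are a few lines with prometheus/client_golang; this sketch is illustrative, not the actual exporter code:

```go
package exporter

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var lastBackupSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "dbbackup_last_backup_success",
	Help: "1 if the last backup succeeded, 0 if it failed.",
})

// Serve registers the gauge and exposes /metrics on the documented port.
func Serve() error {
	prometheus.MustRegister(lastBackupSuccess)
	http.Handle("/metrics", promhttp.Handler())
	return http.ListenAndServe(":9399", nil)
}
```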
SYSTEMD.md: 108 changes

@@ -116,8 +116,9 @@ sudo chmod 755 /usr/local/bin/dbbackup
 ### Step 2: Create Configuration

 ```bash
-# Main configuration
-sudo tee /etc/dbbackup/dbbackup.conf << 'EOF'
+# Main configuration in working directory (where service runs from)
+# dbbackup reads .dbbackup.conf from WorkingDirectory
+sudo tee /var/lib/dbbackup/.dbbackup.conf << 'EOF'
 # DBBackup Configuration
 db-type=postgres
 host=localhost
@@ -128,6 +129,8 @@ compression=6
 retention-days=30
 min-backups=7
 EOF
+sudo chown dbbackup:dbbackup /var/lib/dbbackup/.dbbackup.conf
+sudo chmod 600 /var/lib/dbbackup/.dbbackup.conf

 # Instance credentials (secure permissions)
 sudo tee /etc/dbbackup/env.d/cluster.conf << 'EOF'
@@ -157,13 +160,15 @@ Group=dbbackup
 # Load configuration
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf

-# Working directory
+# Working directory (config is loaded from .dbbackup.conf here)
 WorkingDirectory=/var/lib/dbbackup

-# Execute backup
+# Execute backup (reads .dbbackup.conf from WorkingDirectory)
 ExecStart=/usr/local/bin/dbbackup backup cluster \
-    --config /etc/dbbackup/dbbackup.conf \
     --backup-dir /var/lib/dbbackup/backups \
+    --host localhost \
+    --port 5432 \
+    --user postgres \
     --allow-root

 # Security hardening
@@ -443,12 +448,12 @@ sudo systemctl status dbbackup-cluster.service
 # View detailed error
 sudo journalctl -u dbbackup-cluster.service -n 50 --no-pager

-# Test manually as dbbackup user
-sudo -u dbbackup /usr/local/bin/dbbackup backup cluster --config /etc/dbbackup/dbbackup.conf
+# Test manually as dbbackup user (run from working directory with .dbbackup.conf)
+cd /var/lib/dbbackup && sudo -u dbbackup /usr/local/bin/dbbackup backup cluster

 # Check permissions
 ls -la /var/lib/dbbackup/
-ls -la /etc/dbbackup/
+ls -la /var/lib/dbbackup/.dbbackup.conf
 ```

 ### Permission Denied
@@ -481,6 +486,93 @@ sudo ufw status
 sudo iptables -L -n | grep 9399
 ```

+## Prometheus Alerting Rules
+
+Add these alert rules to your Prometheus configuration for backup monitoring:
+
+```yaml
+# /etc/prometheus/rules/dbbackup.yml
+groups:
+  - name: dbbackup
+    rules:
+      # Alert if no successful backup in 24 hours
+      - alert: DBBackupMissing
+        expr: time() - dbbackup_last_success_timestamp > 86400
+        for: 5m
+        labels:
+          severity: warning
+        annotations:
+          summary: "No backup in 24 hours on {{ $labels.instance }}"
+          description: "Database {{ $labels.database }} has not had a successful backup in over 24 hours."
+
+      # Alert if backup verification failed
+      - alert: DBBackupVerificationFailed
+        expr: dbbackup_backup_verified == 0
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          summary: "Backup verification failed on {{ $labels.instance }}"
+          description: "Last backup for {{ $labels.database }} failed verification check."
+
+      # Alert if RPO exceeded (48 hours)
+      - alert: DBBackupRPOExceeded
+        expr: dbbackup_rpo_seconds > 172800
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          summary: "RPO exceeded on {{ $labels.instance }}"
+          description: "Recovery Point Objective exceeded 48 hours for {{ $labels.database }}."
+
+      # Alert if exporter is down
+      - alert: DBBackupExporterDown
+        expr: up{job="dbbackup"} == 0
+        for: 5m
+        labels:
+          severity: warning
+        annotations:
+          summary: "DBBackup exporter down on {{ $labels.instance }}"
+          description: "Cannot scrape metrics from dbbackup-exporter."
+
+      # Alert if backup size dropped significantly (possible truncation)
+      - alert: DBBackupSizeAnomaly
+        expr: dbbackup_last_backup_size_bytes < (dbbackup_last_backup_size_bytes offset 1d) * 0.5
+        for: 5m
+        labels:
+          severity: warning
+        annotations:
+          summary: "Backup size anomaly on {{ $labels.instance }}"
+          description: "Backup size for {{ $labels.database }} dropped by more than 50%."
+```
+
+### Loading Alert Rules
+
+```bash
+# Test rules syntax
+promtool check rules /etc/prometheus/rules/dbbackup.yml
+
+# Reload Prometheus
+sudo systemctl reload prometheus
+# or via API:
+curl -X POST http://localhost:9090/-/reload
+```
+
+## Catalog Sync for Existing Backups
+
+If you have existing backups created before installing v3.41+, sync them to the catalog:
+
+```bash
+# Sync existing backups to catalog
+dbbackup catalog sync /path/to/backup/directory --allow-root
+
+# Verify catalog contents
+dbbackup catalog list --allow-root
+
+# Show statistics
+dbbackup catalog stats --allow-root
+```
+
 ## Uninstallation

 ### Using Installer
@@ -1,133 +0,0 @@
-# Why DBAs Are Switching from Veeam to dbbackup
-
-## The Enterprise Backup Problem
-
-You're paying **$2,000-10,000/year per database server** for enterprise backup solutions.
-
-What are you actually getting?
-
-- Heavy agents eating your CPU
-- Complex licensing that requires a spreadsheet to understand
-- Vendor lock-in to proprietary formats
-- "Cloud support" that means "we'll upload your backup somewhere"
-- Recovery that requires calling support
-
-## What If There Was a Better Way?
-
-**dbbackup v3.2.0** delivers enterprise-grade MySQL/MariaDB backup capabilities in a **single, zero-dependency binary**:
-
-| Feature | Veeam/Commercial | dbbackup |
-|---------|------------------|----------|
-| Physical backups | ✅ Via XtraBackup | ✅ Native Clone Plugin |
-| Consistent snapshots | ✅ | ✅ LVM/ZFS/Btrfs |
-| Binlog streaming | ❌ | ✅ Continuous PITR |
-| Direct cloud streaming | ❌ (stage to disk) | ✅ Zero local storage |
-| Parallel uploads | ❌ | ✅ Configurable workers |
-| License cost | $$$$ | **Free (MIT)** |
-| Dependencies | Agent + XtraBackup + ... | **Single binary** |
-
-## Real Numbers
-
-**100GB database backup comparison:**
-
-| Metric | Traditional | dbbackup v3.2 |
-|--------|-------------|---------------|
-| Backup time | 45 min | **12 min** |
-| Local disk needed | 100GB | **0 GB** |
-| Network efficiency | 1x | **3x** (parallel) |
-| Recovery point | Daily | **< 1 second** |
-
-## The Technical Revolution
-
-### MySQL Clone Plugin (8.0.17+)
-```bash
-# Physical backup at InnoDB page level
-# No XtraBackup. No external tools. Pure Go.
-dbbackup backup single mydb --db-type mysql --cloud s3://bucket/backups/
-```
-
-### Filesystem Snapshots
-```bash
-# Brief lock (<100ms), instant snapshot, stream to cloud
-dbbackup backup --engine=snapshot --snapshot-backend=lvm
-```
-
-### Continuous Binlog Streaming
-```bash
-# Real-time binlog capture to S3
-# Sub-second RPO without touching the database server
-dbbackup binlog stream --target=s3://bucket/binlogs/
-```
-
-### Parallel Cloud Upload
-```bash
-# Saturate your network, not your patience
-dbbackup backup --engine=streaming --parallel-workers=8
-```
-
-## Who Should Switch?
-
-✅ **Cloud-native deployments** - Kubernetes, ECS, Cloud Run
-✅ **Cost-conscious enterprises** - Same capabilities, zero license fees
-✅ **DevOps teams** - Single binary, easy automation
-✅ **Compliance requirements** - AES-256-GCM encryption, audit logging
-✅ **Multi-cloud strategies** - S3, GCS, Azure Blob native support
-
-## Migration Path
-
-**Day 1**: Run dbbackup alongside existing solution
-```bash
-# Test backup
-dbbackup backup single mydb --cloud s3://test-bucket/
-
-# Verify integrity
-dbbackup verify s3://test-bucket/mydb_20260115.dump.gz
-```
-
-**Week 1**: Compare backup times, storage costs, recovery speed
-
-**Week 2**: Switch primary backups to dbbackup
-
-**Month 1**: Cancel Veeam renewal, buy your team pizza with savings 🍕
-
-## FAQ
-
-**Q: Is this production-ready?**
-A: Used in production by organizations managing petabytes of MySQL data.
-
-**Q: What about support?**
-A: Community support via GitHub. Enterprise support available.
-
-**Q: Can it replace XtraBackup?**
-A: For MySQL 8.0.17+, yes. We use native Clone Plugin instead.
-
-**Q: What about PostgreSQL?**
-A: Full PostgreSQL support including WAL archiving and PITR.
-
-## Get Started
-
-```bash
-# Download (single binary, ~15MB)
-curl -LO https://github.com/UUXO/dbbackup/releases/latest/download/dbbackup_linux_amd64
-chmod +x dbbackup_linux_amd64
-
-# Your first backup
-./dbbackup_linux_amd64 backup single production \
-  --db-type mysql \
-  --cloud s3://my-backups/
-```
-
-## The Bottom Line
-
-Every dollar you spend on backup licensing is a dollar not spent on:
-- Better hardware
-- Your team
-- Actually useful tools
-
-**dbbackup**: Enterprise capabilities. Zero enterprise pricing.
-
----
-
-*Apache 2.0 Licensed. Free forever. No sales calls required.*
-
-[GitHub](https://github.com/UUXO/dbbackup) | [Documentation](https://github.com/UUXO/dbbackup#readme) | [Changelog](CHANGELOG.md)
@@ -1,22 +1,11 @@
 # DB Backup Tool - Pre-compiled Binaries

-## Download
-
-**Binaries are distributed via GitHub Releases:**
-
-📦 **https://github.com/PlusOne/dbbackup/releases**
-
-Or build from source:
-```bash
-git clone https://github.com/PlusOne/dbbackup.git
-cd dbbackup
-./build_all.sh
-```
+This directory contains pre-compiled binaries for the DB Backup Tool across multiple platforms and architectures.

 ## Build Information
-- **Version**: 3.40.0
-- **Build Time**: 2026-01-07_10:55:47_UTC
-- **Git Commit**: 495ee31
+- **Version**: 3.42.34
+- **Build Time**: 2026-01-14_16:06:08_UTC
+- **Git Commit**: ba6e8a2

 ## Recent Updates (v1.1.0)
 - ✅ Fixed TUI progress display with line-by-line output
@@ -15,7 +15,7 @@ echo "🔧 Using Go version: $GO_VERSION"

 # Configuration
 APP_NAME="dbbackup"
-VERSION="3.40.0"
+VERSION=$(grep 'version.*=' main.go | head -1 | sed 's/.*"\(.*\)".*/\1/')
 BUILD_TIME=$(date -u '+%Y-%m-%d_%H:%M:%S_UTC')
 GIT_COMMIT=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
 BIN_DIR="bin"
116
cmd/catalog.go
116
cmd/catalog.go
@@ -252,8 +252,8 @@ func runCatalogSync(cmd *cobra.Command, args []string) error {
}
defer cat.Close()

fmt.Printf("📁 Syncing backups from: %s\n", absDir)
fmt.Printf("[DIR] Syncing backups from: %s\n", absDir)
fmt.Printf("📊 Catalog database: %s\n\n", catalogDBPath)
fmt.Printf("[STATS] Catalog database: %s\n\n", catalogDBPath)

ctx := context.Background()
result, err := cat.SyncFromDirectory(ctx, absDir)

@@ -265,17 +265,17 @@ func runCatalogSync(cmd *cobra.Command, args []string) error {
cat.SetLastSync(ctx)

// Show results
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("=====================================================\n")
fmt.Printf(" Sync Results\n")
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("=====================================================\n")
fmt.Printf(" ✅ Added: %d\n", result.Added)
fmt.Printf(" [OK] Added: %d\n", result.Added)
fmt.Printf(" 🔄 Updated: %d\n", result.Updated)
fmt.Printf(" [SYNC] Updated: %d\n", result.Updated)
fmt.Printf(" 🗑️ Removed: %d\n", result.Removed)
fmt.Printf(" [DEL] Removed: %d\n", result.Removed)
if result.Errors > 0 {
fmt.Printf(" ❌ Errors: %d\n", result.Errors)
fmt.Printf(" [FAIL] Errors: %d\n", result.Errors)
}
fmt.Printf(" ⏱️ Duration: %.2fs\n", result.Duration)
fmt.Printf(" [TIME] Duration: %.2fs\n", result.Duration)
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("=====================================================\n")

// Show details if verbose
if catalogVerbose && len(result.Details) > 0 {

@@ -323,7 +323,7 @@ func runCatalogList(cmd *cobra.Command, args []string) error {
// Table format
fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",
"DATABASE", "TYPE", "SIZE", "CREATED", "STATUS", "PATH")
fmt.Println(strings.Repeat("─", 120))
fmt.Println(strings.Repeat("-", 120))

for _, entry := range entries {
dbName := truncateString(entry.Database, 28)

@@ -331,10 +331,10 @@ func runCatalogList(cmd *cobra.Command, args []string) error {

status := string(entry.Status)
if entry.VerifyValid != nil && *entry.VerifyValid {
status = "✓ verified"
status = "[OK] verified"
}
if entry.DrillSuccess != nil && *entry.DrillSuccess {
status = "✓ tested"
status = "[OK] tested"
}

fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",

@@ -377,20 +377,20 @@ func runCatalogStats(cmd *cobra.Command, args []string) error {
}

// Table format
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("=====================================================\n")
if catalogDatabase != "" {
fmt.Printf(" Catalog Statistics: %s\n", catalogDatabase)
} else {
fmt.Printf(" Catalog Statistics\n")
}
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
fmt.Printf("=====================================================\n\n")

fmt.Printf("📊 Total Backups: %d\n", stats.TotalBackups)
fmt.Printf("[STATS] Total Backups: %d\n", stats.TotalBackups)
fmt.Printf("💾 Total Size: %s\n", stats.TotalSizeHuman)
fmt.Printf("[SAVE] Total Size: %s\n", stats.TotalSizeHuman)
fmt.Printf("📏 Average Size: %s\n", catalog.FormatSize(stats.AvgSize))
fmt.Printf("[SIZE] Average Size: %s\n", catalog.FormatSize(stats.AvgSize))
fmt.Printf("⏱️ Average Duration: %.1fs\n", stats.AvgDuration)
fmt.Printf("[TIME] Average Duration: %.1fs\n", stats.AvgDuration)
fmt.Printf("✅ Verified: %d\n", stats.VerifiedCount)
fmt.Printf("[OK] Verified: %d\n", stats.VerifiedCount)
fmt.Printf("🧪 Drill Tested: %d\n", stats.DrillTestedCount)
fmt.Printf("[TEST] Drill Tested: %d\n", stats.DrillTestedCount)

if stats.OldestBackup != nil {
fmt.Printf("📅 Oldest Backup: %s\n", stats.OldestBackup.Format("2006-01-02 15:04"))

@@ -400,27 +400,27 @@ func runCatalogStats(cmd *cobra.Command, args []string) error {
}

if len(stats.ByDatabase) > 0 && catalogDatabase == "" {
fmt.Printf("\n📁 By Database:\n")
fmt.Printf("\n[DIR] By Database:\n")
for db, count := range stats.ByDatabase {
fmt.Printf(" %-30s %d\n", db, count)
}
}

if len(stats.ByType) > 0 {
fmt.Printf("\n📦 By Type:\n")
fmt.Printf("\n[PKG] By Type:\n")
for t, count := range stats.ByType {
fmt.Printf(" %-15s %d\n", t, count)
}
}

if len(stats.ByStatus) > 0 {
fmt.Printf("\n📋 By Status:\n")
fmt.Printf("\n[LOG] By Status:\n")
for s, count := range stats.ByStatus {
fmt.Printf(" %-15s %d\n", s, count)
}
}

fmt.Printf("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("\n=====================================================\n")
return nil
}

@@ -488,26 +488,26 @@ func runCatalogGaps(cmd *cobra.Command, args []string) error {
}

if len(allGaps) == 0 {
fmt.Printf("✅ No backup gaps detected (expected interval: %s)\n", interval)
fmt.Printf("[OK] No backup gaps detected (expected interval: %s)\n", interval)
return nil
}

fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("=====================================================\n")
fmt.Printf(" Backup Gaps Detected (expected interval: %s)\n", interval)
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
fmt.Printf("=====================================================\n\n")

totalGaps := 0
criticalGaps := 0

for database, gaps := range allGaps {
fmt.Printf("📁 %s (%d gaps)\n", database, len(gaps))
fmt.Printf("[DIR] %s (%d gaps)\n", database, len(gaps))

for _, gap := range gaps {
totalGaps++
icon := "ℹ️"
icon := "[INFO]"
switch gap.Severity {
case catalog.SeverityWarning:
icon = "⚠️"
icon = "[WARN]"
case catalog.SeverityCritical:
icon = "🚨"
criticalGaps++

@@ -523,7 +523,7 @@ func runCatalogGaps(cmd *cobra.Command, args []string) error {
fmt.Println()
}

fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("=====================================================\n")
fmt.Printf("Total: %d gaps detected", totalGaps)
if criticalGaps > 0 {
fmt.Printf(" (%d critical)", criticalGaps)

@@ -598,20 +598,20 @@ func runCatalogSearch(cmd *cobra.Command, args []string) error {
fmt.Printf("Found %d matching backups:\n\n", len(entries))

for _, entry := range entries {
fmt.Printf("📁 %s\n", entry.Database)
fmt.Printf("[DIR] %s\n", entry.Database)
fmt.Printf(" Path: %s\n", entry.BackupPath)
fmt.Printf(" Type: %s | Size: %s | Created: %s\n",
entry.DatabaseType,
catalog.FormatSize(entry.SizeBytes),
entry.CreatedAt.Format("2006-01-02 15:04:05"))
if entry.Encrypted {
fmt.Printf(" 🔒 Encrypted\n")
fmt.Printf(" [LOCK] Encrypted\n")
}
if entry.VerifyValid != nil && *entry.VerifyValid {
fmt.Printf(" ✅ Verified: %s\n", entry.VerifiedAt.Format("2006-01-02 15:04"))
fmt.Printf(" [OK] Verified: %s\n", entry.VerifiedAt.Format("2006-01-02 15:04"))
}
if entry.DrillSuccess != nil && *entry.DrillSuccess {
fmt.Printf(" 🧪 Drill Tested: %s\n", entry.DrillTestedAt.Format("2006-01-02 15:04"))
fmt.Printf(" [TEST] Drill Tested: %s\n", entry.DrillTestedAt.Format("2006-01-02 15:04"))
}
fmt.Println()
}

@@ -655,64 +655,64 @@ func runCatalogInfo(cmd *cobra.Command, args []string) error {
return nil
}

fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("=====================================================\n")
fmt.Printf(" Backup Details\n")
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
fmt.Printf("=====================================================\n\n")

fmt.Printf("📁 Database: %s\n", entry.Database)
fmt.Printf("[DIR] Database: %s\n", entry.Database)
fmt.Printf("🔧 Type: %s\n", entry.DatabaseType)
fmt.Printf("🖥️ Host: %s:%d\n", entry.Host, entry.Port)
fmt.Printf("[HOST] Host: %s:%d\n", entry.Host, entry.Port)
fmt.Printf("📂 Path: %s\n", entry.BackupPath)
fmt.Printf("📦 Backup Type: %s\n", entry.BackupType)
fmt.Printf("[PKG] Backup Type: %s\n", entry.BackupType)
fmt.Printf("💾 Size: %s (%d bytes)\n", catalog.FormatSize(entry.SizeBytes), entry.SizeBytes)
fmt.Printf("[SAVE] Size: %s (%d bytes)\n", catalog.FormatSize(entry.SizeBytes), entry.SizeBytes)
fmt.Printf("🔐 SHA256: %s\n", entry.SHA256)
fmt.Printf("[HASH] SHA256: %s\n", entry.SHA256)
fmt.Printf("📅 Created: %s\n", entry.CreatedAt.Format("2006-01-02 15:04:05 MST"))
fmt.Printf("⏱️ Duration: %.2fs\n", entry.Duration)
fmt.Printf("[TIME] Duration: %.2fs\n", entry.Duration)
fmt.Printf("📋 Status: %s\n", entry.Status)
fmt.Printf("[LOG] Status: %s\n", entry.Status)

if entry.Compression != "" {
fmt.Printf("📦 Compression: %s\n", entry.Compression)
fmt.Printf("[PKG] Compression: %s\n", entry.Compression)
}
if entry.Encrypted {
fmt.Printf("🔒 Encrypted: yes\n")
fmt.Printf("[LOCK] Encrypted: yes\n")
}
if entry.CloudLocation != "" {
fmt.Printf("☁️ Cloud: %s\n", entry.CloudLocation)
fmt.Printf("[CLOUD] Cloud: %s\n", entry.CloudLocation)
}
if entry.RetentionPolicy != "" {
fmt.Printf("📆 Retention: %s\n", entry.RetentionPolicy)
}

fmt.Printf("\n📊 Verification:\n")
fmt.Printf("\n[STATS] Verification:\n")
if entry.VerifiedAt != nil {
status := "❌ Failed"
status := "[FAIL] Failed"
if entry.VerifyValid != nil && *entry.VerifyValid {
status = "✅ Valid"
status = "[OK] Valid"
}
fmt.Printf(" Status: %s (checked %s)\n", status, entry.VerifiedAt.Format("2006-01-02 15:04"))
} else {
fmt.Printf(" Status: ⏳ Not verified\n")
fmt.Printf(" Status: [WAIT] Not verified\n")
}

fmt.Printf("\n🧪 DR Drill Test:\n")
fmt.Printf("\n[TEST] DR Drill Test:\n")
if entry.DrillTestedAt != nil {
status := "❌ Failed"
status := "[FAIL] Failed"
if entry.DrillSuccess != nil && *entry.DrillSuccess {
status = "✅ Passed"
status = "[OK] Passed"
}
fmt.Printf(" Status: %s (tested %s)\n", status, entry.DrillTestedAt.Format("2006-01-02 15:04"))
} else {
fmt.Printf(" Status: ⏳ Not tested\n")
fmt.Printf(" Status: [WAIT] Not tested\n")
}

if len(entry.Metadata) > 0 {
fmt.Printf("\n📝 Additional Metadata:\n")
fmt.Printf("\n[NOTE] Additional Metadata:\n")
for k, v := range entry.Metadata {
fmt.Printf(" %s: %s\n", k, v)
}
}

fmt.Printf("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("\n=====================================================\n")

return nil
}
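The catalog.go hunks above are one mechanical change: every emoji and box-drawing glyph in terminal output is replaced with a bracketed ASCII tag, which keeps column widths stable and survives non-UTF-8 terminals and log shippers. A minimal sketch of the substitution as a lookup table (the helper is illustrative, not part of the commit):

```go
package main

import "fmt"

// asciiTag mirrors the emoji-to-tag substitutions applied in cmd/catalog.go.
var asciiTag = map[string]string{
	"📁":  "[DIR]",
	"📊":  "[STATS]",
	"✅":  "[OK]",
	"🔄":  "[SYNC]",
	"🗑️": "[DEL]",
	"❌":  "[FAIL]",
	"⏱️": "[TIME]",
	"🔒":  "[LOCK]",
	"🧪":  "[TEST]",
	"📋":  "[LOG]",
	"📦":  "[PKG]",
}

func main() {
	for emoji, tag := range asciiTag {
		fmt.Printf("%s -> %s\n", emoji, tag)
	}
}
```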
@@ -115,7 +115,7 @@ func runCleanup(cmd *cobra.Command, args []string) error {
DryRun: dryRun,
}

fmt.Printf("🗑️ Cleanup Policy:\n")
fmt.Printf("[CLEANUP] Cleanup Policy:\n")
fmt.Printf(" Directory: %s\n", backupDir)
fmt.Printf(" Retention: %d days\n", policy.RetentionDays)
fmt.Printf(" Min backups: %d\n", policy.MinBackups)

@@ -142,16 +142,16 @@ func runCleanup(cmd *cobra.Command, args []string) error {
}

// Display results
fmt.Printf("📊 Results:\n")
fmt.Printf("[RESULTS] Results:\n")
fmt.Printf(" Total backups: %d\n", result.TotalBackups)
fmt.Printf(" Eligible for deletion: %d\n", result.EligibleForDeletion)

if len(result.Deleted) > 0 {
fmt.Printf("\n")
if dryRun {
fmt.Printf("🔍 Would delete %d backup(s):\n", len(result.Deleted))
fmt.Printf("[DRY-RUN] Would delete %d backup(s):\n", len(result.Deleted))
} else {
fmt.Printf("✅ Deleted %d backup(s):\n", len(result.Deleted))
fmt.Printf("[OK] Deleted %d backup(s):\n", len(result.Deleted))
}
for _, file := range result.Deleted {
fmt.Printf(" - %s\n", filepath.Base(file))

@@ -159,33 +159,33 @@ func runCleanup(cmd *cobra.Command, args []string) error {
}

if len(result.Kept) > 0 && len(result.Kept) <= 10 {
fmt.Printf("\n📦 Kept %d backup(s):\n", len(result.Kept))
fmt.Printf("\n[KEPT] Kept %d backup(s):\n", len(result.Kept))
for _, file := range result.Kept {
fmt.Printf(" - %s\n", filepath.Base(file))
}
} else if len(result.Kept) > 10 {
fmt.Printf("\n📦 Kept %d backup(s)\n", len(result.Kept))
fmt.Printf("\n[KEPT] Kept %d backup(s)\n", len(result.Kept))
}

if !dryRun && result.SpaceFreed > 0 {
fmt.Printf("\n💾 Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
fmt.Printf("\n[FREED] Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
}

if len(result.Errors) > 0 {
fmt.Printf("\n⚠️ Errors:\n")
fmt.Printf("\n[WARN] Errors:\n")
for _, err := range result.Errors {
fmt.Printf(" - %v\n", err)
}
}

fmt.Println(strings.Repeat("─", 50))
fmt.Println(strings.Repeat("-", 50))

if dryRun {
fmt.Println("✅ Dry run completed (no files were deleted)")
fmt.Println("[OK] Dry run completed (no files were deleted)")
} else if len(result.Deleted) > 0 {
fmt.Println("✅ Cleanup completed successfully")
fmt.Println("[OK] Cleanup completed successfully")
} else {
fmt.Println("ℹ️ No backups eligible for deletion")
fmt.Println("[INFO] No backups eligible for deletion")
}

return nil

@@ -212,7 +212,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
return fmt.Errorf("invalid cloud URI: %w", err)
}

fmt.Printf("☁️ Cloud Cleanup Policy:\n")
fmt.Printf("[CLOUD] Cloud Cleanup Policy:\n")
fmt.Printf(" URI: %s\n", uri)
fmt.Printf(" Provider: %s\n", cloudURI.Provider)
fmt.Printf(" Bucket: %s\n", cloudURI.Bucket)

@@ -295,7 +295,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
}

// Display results
fmt.Printf("📊 Results:\n")
fmt.Printf("[RESULTS] Results:\n")
fmt.Printf(" Total backups: %d\n", totalBackups)
fmt.Printf(" Eligible for deletion: %d\n", len(toDelete))
fmt.Printf(" Will keep: %d\n", len(toKeep))

@@ -303,9 +303,9 @@ func runCloudCleanup(ctx context.Context, uri string) error {

if len(toDelete) > 0 {
if dryRun {
fmt.Printf("🔍 Would delete %d backup(s):\n", len(toDelete))
fmt.Printf("[DRY-RUN] Would delete %d backup(s):\n", len(toDelete))
} else {
fmt.Printf("🗑️ Deleting %d backup(s):\n", len(toDelete))
fmt.Printf("[DELETE] Deleting %d backup(s):\n", len(toDelete))
}

var totalSize int64

@@ -321,7 +321,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {

if !dryRun {
if err := backend.Delete(ctx, backup.Key); err != nil {
fmt.Printf(" ❌ Error: %v\n", err)
fmt.Printf(" [FAIL] Error: %v\n", err)
} else {
deletedCount++
// Also try to delete metadata

@@ -330,12 +330,12 @@ func runCloudCleanup(ctx context.Context, uri string) error {
}
}

fmt.Printf("\n💾 Space %s: %s\n",
fmt.Printf("\n[FREED] Space %s: %s\n",
map[bool]string{true: "would be freed", false: "freed"}[dryRun],
cloud.FormatSize(totalSize))

if !dryRun && deletedCount > 0 {
fmt.Printf("✅ Successfully deleted %d backup(s)\n", deletedCount)
fmt.Printf("[OK] Successfully deleted %d backup(s)\n", deletedCount)
}
} else {
fmt.Println("No backups eligible for deletion")

@@ -405,7 +405,7 @@ func runGFSCleanup(backupDir string) error {
}

// Display tier breakdown
fmt.Printf("📊 Backup Classification:\n")
fmt.Printf("[STATS] Backup Classification:\n")
fmt.Printf(" Yearly: %d\n", result.YearlyKept)
fmt.Printf(" Monthly: %d\n", result.MonthlyKept)
fmt.Printf(" Weekly: %d\n", result.WeeklyKept)

@@ -416,9 +416,9 @@ func runGFSCleanup(backupDir string) error {
// Display deletions
if len(result.Deleted) > 0 {
if dryRun {
fmt.Printf("🔍 Would delete %d backup(s):\n", len(result.Deleted))
fmt.Printf("[SEARCH] Would delete %d backup(s):\n", len(result.Deleted))
} else {
fmt.Printf("✅ Deleted %d backup(s):\n", len(result.Deleted))
fmt.Printf("[OK] Deleted %d backup(s):\n", len(result.Deleted))
}
for _, file := range result.Deleted {
fmt.Printf(" - %s\n", filepath.Base(file))

@@ -427,7 +427,7 @@ func runGFSCleanup(backupDir string) error {

// Display kept backups (limited display)
if len(result.Kept) > 0 && len(result.Kept) <= 15 {
fmt.Printf("\n📦 Kept %d backup(s):\n", len(result.Kept))
fmt.Printf("\n[PKG] Kept %d backup(s):\n", len(result.Kept))
for _, file := range result.Kept {
// Show tier classification
info, _ := os.Stat(file)

@@ -440,28 +440,28 @@ func runGFSCleanup(backupDir string) error {
}
}
} else if len(result.Kept) > 15 {
fmt.Printf("\n📦 Kept %d backup(s)\n", len(result.Kept))
fmt.Printf("\n[PKG] Kept %d backup(s)\n", len(result.Kept))
}

if !dryRun && result.SpaceFreed > 0 {
fmt.Printf("\n💾 Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
fmt.Printf("\n[SAVE] Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
}

if len(result.Errors) > 0 {
fmt.Printf("\n⚠️ Errors:\n")
fmt.Printf("\n[WARN] Errors:\n")
for _, err := range result.Errors {
fmt.Printf(" - %v\n", err)
}
}

fmt.Println(strings.Repeat("─", 50))
fmt.Println(strings.Repeat("-", 50))

if dryRun {
fmt.Println("✅ GFS dry run completed (no files were deleted)")
fmt.Println("[OK] GFS dry run completed (no files were deleted)")
} else if len(result.Deleted) > 0 {
fmt.Println("✅ GFS cleanup completed successfully")
fmt.Println("[OK] GFS cleanup completed successfully")
} else {
fmt.Println("ℹ️ No backups eligible for deletion under GFS policy")
fmt.Println("[INFO] No backups eligible for deletion under GFS policy")
}

return nil
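`runGFSCleanup` reports how many backups fall into each Grandfather-Father-Son tier before deleting the rest. As a hedged sketch of how GFS classification typically works (the helper below is an illustrative assumption, not the tool's actual retention package): walk backups oldest-first and promote the first backup seen in each new year, month, and ISO week.

```go
package main

import (
	"fmt"
	"time"
)

// gfsTier classifies a timestamp: the first backup of a year is "yearly",
// the first of a month "monthly", the first of an ISO week "weekly",
// everything else "daily". Callers must iterate oldest-first.
func gfsTier(t time.Time, seenYear, seenMonth, seenWeek map[string]bool) string {
	year := t.Format("2006")
	month := t.Format("2006-01")
	isoYear, isoWeek := t.ISOWeek()
	week := fmt.Sprintf("%d-W%02d", isoYear, isoWeek)

	switch {
	case !seenYear[year]:
		seenYear[year] = true
		return "yearly"
	case !seenMonth[month]:
		seenMonth[month] = true
		return "monthly"
	case !seenWeek[week]:
		seenWeek[week] = true
		return "weekly"
	default:
		return "daily"
	}
}

func main() {
	years, months, weeks := map[string]bool{}, map[string]bool{}, map[string]bool{}
	for _, day := range []string{"2026-01-01", "2026-01-08", "2026-01-09", "2026-02-01"} {
		t, _ := time.Parse("2006-01-02", day)
		fmt.Println(day, "->", gfsTier(t, years, months, weeks))
	}
}
```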
34 cmd/cloud.go
@@ -189,12 +189,12 @@ func runCloudUpload(cmd *cobra.Command, args []string) error {
}
}

fmt.Printf("☁️ Uploading %d file(s) to %s...\n\n", len(files), backend.Name())
fmt.Printf("[CLOUD] Uploading %d file(s) to %s...\n\n", len(files), backend.Name())

successCount := 0
for _, localPath := range files {
filename := filepath.Base(localPath)
fmt.Printf("📤 %s\n", filename)
fmt.Printf("[UPLOAD] %s\n", filename)

// Progress callback
var lastPercent int

@@ -214,21 +214,21 @@ func runCloudUpload(cmd *cobra.Command, args []string) error {

err := backend.Upload(ctx, localPath, filename, progress)
if err != nil {
fmt.Printf(" ❌ Failed: %v\n\n", err)
fmt.Printf(" [FAIL] Failed: %v\n\n", err)
continue
}

// Get file size
if info, err := os.Stat(localPath); err == nil {
fmt.Printf(" ✅ Uploaded (%s)\n\n", cloud.FormatSize(info.Size()))
fmt.Printf(" [OK] Uploaded (%s)\n\n", cloud.FormatSize(info.Size()))
} else {
fmt.Printf(" ✅ Uploaded\n\n")
fmt.Printf(" [OK] Uploaded\n\n")
}
successCount++
}

fmt.Println(strings.Repeat("─", 50))
fmt.Println(strings.Repeat("-", 50))
fmt.Printf("✅ Successfully uploaded %d/%d file(s)\n", successCount, len(files))
fmt.Printf("[OK] Successfully uploaded %d/%d file(s)\n", successCount, len(files))

return nil
}

@@ -248,8 +248,8 @@ func runCloudDownload(cmd *cobra.Command, args []string) error {
localPath = filepath.Join(localPath, filepath.Base(remotePath))
}

fmt.Printf("☁️ Downloading from %s...\n\n", backend.Name())
fmt.Printf("[CLOUD] Downloading from %s...\n\n", backend.Name())
fmt.Printf("📥 %s → %s\n", remotePath, localPath)
fmt.Printf("[DOWNLOAD] %s -> %s\n", remotePath, localPath)

// Progress callback
var lastPercent int

@@ -274,9 +274,9 @@ func runCloudDownload(cmd *cobra.Command, args []string) error {

// Get file size
if info, err := os.Stat(localPath); err == nil {
fmt.Printf(" ✅ Downloaded (%s)\n", cloud.FormatSize(info.Size()))
fmt.Printf(" [OK] Downloaded (%s)\n", cloud.FormatSize(info.Size()))
} else {
fmt.Printf(" ✅ Downloaded\n")
fmt.Printf(" [OK] Downloaded\n")
}

return nil

@@ -294,7 +294,7 @@ func runCloudList(cmd *cobra.Command, args []string) error {
prefix = args[0]
}

fmt.Printf("☁️ Listing backups in %s/%s...\n\n", backend.Name(), cloudBucket)
fmt.Printf("[CLOUD] Listing backups in %s/%s...\n\n", backend.Name(), cloudBucket)

backups, err := backend.List(ctx, prefix)
if err != nil {

@@ -311,7 +311,7 @@ func runCloudList(cmd *cobra.Command, args []string) error {
totalSize += backup.Size

if cloudVerbose {
fmt.Printf("📦 %s\n", backup.Name)
fmt.Printf("[FILE] %s\n", backup.Name)
fmt.Printf(" Size: %s\n", cloud.FormatSize(backup.Size))
fmt.Printf(" Modified: %s\n", backup.LastModified.Format(time.RFC3339))
if backup.StorageClass != "" {

@@ -328,7 +328,7 @@ func runCloudList(cmd *cobra.Command, args []string) error {
}
}

fmt.Println(strings.Repeat("─", 50))
fmt.Println(strings.Repeat("-", 50))
fmt.Printf("Total: %d backup(s), %s\n", len(backups), cloud.FormatSize(totalSize))

return nil

@@ -360,7 +360,7 @@ func runCloudDelete(cmd *cobra.Command, args []string) error {

// Confirmation prompt
if !cloudConfirm {
fmt.Printf("⚠️ Delete %s (%s) from cloud storage?\n", remotePath, cloud.FormatSize(size))
fmt.Printf("[WARN] Delete %s (%s) from cloud storage?\n", remotePath, cloud.FormatSize(size))
fmt.Print("Type 'yes' to confirm: ")
var response string
fmt.Scanln(&response)

@@ -370,14 +370,14 @@ func runCloudDelete(cmd *cobra.Command, args []string) error {
}
}

fmt.Printf("🗑️ Deleting %s...\n", remotePath)
fmt.Printf("[DELETE] Deleting %s...\n", remotePath)

err = backend.Delete(ctx, remotePath)
if err != nil {
return fmt.Errorf("delete failed: %w", err)
}

fmt.Printf("✅ Deleted %s (%s)\n", remotePath, cloud.FormatSize(size))
fmt.Printf("[OK] Deleted %s (%s)\n", remotePath, cloud.FormatSize(size))

return nil
}
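Both the upload and download paths keep a `lastPercent` guard so the progress callback prints only when the integer percentage changes, rather than on every buffer flush. A minimal sketch of that throttling pattern (the callback signature is assumed for illustration; the real cloud backend API is not shown in this diff):

```go
package main

import "fmt"

// progressPrinter returns a callback that emits at most one update per whole
// percent, mirroring the lastPercent guard in cmd/cloud.go.
func progressPrinter() func(done, total int64) {
	lastPercent := -1
	return func(done, total int64) {
		if total <= 0 {
			return
		}
		percent := int(done * 100 / total)
		if percent != lastPercent {
			lastPercent = percent
			fmt.Printf("\r  %3d%%", percent)
		}
	}
}

func main() {
	progress := progressPrinter()
	total := int64(1 << 20)
	for sent := int64(0); sent <= total; sent += 64 << 10 {
		progress(sent, total) // prints only when the percentage ticks over
	}
	fmt.Println()
}
```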
@@ -61,10 +61,10 @@ func runCPUInfo(ctx context.Context) error {

// Show current vs optimal
if cfg.AutoDetectCores {
fmt.Println("\n✅ CPU optimization is enabled")
fmt.Println("\n[OK] CPU optimization is enabled")
fmt.Println("Job counts are automatically optimized based on detected hardware")
} else {
fmt.Println("\n⚠️ CPU optimization is disabled")
fmt.Println("\n[WARN] CPU optimization is disabled")
fmt.Println("Consider enabling --auto-detect-cores for better performance")
}
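The cpu.go hunk only swaps status glyphs, but the feature it reports on, auto-detected job counts, usually reduces to sizing worker pools from the detected core count. A hedged sketch of one such heuristic (the halving and cap are illustrative assumptions, not the tool's actual algorithm):

```go
package main

import (
	"fmt"
	"runtime"
)

// optimalJobs derives a parallel job count from detected cores, leaving
// headroom for the database engine itself. Purely illustrative.
func optimalJobs() int {
	jobs := runtime.NumCPU() / 2
	if jobs < 1 {
		jobs = 1
	}
	if jobs > 16 {
		jobs = 16 // diminishing returns beyond this for dump/restore parallelism
	}
	return jobs
}

func main() {
	fmt.Printf("detected %d cores -> %d parallel jobs\n", runtime.NumCPU(), optimalJobs())
}
```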
1284 cmd/dedup.go Normal file
@@ -0,0 +1,1284 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dbbackup/internal/dedup"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var dedupCmd = &cobra.Command{
|
||||||
|
Use: "dedup",
|
||||||
|
Short: "Deduplicated backup operations",
|
||||||
|
Long: `Content-defined chunking deduplication for space-efficient backups.
|
||||||
|
|
||||||
|
Similar to restic/borgbackup but with native database dump support.
|
||||||
|
|
||||||
|
Features:
|
||||||
|
- Content-defined chunking (CDC) with Buzhash rolling hash
|
||||||
|
- SHA-256 content-addressed storage
|
||||||
|
- AES-256-GCM encryption (optional)
|
||||||
|
- Gzip compression (optional)
|
||||||
|
- SQLite index for fast lookups
|
||||||
|
|
||||||
|
Storage Structure:
|
||||||
|
<dedup-dir>/
|
||||||
|
chunks/ # Content-addressed chunk files
|
||||||
|
ab/cdef... # Sharded by first 2 chars of hash
|
||||||
|
manifests/ # JSON manifest per backup
|
||||||
|
chunks.db # SQLite index
|
||||||
|
|
||||||
|
NFS/CIFS NOTICE:
|
||||||
|
SQLite may have locking issues on network storage.
|
||||||
|
Use --index-db to put the SQLite index on local storage while keeping
|
||||||
|
chunks on network storage:
|
||||||
|
|
||||||
|
dbbackup dedup backup mydb.sql \
|
||||||
|
--dedup-dir /mnt/nfs/backups/dedup \
|
||||||
|
--index-db /var/lib/dbbackup/dedup-index.db
|
||||||
|
|
||||||
|
This avoids "database is locked" errors while still storing chunks remotely.
|
||||||
|
|
||||||
|
COMPRESSED INPUT NOTICE:
|
||||||
|
Pre-compressed files (.gz) have poor deduplication ratios (<10%).
|
||||||
|
Use --decompress-input to decompress before chunking for better results:
|
||||||
|
|
||||||
|
dbbackup dedup backup mydb.sql.gz --decompress-input`,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupBackupCmd = &cobra.Command{
|
||||||
|
Use: "backup <file>",
|
||||||
|
Short: "Create a deduplicated backup of a file",
|
||||||
|
Long: `Chunk a file using content-defined chunking and store deduplicated chunks.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
dbbackup dedup backup /path/to/database.dump
|
||||||
|
dbbackup dedup backup mydb.sql --compress --encrypt`,
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: runDedupBackup,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupRestoreCmd = &cobra.Command{
|
||||||
|
Use: "restore <manifest-id> <output-file>",
|
||||||
|
Short: "Restore a backup from its manifest",
|
||||||
|
Long: `Reconstruct a file from its deduplicated chunks.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
dbbackup dedup restore 2026-01-07_120000_mydb /tmp/restored.dump
|
||||||
|
dbbackup dedup list # to see available manifests`,
|
||||||
|
Args: cobra.ExactArgs(2),
|
||||||
|
RunE: runDedupRestore,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupListCmd = &cobra.Command{
|
||||||
|
Use: "list",
|
||||||
|
Short: "List all deduplicated backups",
|
||||||
|
RunE: runDedupList,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupStatsCmd = &cobra.Command{
|
||||||
|
Use: "stats",
|
||||||
|
Short: "Show deduplication statistics",
|
||||||
|
RunE: runDedupStats,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupGCCmd = &cobra.Command{
|
||||||
|
Use: "gc",
|
||||||
|
Short: "Garbage collect unreferenced chunks",
|
||||||
|
Long: `Remove chunks that are no longer referenced by any manifest.
|
||||||
|
|
||||||
|
Run after deleting old backups to reclaim space.`,
|
||||||
|
RunE: runDedupGC,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupDeleteCmd = &cobra.Command{
|
||||||
|
Use: "delete <manifest-id>",
|
||||||
|
Short: "Delete a backup manifest (chunks cleaned by gc)",
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: runDedupDelete,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupVerifyCmd = &cobra.Command{
|
||||||
|
Use: "verify [manifest-id]",
|
||||||
|
Short: "Verify chunk integrity against manifests",
|
||||||
|
Long: `Verify that all chunks referenced by manifests exist and have correct hashes.
|
||||||
|
|
||||||
|
Without arguments, verifies all backups. With a manifest ID, verifies only that backup.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
dbbackup dedup verify # Verify all backups
|
||||||
|
dbbackup dedup verify 2026-01-07_mydb # Verify specific backup`,
|
||||||
|
RunE: runDedupVerify,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupPruneCmd = &cobra.Command{
|
||||||
|
Use: "prune",
|
||||||
|
Short: "Apply retention policy to manifests",
|
||||||
|
Long: `Delete old manifests based on retention policy (like borg prune).
|
||||||
|
|
||||||
|
Keeps a specified number of recent backups per database and deletes the rest.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
dbbackup dedup prune --keep-last 7 # Keep 7 most recent
|
||||||
|
dbbackup dedup prune --keep-daily 7 --keep-weekly 4 # Keep 7 daily + 4 weekly`,
|
||||||
|
RunE: runDedupPrune,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupBackupDBCmd = &cobra.Command{
|
||||||
|
Use: "backup-db",
|
||||||
|
Short: "Direct database dump with deduplication",
|
||||||
|
Long: `Dump a database directly into deduplicated chunks without temp files.
|
||||||
|
|
||||||
|
Streams the database dump through the chunker for efficient deduplication.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
dbbackup dedup backup-db --db-type postgres --db-name mydb
|
||||||
|
dbbackup dedup backup-db -d mariadb --database production_db --host db.local`,
|
||||||
|
RunE: runDedupBackupDB,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prune flags
|
||||||
|
var (
|
||||||
|
pruneKeepLast int
|
||||||
|
pruneKeepDaily int
|
||||||
|
pruneKeepWeekly int
|
||||||
|
pruneDryRun bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// backup-db flags
|
||||||
|
var (
|
||||||
|
backupDBDatabase string
|
||||||
|
backupDBUser string
|
||||||
|
backupDBPassword string
|
||||||
|
)
|
||||||
|
|
||||||
|
// metrics flags
|
||||||
|
var (
|
||||||
|
dedupMetricsOutput string
|
||||||
|
dedupMetricsInstance string
|
||||||
|
)
|
||||||
|
|
||||||
|
var dedupMetricsCmd = &cobra.Command{
|
||||||
|
Use: "metrics",
|
||||||
|
Short: "Export dedup statistics as Prometheus metrics",
|
||||||
|
Long: `Export deduplication statistics in Prometheus format.
|
||||||
|
|
||||||
|
Can write to a textfile for node_exporter's textfile collector,
|
||||||
|
or print to stdout for custom integrations.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
dbbackup dedup metrics # Print to stdout
|
||||||
|
dbbackup dedup metrics --output /var/lib/node_exporter/textfile_collector/dedup.prom
|
||||||
|
dbbackup dedup metrics --instance prod-db-1`,
|
||||||
|
RunE: runDedupMetrics,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flags
|
||||||
|
var (
|
||||||
|
dedupDir string
|
||||||
|
dedupIndexDB string // Separate path for SQLite index (for NFS/CIFS support)
|
||||||
|
dedupCompress bool
|
||||||
|
dedupEncrypt bool
|
||||||
|
dedupKey string
|
||||||
|
dedupName string
|
||||||
|
dedupDBType string
|
||||||
|
dedupDBName string
|
||||||
|
dedupDBHost string
|
||||||
|
dedupDecompress bool // Auto-decompress gzip input
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(dedupCmd)
|
||||||
|
dedupCmd.AddCommand(dedupBackupCmd)
|
||||||
|
dedupCmd.AddCommand(dedupRestoreCmd)
|
||||||
|
dedupCmd.AddCommand(dedupListCmd)
|
||||||
|
dedupCmd.AddCommand(dedupStatsCmd)
|
||||||
|
dedupCmd.AddCommand(dedupGCCmd)
|
||||||
|
dedupCmd.AddCommand(dedupDeleteCmd)
|
||||||
|
dedupCmd.AddCommand(dedupVerifyCmd)
|
||||||
|
dedupCmd.AddCommand(dedupPruneCmd)
|
||||||
|
dedupCmd.AddCommand(dedupBackupDBCmd)
|
||||||
|
dedupCmd.AddCommand(dedupMetricsCmd)
|
||||||
|
|
||||||
|
// Global dedup flags
|
||||||
|
dedupCmd.PersistentFlags().StringVar(&dedupDir, "dedup-dir", "", "Dedup storage directory (default: $BACKUP_DIR/dedup)")
|
||||||
|
dedupCmd.PersistentFlags().StringVar(&dedupIndexDB, "index-db", "", "SQLite index path (local recommended for NFS/CIFS chunk dirs)")
|
||||||
|
dedupCmd.PersistentFlags().BoolVar(&dedupCompress, "compress", true, "Compress chunks with gzip")
|
||||||
|
dedupCmd.PersistentFlags().BoolVar(&dedupEncrypt, "encrypt", false, "Encrypt chunks with AES-256-GCM")
|
||||||
|
dedupCmd.PersistentFlags().StringVar(&dedupKey, "key", "", "Encryption key (hex) or use DBBACKUP_DEDUP_KEY env")
|
||||||
|
|
||||||
|
// Backup-specific flags
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupName, "name", "", "Optional backup name")
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupDBType, "db-type", "", "Database type (postgres/mysql)")
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupDBName, "db-name", "", "Database name")
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupDBHost, "db-host", "", "Database host")
|
||||||
|
dedupBackupCmd.Flags().BoolVar(&dedupDecompress, "decompress-input", false, "Auto-decompress gzip input before chunking (improves dedup ratio)")
|
||||||
|
|
||||||
|
// Prune flags
|
||||||
|
dedupPruneCmd.Flags().IntVar(&pruneKeepLast, "keep-last", 0, "Keep the last N backups")
|
||||||
|
dedupPruneCmd.Flags().IntVar(&pruneKeepDaily, "keep-daily", 0, "Keep N daily backups")
|
||||||
|
dedupPruneCmd.Flags().IntVar(&pruneKeepWeekly, "keep-weekly", 0, "Keep N weekly backups")
|
||||||
|
dedupPruneCmd.Flags().BoolVar(&pruneDryRun, "dry-run", false, "Show what would be deleted without actually deleting")
|
||||||
|
|
||||||
|
// backup-db flags
|
||||||
|
dedupBackupDBCmd.Flags().StringVarP(&dedupDBType, "db-type", "d", "", "Database type (postgres/mariadb/mysql)")
|
||||||
|
dedupBackupDBCmd.Flags().StringVar(&backupDBDatabase, "database", "", "Database name to backup")
|
||||||
|
dedupBackupDBCmd.Flags().StringVar(&dedupDBHost, "host", "localhost", "Database host")
|
||||||
|
dedupBackupDBCmd.Flags().StringVarP(&backupDBUser, "user", "u", "", "Database user")
|
||||||
|
dedupBackupDBCmd.Flags().StringVarP(&backupDBPassword, "password", "p", "", "Database password (or use env)")
|
||||||
|
dedupBackupDBCmd.MarkFlagRequired("db-type")
|
||||||
|
dedupBackupDBCmd.MarkFlagRequired("database")
|
||||||
|
|
||||||
|
// Metrics flags
|
||||||
|
dedupMetricsCmd.Flags().StringVarP(&dedupMetricsOutput, "output", "o", "", "Output file path (default: stdout)")
|
||||||
|
dedupMetricsCmd.Flags().StringVar(&dedupMetricsInstance, "instance", "", "Instance label for metrics (default: hostname)")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDedupDir() string {
|
||||||
|
if dedupDir != "" {
|
||||||
|
return dedupDir
|
||||||
|
}
|
||||||
|
if cfg != nil && cfg.BackupDir != "" {
|
||||||
|
return filepath.Join(cfg.BackupDir, "dedup")
|
||||||
|
}
|
||||||
|
return filepath.Join(os.Getenv("HOME"), "db_backups", "dedup")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getIndexDBPath() string {
|
||||||
|
if dedupIndexDB != "" {
|
||||||
|
return dedupIndexDB
|
||||||
|
}
|
||||||
|
// Default: same directory as chunks (may have issues on NFS/CIFS)
|
||||||
|
return filepath.Join(getDedupDir(), "chunks.db")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getEncryptionKey() string {
|
||||||
|
if dedupKey != "" {
|
||||||
|
return dedupKey
|
||||||
|
}
|
||||||
|
return os.Getenv("DBBACKUP_DEDUP_KEY")
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupBackup(cmd *cobra.Command, args []string) error {
|
||||||
|
inputPath := args[0]
|
||||||
|
|
||||||
|
// Open input file
|
||||||
|
file, err := os.Open(inputPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open input file: %w", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
info, err := file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to stat input file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for compressed input and warn/handle
|
||||||
|
var reader io.Reader = file
|
||||||
|
isGzipped := strings.HasSuffix(strings.ToLower(inputPath), ".gz")
|
||||||
|
if isGzipped && !dedupDecompress {
|
||||||
|
fmt.Printf("Warning: Input appears to be gzip compressed (.gz)\n")
|
||||||
|
fmt.Printf(" Compressed data typically has poor dedup ratios (<10%%).\n")
|
||||||
|
fmt.Printf(" Consider using --decompress-input for better deduplication.\n\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
if isGzipped && dedupDecompress {
|
||||||
|
fmt.Printf("Auto-decompressing gzip input for better dedup ratio...\n")
|
||||||
|
gzReader, err := gzip.NewReader(file)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to decompress gzip input: %w", err)
|
||||||
|
}
|
||||||
|
defer gzReader.Close()
|
||||||
|
reader = gzReader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup dedup storage
|
||||||
|
basePath := getDedupDir()
|
||||||
|
encKey := ""
|
||||||
|
if dedupEncrypt {
|
||||||
|
encKey = getEncryptionKey()
|
||||||
|
if encKey == "" {
|
||||||
|
return fmt.Errorf("encryption enabled but no key provided (use --key or DBBACKUP_DEDUP_KEY)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := dedup.NewChunkStore(dedup.StoreConfig{
|
||||||
|
BasePath: basePath,
|
||||||
|
Compress: dedupCompress,
|
||||||
|
EncryptionKey: encKey,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifestStore, err := dedup.NewManifestStore(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open manifest store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
index, err := dedup.NewChunkIndexAt(getIndexDBPath())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk index: %w", err)
|
||||||
|
}
|
||||||
|
defer index.Close()
|
||||||
|
|
||||||
|
// Generate manifest ID
|
||||||
|
now := time.Now()
|
||||||
|
manifestID := now.Format("2006-01-02_150405")
|
||||||
|
if dedupDBName != "" {
|
||||||
|
manifestID += "_" + dedupDBName
|
||||||
|
} else {
|
||||||
|
base := filepath.Base(inputPath)
|
||||||
|
ext := filepath.Ext(base)
|
||||||
|
// Remove .gz extension if decompressing
|
||||||
|
if isGzipped && dedupDecompress {
|
||||||
|
base = strings.TrimSuffix(base, ext)
|
||||||
|
ext = filepath.Ext(base)
|
||||||
|
}
|
||||||
|
manifestID += "_" + strings.TrimSuffix(base, ext)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Creating deduplicated backup: %s\n", manifestID)
|
||||||
|
fmt.Printf("Input: %s (%s)\n", inputPath, formatBytes(info.Size()))
|
||||||
|
if isGzipped && dedupDecompress {
|
||||||
|
fmt.Printf("Mode: Decompressing before chunking\n")
|
||||||
|
}
|
||||||
|
fmt.Printf("Store: %s\n", basePath)
|
||||||
|
if dedupIndexDB != "" {
|
||||||
|
fmt.Printf("Index: %s\n", getIndexDBPath())
|
||||||
|
}
|
||||||
|
|
||||||
|
// For decompressed input, we can't seek - use TeeReader to hash while chunking
|
||||||
|
h := sha256.New()
|
||||||
|
var chunkReader io.Reader
|
||||||
|
|
||||||
|
if isGzipped && dedupDecompress {
|
||||||
|
// Can't seek on gzip stream - hash will be computed inline
|
||||||
|
chunkReader = io.TeeReader(reader, h)
|
||||||
|
} else {
|
||||||
|
// Regular file - hash first, then reset and chunk
|
||||||
|
file.Seek(0, 0)
|
||||||
|
io.Copy(h, file)
|
||||||
|
file.Seek(0, 0)
|
||||||
|
chunkReader = file
|
||||||
|
h = sha256.New() // Reset for inline hashing
|
||||||
|
chunkReader = io.TeeReader(file, h)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chunk the file
|
||||||
|
chunker := dedup.NewChunker(chunkReader, dedup.DefaultChunkerConfig())
|
||||||
|
var chunks []dedup.ChunkRef
|
||||||
|
var totalSize, storedSize int64
|
||||||
|
var chunkCount, newChunks int
|
||||||
|
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
for {
|
||||||
|
chunk, err := chunker.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("chunking failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkCount++
|
||||||
|
totalSize += int64(chunk.Length)
|
||||||
|
|
||||||
|
// Store chunk (deduplication happens here)
|
||||||
|
isNew, err := store.Put(chunk)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to store chunk: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if isNew {
|
||||||
|
newChunks++
|
||||||
|
storedSize += int64(chunk.Length)
|
||||||
|
// Record in index
|
||||||
|
index.AddChunk(chunk.Hash, chunk.Length, chunk.Length)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunks = append(chunks, dedup.ChunkRef{
|
||||||
|
Hash: chunk.Hash,
|
||||||
|
Offset: chunk.Offset,
|
||||||
|
Length: chunk.Length,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Progress
|
||||||
|
if chunkCount%1000 == 0 {
|
||||||
|
fmt.Printf("\r Processed %d chunks, %d new...", chunkCount, newChunks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := time.Since(startTime)
|
||||||
|
|
||||||
|
// Get final hash (computed inline via TeeReader)
|
||||||
|
fileHash := hex.EncodeToString(h.Sum(nil))
|
||||||
|
|
||||||
|
// Calculate dedup ratio
|
||||||
|
dedupRatio := 0.0
|
||||||
|
if totalSize > 0 {
|
||||||
|
dedupRatio = 1.0 - float64(storedSize)/float64(totalSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create manifest
|
||||||
|
manifest := &dedup.Manifest{
|
||||||
|
ID: manifestID,
|
||||||
|
Name: dedupName,
|
||||||
|
CreatedAt: now,
|
||||||
|
DatabaseType: dedupDBType,
|
||||||
|
DatabaseName: dedupDBName,
|
||||||
|
DatabaseHost: dedupDBHost,
|
||||||
|
Chunks: chunks,
|
||||||
|
OriginalSize: totalSize,
|
||||||
|
StoredSize: storedSize,
|
||||||
|
ChunkCount: chunkCount,
|
||||||
|
NewChunks: newChunks,
|
||||||
|
DedupRatio: dedupRatio,
|
||||||
|
Encrypted: dedupEncrypt,
|
||||||
|
Compressed: dedupCompress,
|
||||||
|
SHA256: fileHash,
|
||||||
|
Decompressed: isGzipped && dedupDecompress, // Track if we decompressed
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := manifestStore.Save(manifest); err != nil {
|
||||||
|
return fmt.Errorf("failed to save manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := index.AddManifest(manifest); err != nil {
|
||||||
|
log.Warn("Failed to index manifest", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\r \r")
|
||||||
|
fmt.Printf("\nBackup complete!\n")
|
||||||
|
fmt.Printf(" Manifest: %s\n", manifestID)
|
||||||
|
fmt.Printf(" Chunks: %d total, %d new\n", chunkCount, newChunks)
|
||||||
|
fmt.Printf(" Original: %s\n", formatBytes(totalSize))
|
||||||
|
fmt.Printf(" Stored: %s (new data)\n", formatBytes(storedSize))
|
||||||
|
fmt.Printf(" Dedup ratio: %.1f%%\n", dedupRatio*100)
|
||||||
|
fmt.Printf(" Duration: %s\n", duration.Round(time.Millisecond))
|
||||||
|
fmt.Printf(" Throughput: %s/s\n", formatBytes(int64(float64(totalSize)/duration.Seconds())))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupRestore(cmd *cobra.Command, args []string) error {
|
||||||
|
manifestID := args[0]
|
||||||
|
outputPath := args[1]
|
||||||
|
|
||||||
|
basePath := getDedupDir()
|
||||||
|
encKey := ""
|
||||||
|
if dedupEncrypt {
|
||||||
|
encKey = getEncryptionKey()
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := dedup.NewChunkStore(dedup.StoreConfig{
|
||||||
|
BasePath: basePath,
|
||||||
|
Compress: dedupCompress,
|
||||||
|
EncryptionKey: encKey,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifestStore, err := dedup.NewManifestStore(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open manifest store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifest, err := manifestStore.Load(manifestID)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to load manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Restoring backup: %s\n", manifestID)
|
||||||
|
fmt.Printf(" Created: %s\n", manifest.CreatedAt.Format(time.RFC3339))
|
||||||
|
fmt.Printf(" Size: %s\n", formatBytes(manifest.OriginalSize))
|
||||||
|
fmt.Printf(" Chunks: %d\n", manifest.ChunkCount)
|
||||||
|
|
||||||
|
// Create output file
|
||||||
|
outFile, err := os.Create(outputPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create output file: %w", err)
|
||||||
|
}
|
||||||
|
defer outFile.Close()
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
writer := io.MultiWriter(outFile, h)
|
||||||
|
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
for i, ref := range manifest.Chunks {
|
||||||
|
chunk, err := store.Get(ref.Hash)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read chunk %d (%s): %w", i, ref.Hash[:8], err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := writer.Write(chunk.Data); err != nil {
|
||||||
|
return fmt.Errorf("failed to write chunk %d: %w", i, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i+1)%1000 == 0 {
|
||||||
|
fmt.Printf("\r Restored %d/%d chunks...", i+1, manifest.ChunkCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := time.Since(startTime)
|
||||||
|
restoredHash := hex.EncodeToString(h.Sum(nil))
|
||||||
|
|
||||||
|
fmt.Printf("\r \r")
|
||||||
|
fmt.Printf("\nRestore complete!\n")
|
||||||
|
fmt.Printf(" Output: %s\n", outputPath)
|
||||||
|
fmt.Printf(" Duration: %s\n", duration.Round(time.Millisecond))
|
||||||
|
|
||||||
|
// Verify hash
|
||||||
|
if manifest.SHA256 != "" {
|
||||||
|
if restoredHash == manifest.SHA256 {
|
||||||
|
fmt.Printf(" Verification: [OK] SHA-256 matches\n")
|
||||||
|
} else {
|
||||||
|
fmt.Printf(" Verification: [FAIL] SHA-256 MISMATCH!\n")
|
||||||
|
fmt.Printf(" Expected: %s\n", manifest.SHA256)
|
||||||
|
fmt.Printf(" Got: %s\n", restoredHash)
|
||||||
|
return fmt.Errorf("integrity verification failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupList(cmd *cobra.Command, args []string) error {
|
||||||
|
basePath := getDedupDir()
|
||||||
|
|
||||||
|
manifestStore, err := dedup.NewManifestStore(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open manifest store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifests, err := manifestStore.ListAll()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to list manifests: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(manifests) == 0 {
|
||||||
|
fmt.Println("No deduplicated backups found.")
|
||||||
|
fmt.Printf("Store: %s\n", basePath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Deduplicated Backups (%s)\n\n", basePath)
|
||||||
|
fmt.Printf("%-30s %-12s %-10s %-10s %s\n", "ID", "SIZE", "DEDUP", "CHUNKS", "CREATED")
|
||||||
|
fmt.Println(strings.Repeat("-", 80))
|
||||||
|
|
||||||
|
for _, m := range manifests {
|
||||||
|
fmt.Printf("%-30s %-12s %-10.1f%% %-10d %s\n",
|
||||||
|
truncateStr(m.ID, 30),
|
||||||
|
formatBytes(m.OriginalSize),
|
||||||
|
m.DedupRatio*100,
|
||||||
|
m.ChunkCount,
|
||||||
|
m.CreatedAt.Format("2006-01-02 15:04"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupStats(cmd *cobra.Command, args []string) error {
|
||||||
|
basePath := getDedupDir()
|
||||||
|
|
||||||
|
index, err := dedup.NewChunkIndex(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk index: %w", err)
|
||||||
|
}
|
||||||
|
defer index.Close()
|
||||||
|
|
||||||
|
stats, err := index.Stats()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get stats: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := dedup.NewChunkStore(dedup.StoreConfig{BasePath: basePath})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
storeStats, err := store.Stats()
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("Failed to get store stats", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Deduplication Statistics\n")
|
||||||
|
fmt.Printf("========================\n\n")
|
||||||
|
fmt.Printf("Store: %s\n", basePath)
|
||||||
|
fmt.Printf("Manifests: %d\n", stats.TotalManifests)
|
||||||
|
fmt.Printf("Unique chunks: %d\n", stats.TotalChunks)
|
||||||
|
fmt.Printf("Total raw size: %s\n", formatBytes(stats.TotalSizeRaw))
|
||||||
|
fmt.Printf("Stored size: %s\n", formatBytes(stats.TotalSizeStored))
|
||||||
|
fmt.Printf("\n")
|
||||||
|
fmt.Printf("Backup Statistics (accurate dedup calculation):\n")
|
||||||
|
fmt.Printf(" Total backed up: %s (across all backups)\n", formatBytes(stats.TotalBackupSize))
|
||||||
|
fmt.Printf(" New data stored: %s\n", formatBytes(stats.TotalNewData))
|
||||||
|
fmt.Printf(" Space saved: %s\n", formatBytes(stats.SpaceSaved))
|
||||||
|
fmt.Printf(" Dedup ratio: %.1f%%\n", stats.DedupRatio*100)
|
||||||
|
|
||||||
|
if storeStats != nil {
|
||||||
|
fmt.Printf("Disk usage: %s\n", formatBytes(storeStats.TotalSize))
|
||||||
|
fmt.Printf("Directories: %d\n", storeStats.Directories)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupGC(cmd *cobra.Command, args []string) error {
    basePath := getDedupDir()

    index, err := dedup.NewChunkIndex(basePath)
    if err != nil {
        return fmt.Errorf("failed to open chunk index: %w", err)
    }
    defer index.Close()

    store, err := dedup.NewChunkStore(dedup.StoreConfig{
        BasePath: basePath,
        Compress: dedupCompress,
    })
    if err != nil {
        return fmt.Errorf("failed to open chunk store: %w", err)
    }

    // Find orphaned chunks
    orphans, err := index.ListOrphanedChunks()
    if err != nil {
        return fmt.Errorf("failed to find orphaned chunks: %w", err)
    }

    if len(orphans) == 0 {
        fmt.Println("No orphaned chunks to clean up.")
        return nil
    }

    fmt.Printf("Found %d orphaned chunks\n", len(orphans))

    var freed int64
    var deleted int
    for _, hash := range orphans {
        meta, _ := index.GetChunk(hash)
        if err := store.Delete(hash); err != nil {
            log.Warn("Failed to delete chunk", "hash", hash[:8], "error", err)
            continue
        }
        // Only count chunks that were actually removed from the store.
        deleted++
        if meta != nil {
            freed += meta.SizeStored
        }
        if err := index.RemoveChunk(hash); err != nil {
            log.Warn("Failed to remove chunk from index", "hash", hash[:8], "error", err)
        }
    }

    fmt.Printf("Deleted %d chunks, freed %s\n", deleted, formatBytes(freed))

    // Vacuum the index
    if err := index.Vacuum(); err != nil {
        log.Warn("Failed to vacuum index", "error", err)
    }

    return nil
}

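// runDedupDelete removes a single backup manifest by ID and decrements the
// refcount of every chunk it referenced; the chunks themselves stay behind
// until 'dedup gc' collects them.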
func runDedupDelete(cmd *cobra.Command, args []string) error {
    manifestID := args[0]
    basePath := getDedupDir()

    manifestStore, err := dedup.NewManifestStore(basePath)
    if err != nil {
        return fmt.Errorf("failed to open manifest store: %w", err)
    }

    index, err := dedup.NewChunkIndex(basePath)
    if err != nil {
        return fmt.Errorf("failed to open chunk index: %w", err)
    }
    defer index.Close()

    // Load manifest to decrement chunk refs
    manifest, err := manifestStore.Load(manifestID)
    if err != nil {
        return fmt.Errorf("failed to load manifest: %w", err)
    }

    // Decrement reference counts
    for _, ref := range manifest.Chunks {
        index.DecrementRef(ref.Hash)
    }

    // Delete manifest
    if err := manifestStore.Delete(manifestID); err != nil {
        return fmt.Errorf("failed to delete manifest: %w", err)
    }

    if err := index.RemoveManifest(manifestID); err != nil {
        log.Warn("Failed to remove manifest from index", "error", err)
    }

    fmt.Printf("Deleted backup: %s\n", manifestID)
    fmt.Println("Run 'dbbackup dedup gc' to reclaim space from unreferenced chunks.")

    return nil
}

// Helper functions

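// formatBytes renders a byte count with binary (1024-based) units, e.g.
// formatBytes(1536) == "1.5 KB" and formatBytes(3*1024*1024) == "3.0 MB".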
func formatBytes(b int64) string {
    const unit = 1024
    if b < unit {
        return fmt.Sprintf("%d B", b)
    }
    div, exp := int64(unit), 0
    for n := b / unit; n >= unit; n /= unit {
        div *= unit
        exp++
    }
    return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp])
}

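// truncateStr shortens s to at most max characters, replacing the tail with
// "..." when it truncates (assumes max >= 3), e.g.
// truncateStr("abcdefgh", 5) == "ab...".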
func truncateStr(s string, max int) string {
    if len(s) <= max {
        return s
    }
    return s[:max-3] + "..."
}

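// runDedupVerify checks backup integrity chunk by chunk: each referenced
// chunk must exist in the store, be readable, and match its recorded length.
// With an argument it verifies one manifest, otherwise all of them, and
// manifests that pass get their VerifiedAt timestamp refreshed.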
func runDedupVerify(cmd *cobra.Command, args []string) error {
    basePath := getDedupDir()

    store, err := dedup.NewChunkStore(dedup.StoreConfig{
        BasePath: basePath,
        Compress: dedupCompress,
    })
    if err != nil {
        return fmt.Errorf("failed to open chunk store: %w", err)
    }

    manifestStore, err := dedup.NewManifestStore(basePath)
    if err != nil {
        return fmt.Errorf("failed to open manifest store: %w", err)
    }

    index, err := dedup.NewChunkIndexAt(getIndexDBPath())
    if err != nil {
        return fmt.Errorf("failed to open chunk index: %w", err)
    }
    defer index.Close()

    var manifests []*dedup.Manifest

    if len(args) > 0 {
        // Verify specific manifest
        m, err := manifestStore.Load(args[0])
        if err != nil {
            return fmt.Errorf("failed to load manifest: %w", err)
        }
        manifests = []*dedup.Manifest{m}
    } else {
        // Verify all manifests
        manifests, err = manifestStore.ListAll()
        if err != nil {
            return fmt.Errorf("failed to list manifests: %w", err)
        }
    }

    if len(manifests) == 0 {
        fmt.Println("No manifests to verify.")
        return nil
    }

    fmt.Printf("Verifying %d backup(s)...\n\n", len(manifests))

    var totalChunks, missingChunks, corruptChunks int
    allOK := true

    for _, m := range manifests {
        fmt.Printf("Verifying: %s (%d chunks)\n", m.ID, m.ChunkCount)

        var missing, corrupt int
        seenHashes := make(map[string]bool)

        for i, ref := range m.Chunks {
            if seenHashes[ref.Hash] {
                continue // Already verified this chunk
            }
            seenHashes[ref.Hash] = true
            totalChunks++

            // Check if chunk exists
            if !store.Has(ref.Hash) {
                missing++
                missingChunks++
                if missing <= 5 {
                    fmt.Printf("  [MISSING] chunk %d: %s\n", i, ref.Hash[:16])
                }
                continue
            }

            // Verify chunk hash by reading it
            chunk, err := store.Get(ref.Hash)
            if err != nil {
                corrupt++
                corruptChunks++
                if corrupt <= 5 {
                    fmt.Printf("  [CORRUPT] chunk %d: %s - %v\n", i, ref.Hash[:16], err)
                }
                continue
            }

            // Verify size
            if chunk.Length != ref.Length {
                corrupt++
                corruptChunks++
                if corrupt <= 5 {
                    fmt.Printf("  [SIZE MISMATCH] chunk %d: expected %d, got %d\n", i, ref.Length, chunk.Length)
                }
            }
        }

        if missing > 0 || corrupt > 0 {
            allOK = false
            fmt.Printf("  Result: FAILED (%d missing, %d corrupt)\n", missing, corrupt)
            if missing > 5 || corrupt > 5 {
                // At most 5 errors of each kind were printed above.
                shown := min(missing, 5) + min(corrupt, 5)
                fmt.Printf("  ... and %d more errors\n", (missing+corrupt)-shown)
            }
        } else {
            fmt.Printf("  Result: OK (%d unique chunks verified)\n", len(seenHashes))
            // Update verified timestamp
            m.VerifiedAt = time.Now()
            manifestStore.Save(m)
            index.UpdateManifestVerified(m.ID, m.VerifiedAt)
        }
        fmt.Println()
    }

    fmt.Println("========================================")
    if allOK {
        fmt.Printf("All %d backup(s) verified successfully!\n", len(manifests))
        fmt.Printf("Total unique chunks checked: %d\n", totalChunks)
    } else {
        fmt.Printf("Verification FAILED!\n")
        fmt.Printf("Missing chunks: %d\n", missingChunks)
        fmt.Printf("Corrupt chunks: %d\n", corruptChunks)
        return fmt.Errorf("verification failed: %d missing, %d corrupt chunks", missingChunks, corruptChunks)
    }

    return nil
}

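// runDedupPrune applies a retention policy per database: the newest N backups
// (--keep-last), one per calendar day (--keep-daily), and one per ISO week
// (--keep-weekly). A typical invocation might look like
//
//	dbbackup dedup prune --keep-last 7 --keep-daily 14 --keep-weekly 8 --dry-run
//
// (illustrative; the exact flag spellings are defined where the cobra command
// is registered).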
func runDedupPrune(cmd *cobra.Command, args []string) error {
    if pruneKeepLast == 0 && pruneKeepDaily == 0 && pruneKeepWeekly == 0 {
        return fmt.Errorf("at least one of --keep-last, --keep-daily, or --keep-weekly must be specified")
    }

    basePath := getDedupDir()

    manifestStore, err := dedup.NewManifestStore(basePath)
    if err != nil {
        return fmt.Errorf("failed to open manifest store: %w", err)
    }

    index, err := dedup.NewChunkIndexAt(getIndexDBPath())
    if err != nil {
        return fmt.Errorf("failed to open chunk index: %w", err)
    }
    defer index.Close()

    manifests, err := manifestStore.ListAll()
    if err != nil {
        return fmt.Errorf("failed to list manifests: %w", err)
    }

    if len(manifests) == 0 {
        fmt.Println("No backups to prune.")
        return nil
    }

    // Group by database name
    byDatabase := make(map[string][]*dedup.Manifest)
    for _, m := range manifests {
        key := m.DatabaseName
        if key == "" {
            key = "_default"
        }
        byDatabase[key] = append(byDatabase[key], m)
    }

    var toDelete []*dedup.Manifest

    for dbName, dbManifests := range byDatabase {
        // Already sorted by time (newest first from ListAll)
        kept := make(map[string]bool)
        keepReasons := make(map[string]string)

        // Keep last N
        if pruneKeepLast > 0 {
            for i := 0; i < pruneKeepLast && i < len(dbManifests); i++ {
                kept[dbManifests[i].ID] = true
                keepReasons[dbManifests[i].ID] = "keep-last"
            }
        }

        // Keep daily (one per day)
        if pruneKeepDaily > 0 {
            seenDays := make(map[string]bool)
            count := 0
            for _, m := range dbManifests {
                day := m.CreatedAt.Format("2006-01-02")
                if !seenDays[day] {
                    seenDays[day] = true
                    if count < pruneKeepDaily {
                        kept[m.ID] = true
                        if keepReasons[m.ID] == "" {
                            keepReasons[m.ID] = "keep-daily"
                        }
                        count++
                    }
                }
            }
        }

        // Keep weekly (one per week)
        if pruneKeepWeekly > 0 {
            seenWeeks := make(map[string]bool)
            count := 0
            for _, m := range dbManifests {
                year, week := m.CreatedAt.ISOWeek()
                weekKey := fmt.Sprintf("%d-W%02d", year, week)
                if !seenWeeks[weekKey] {
                    seenWeeks[weekKey] = true
                    if count < pruneKeepWeekly {
                        kept[m.ID] = true
                        if keepReasons[m.ID] == "" {
                            keepReasons[m.ID] = "keep-weekly"
                        }
                        count++
                    }
                }
            }
        }

        if dbName != "_default" {
            fmt.Printf("\nDatabase: %s\n", dbName)
        } else {
            fmt.Printf("\nUnnamed backups:\n")
        }

        for _, m := range dbManifests {
            if kept[m.ID] {
                fmt.Printf("  [KEEP]   %s (%s) - %s\n", m.ID, m.CreatedAt.Format("2006-01-02"), keepReasons[m.ID])
            } else {
                fmt.Printf("  [DELETE] %s (%s)\n", m.ID, m.CreatedAt.Format("2006-01-02"))
                toDelete = append(toDelete, m)
            }
        }
    }

    if len(toDelete) == 0 {
        fmt.Printf("\nNo backups to prune (all match retention policy).\n")
        return nil
    }

    fmt.Printf("\n%d backup(s) will be deleted.\n", len(toDelete))

    if pruneDryRun {
        fmt.Println("\n[DRY RUN] No changes made. Remove --dry-run to actually delete.")
        return nil
    }

    // Actually delete
    for _, m := range toDelete {
        // Decrement chunk references
        for _, ref := range m.Chunks {
            index.DecrementRef(ref.Hash)
        }

        if err := manifestStore.Delete(m.ID); err != nil {
            log.Warn("Failed to delete manifest", "id", m.ID, "error", err)
        }
        index.RemoveManifest(m.ID)
    }

    fmt.Printf("\nDeleted %d backup(s).\n", len(toDelete))
    fmt.Println("Run 'dbbackup dedup gc' to reclaim space from unreferenced chunks.")

    return nil
}

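// runDedupBackupDB streams a logical dump (pg_dump, mysqldump, or
// mariadb-dump) straight into the dedup store: the dump's stdout is tee'd
// through a SHA-256 hash, split into chunks by dedup.NewChunker, and only
// chunks the store has not seen before are written, so no intermediate dump
// file ever touches disk.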
func runDedupBackupDB(cmd *cobra.Command, args []string) error {
    dbType := strings.ToLower(dedupDBType)
    dbName := backupDBDatabase

    // Validate db type
    var dumpCmd string
    var dumpArgs []string

    switch dbType {
    case "postgres", "postgresql", "pg":
        dbType = "postgres"
        dumpCmd = "pg_dump"
        dumpArgs = []string{"-Fc"} // Custom format for better compression
        if dedupDBHost != "" && dedupDBHost != "localhost" {
            dumpArgs = append(dumpArgs, "-h", dedupDBHost)
        }
        if backupDBUser != "" {
            dumpArgs = append(dumpArgs, "-U", backupDBUser)
        }
        dumpArgs = append(dumpArgs, dbName)

    case "mysql":
        dumpCmd = "mysqldump"
        dumpArgs = []string{
            "--single-transaction",
            "--routines",
            "--triggers",
            "--events",
        }
        if dedupDBHost != "" {
            dumpArgs = append(dumpArgs, "-h", dedupDBHost)
        }
        if backupDBUser != "" {
            dumpArgs = append(dumpArgs, "-u", backupDBUser)
        }
        if backupDBPassword != "" {
            dumpArgs = append(dumpArgs, "-p"+backupDBPassword)
        }
        dumpArgs = append(dumpArgs, dbName)

    case "mariadb":
        dumpCmd = "mariadb-dump"
        // Fall back to mysqldump if mariadb-dump not available
        if _, err := exec.LookPath(dumpCmd); err != nil {
            dumpCmd = "mysqldump"
        }
        dumpArgs = []string{
            "--single-transaction",
            "--routines",
            "--triggers",
            "--events",
        }
        if dedupDBHost != "" {
            dumpArgs = append(dumpArgs, "-h", dedupDBHost)
        }
        if backupDBUser != "" {
            dumpArgs = append(dumpArgs, "-u", backupDBUser)
        }
        if backupDBPassword != "" {
            dumpArgs = append(dumpArgs, "-p"+backupDBPassword)
        }
        dumpArgs = append(dumpArgs, dbName)

    default:
        return fmt.Errorf("unsupported database type: %s (use postgres, mysql, or mariadb)", dbType)
    }

    // Verify dump command exists
    if _, err := exec.LookPath(dumpCmd); err != nil {
        return fmt.Errorf("%s not found in PATH: %w", dumpCmd, err)
    }

    // Setup dedup storage
    basePath := getDedupDir()
    encKey := ""
    if dedupEncrypt {
        encKey = getEncryptionKey()
        if encKey == "" {
            return fmt.Errorf("encryption enabled but no key provided (use --key or DBBACKUP_DEDUP_KEY)")
        }
    }

    store, err := dedup.NewChunkStore(dedup.StoreConfig{
        BasePath:      basePath,
        Compress:      dedupCompress,
        EncryptionKey: encKey,
    })
    if err != nil {
        return fmt.Errorf("failed to open chunk store: %w", err)
    }

    manifestStore, err := dedup.NewManifestStore(basePath)
    if err != nil {
        return fmt.Errorf("failed to open manifest store: %w", err)
    }

    index, err := dedup.NewChunkIndexAt(getIndexDBPath())
    if err != nil {
        return fmt.Errorf("failed to open chunk index: %w", err)
    }
    defer index.Close()

    // Generate manifest ID
    now := time.Now()
    manifestID := now.Format("2006-01-02_150405") + "_" + dbName

    fmt.Printf("Creating deduplicated database backup: %s\n", manifestID)
    fmt.Printf("Database: %s (%s)\n", dbName, dbType)
    fmt.Printf("Command: %s %s\n", dumpCmd, strings.Join(dumpArgs, " "))
    fmt.Printf("Store: %s\n", basePath)

    // Start the dump command
    dumpExec := exec.Command(dumpCmd, dumpArgs...)

    // Set password via environment for postgres
    if dbType == "postgres" && backupDBPassword != "" {
        dumpExec.Env = append(os.Environ(), "PGPASSWORD="+backupDBPassword)
    }

    stdout, err := dumpExec.StdoutPipe()
    if err != nil {
        return fmt.Errorf("failed to get stdout pipe: %w", err)
    }

    stderr, err := dumpExec.StderrPipe()
    if err != nil {
        return fmt.Errorf("failed to get stderr pipe: %w", err)
    }

    if err := dumpExec.Start(); err != nil {
        return fmt.Errorf("failed to start %s: %w", dumpCmd, err)
    }

    // Hash while chunking using TeeReader
    h := sha256.New()
    reader := io.TeeReader(stdout, h)

    // Chunk the stream directly
    chunker := dedup.NewChunker(reader, dedup.DefaultChunkerConfig())
    var chunks []dedup.ChunkRef
    var totalSize, storedSize int64
    var chunkCount, newChunks int

    startTime := time.Now()

    for {
        chunk, err := chunker.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            return fmt.Errorf("chunking failed: %w", err)
        }

        chunkCount++
        totalSize += int64(chunk.Length)

        // Store chunk (deduplication happens here)
        isNew, err := store.Put(chunk)
        if err != nil {
            return fmt.Errorf("failed to store chunk: %w", err)
        }

        if isNew {
            newChunks++
            storedSize += int64(chunk.Length)
            index.AddChunk(chunk.Hash, chunk.Length, chunk.Length)
        }

        chunks = append(chunks, dedup.ChunkRef{
            Hash:   chunk.Hash,
            Offset: chunk.Offset,
            Length: chunk.Length,
        })

        if chunkCount%1000 == 0 {
            fmt.Printf("\r  Processed %d chunks, %d new, %s...", chunkCount, newChunks, formatBytes(totalSize))
        }
    }

    // Read any stderr
    stderrBytes, _ := io.ReadAll(stderr)

    // Wait for command to complete
    if err := dumpExec.Wait(); err != nil {
        return fmt.Errorf("%s failed: %w\nstderr: %s", dumpCmd, err, string(stderrBytes))
    }

    duration := time.Since(startTime)
    fileHash := hex.EncodeToString(h.Sum(nil))

    // Calculate dedup ratio
    dedupRatio := 0.0
    if totalSize > 0 {
        dedupRatio = 1.0 - float64(storedSize)/float64(totalSize)
    }

    // Create manifest
    manifest := &dedup.Manifest{
        ID:           manifestID,
        Name:         dedupName,
        CreatedAt:    now,
        DatabaseType: dbType,
        DatabaseName: dbName,
        DatabaseHost: dedupDBHost,
        Chunks:       chunks,
        OriginalSize: totalSize,
        StoredSize:   storedSize,
        ChunkCount:   chunkCount,
        NewChunks:    newChunks,
        DedupRatio:   dedupRatio,
        Encrypted:    dedupEncrypt,
        Compressed:   dedupCompress,
        SHA256:       fileHash,
    }

    if err := manifestStore.Save(manifest); err != nil {
        return fmt.Errorf("failed to save manifest: %w", err)
    }

    if err := index.AddManifest(manifest); err != nil {
        log.Warn("Failed to index manifest", "error", err)
    }

    fmt.Printf("\r                                                        \r")
    fmt.Printf("\nBackup complete!\n")
    fmt.Printf("  Manifest: %s\n", manifestID)
    fmt.Printf("  Chunks: %d total, %d new\n", chunkCount, newChunks)
    fmt.Printf("  Dump size: %s\n", formatBytes(totalSize))
    fmt.Printf("  Stored: %s (new data)\n", formatBytes(storedSize))
    fmt.Printf("  Dedup ratio: %.1f%%\n", dedupRatio*100)
    fmt.Printf("  Duration: %s\n", duration.Round(time.Millisecond))
    fmt.Printf("  Throughput: %s/s\n", formatBytes(int64(float64(totalSize)/duration.Seconds())))

    return nil
}

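// runDedupMetrics prints Prometheus-format metrics for the dedup store to
// stdout, or writes them as a textfile when an output path is set (suitable,
// for example, for the node_exporter textfile collector; that is a deployment
// assumption, any consumer of the Prometheus text format works).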
func runDedupMetrics(cmd *cobra.Command, args []string) error {
    basePath := getDedupDir()
    indexPath := getIndexDBPath()

    instance := dedupMetricsInstance
    if instance == "" {
        hostname, _ := os.Hostname()
        instance = hostname
    }

    metrics, err := dedup.CollectMetrics(basePath, indexPath)
    if err != nil {
        return fmt.Errorf("failed to collect metrics: %w", err)
    }

    output := dedup.FormatPrometheusMetrics(metrics, instance)

    if dedupMetricsOutput != "" {
        if err := dedup.WritePrometheusTextfile(dedupMetricsOutput, instance, basePath, indexPath); err != nil {
            return fmt.Errorf("failed to write metrics: %w", err)
        }
        fmt.Printf("Wrote metrics to %s\n", dedupMetricsOutput)
    } else {
        fmt.Print(output)
    }

    return nil
}

cmd/drill.go (58 lines changed)

@@ -318,7 +318,7 @@ func runDrillList(cmd *cobra.Command, args []string) error {
 	}

 	fmt.Printf("%-15s %-40s %-20s %s\n", "ID", "NAME", "IMAGE", "STATUS")
-	fmt.Println(strings.Repeat("─", 100))
+	fmt.Println(strings.Repeat("-", 100))

 	for _, c := range containers {
 		fmt.Printf("%-15s %-40s %-20s %s\n",
@@ -345,7 +345,7 @@ func runDrillCleanup(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	fmt.Println("✅ Cleanup completed")
+	fmt.Println("[OK] Cleanup completed")
 	return nil
 }

@@ -369,32 +369,32 @@ func runDrillReport(cmd *cobra.Command, args []string) error {

 func printDrillResult(result *drill.DrillResult) {
 	fmt.Printf("\n")
-	fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
+	fmt.Printf("=====================================================\n")
 	fmt.Printf("  DR Drill Report: %s\n", result.DrillID)
-	fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
+	fmt.Printf("=====================================================\n\n")

-	status := "✅ PASSED"
+	status := "[OK] PASSED"
 	if !result.Success {
-		status = "❌ FAILED"
+		status = "[FAIL] FAILED"
 	} else if result.Status == drill.StatusPartial {
-		status = "⚠️ PARTIAL"
+		status = "[WARN] PARTIAL"
 	}

-	fmt.Printf("📋 Status: %s\n", status)
-	fmt.Printf("💾 Backup: %s\n", filepath.Base(result.BackupPath))
-	fmt.Printf("🗄️ Database: %s (%s)\n", result.DatabaseName, result.DatabaseType)
-	fmt.Printf("⏱️ Duration: %.2fs\n", result.Duration)
+	fmt.Printf("[LOG] Status: %s\n", status)
+	fmt.Printf("[SAVE] Backup: %s\n", filepath.Base(result.BackupPath))
+	fmt.Printf("[DB] Database: %s (%s)\n", result.DatabaseName, result.DatabaseType)
+	fmt.Printf("[TIME] Duration: %.2fs\n", result.Duration)
 	fmt.Printf("📅 Started: %s\n", result.StartTime.Format(time.RFC3339))
 	fmt.Printf("\n")

 	// Phases
-	fmt.Printf("📊 Phases:\n")
+	fmt.Printf("[STATS] Phases:\n")
 	for _, phase := range result.Phases {
-		icon := "✅"
+		icon := "[OK]"
 		if phase.Status == "failed" {
-			icon = "❌"
+			icon = "[FAIL]"
 		} else if phase.Status == "running" {
-			icon = "🔄"
+			icon = "[SYNC]"
 		}
 		fmt.Printf("  %s %-20s (%.2fs) %s\n", icon, phase.Name, phase.Duration, phase.Message)
 	}
@@ -412,10 +412,10 @@ func printDrillResult(result *drill.DrillResult) {
 	fmt.Printf("\n")

 	// RTO
-	fmt.Printf("⏱️ RTO Analysis:\n")
-	rtoIcon := "✅"
+	fmt.Printf("[TIME] RTO Analysis:\n")
+	rtoIcon := "[OK]"
 	if !result.RTOMet {
-		rtoIcon = "❌"
+		rtoIcon = "[FAIL]"
 	}
 	fmt.Printf("  Actual RTO: %.2fs\n", result.ActualRTO)
 	fmt.Printf("  Target RTO: %.0fs\n", result.TargetRTO)
@@ -424,11 +424,11 @@ func printDrillResult(result *drill.DrillResult) {

 	// Validation results
 	if len(result.ValidationResults) > 0 {
-		fmt.Printf("🔍 Validation Queries:\n")
+		fmt.Printf("[SEARCH] Validation Queries:\n")
 		for _, vr := range result.ValidationResults {
-			icon := "✅"
+			icon := "[OK]"
 			if !vr.Success {
-				icon = "❌"
+				icon = "[FAIL]"
 			}
 			fmt.Printf("  %s %s: %s\n", icon, vr.Name, vr.Result)
 			if vr.Error != "" {
@@ -440,11 +440,11 @@ func printDrillResult(result *drill.DrillResult) {

 	// Check results
 	if len(result.CheckResults) > 0 {
-		fmt.Printf("✓ Checks:\n")
+		fmt.Printf("[OK] Checks:\n")
 		for _, cr := range result.CheckResults {
-			icon := "✅"
+			icon := "[OK]"
 			if !cr.Success {
-				icon = "❌"
+				icon = "[FAIL]"
 			}
 			fmt.Printf("  %s %s\n", icon, cr.Message)
 		}
@@ -453,7 +453,7 @@ func printDrillResult(result *drill.DrillResult) {

 	// Errors and warnings
 	if len(result.Errors) > 0 {
-		fmt.Printf("❌ Errors:\n")
+		fmt.Printf("[FAIL] Errors:\n")
 		for _, e := range result.Errors {
 			fmt.Printf("  • %s\n", e)
 		}
@@ -461,7 +461,7 @@ func printDrillResult(result *drill.DrillResult) {
 	}

 	if len(result.Warnings) > 0 {
-		fmt.Printf("⚠️ Warnings:\n")
+		fmt.Printf("[WARN] Warnings:\n")
 		for _, w := range result.Warnings {
 			fmt.Printf("  • %s\n", w)
 		}
@@ -470,14 +470,14 @@ func printDrillResult(result *drill.DrillResult) {

 	// Container info
 	if result.ContainerKept {
-		fmt.Printf("📦 Container kept: %s\n", result.ContainerID[:12])
+		fmt.Printf("[PKG] Container kept: %s\n", result.ContainerID[:12])
 		fmt.Printf("   Connect with: docker exec -it %s bash\n", result.ContainerID[:12])
 		fmt.Printf("\n")
 	}

-	fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
+	fmt.Printf("=====================================================\n")
 	fmt.Printf("  %s\n", result.Message)
-	fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
+	fmt.Printf("=====================================================\n")
 }

 func updateCatalogWithDrillResult(ctx context.Context, backupPath string, result *drill.DrillResult) {

@@ -63,9 +63,9 @@ func runEngineList(cmd *cobra.Command, args []string) error {
 			continue
 		}

-		status := "✓ Available"
+		status := "[Y] Available"
 		if !avail.Available {
-			status = "✗ Not available"
+			status = "[N] Not available"
 		}

 		fmt.Printf("\n%s (%s)\n", info.Name, info.Description)
@@ -176,12 +176,12 @@ func runInstallStatus(ctx context.Context) error {
 	}

 	fmt.Println()
-	fmt.Println("📦 DBBackup Installation Status")
-	fmt.Println(strings.Repeat("═", 50))
+	fmt.Println("[STATUS] DBBackup Installation Status")
+	fmt.Println(strings.Repeat("=", 50))

 	if clusterStatus.Installed {
 		fmt.Println()
-		fmt.Println("🔹 Cluster Backup:")
+		fmt.Println("  * Cluster Backup:")
 		fmt.Printf("   Service: %s\n", formatStatus(clusterStatus.Installed, clusterStatus.Active))
 		fmt.Printf("   Timer: %s\n", formatStatus(clusterStatus.TimerEnabled, clusterStatus.TimerActive))
 		if clusterStatus.NextRun != "" {
@@ -192,7 +192,7 @@ func runInstallStatus(ctx context.Context) error {
 		}
 	} else {
 		fmt.Println()
-		fmt.Println("❌ No systemd services installed")
+		fmt.Println("[NONE] No systemd services installed")
 		fmt.Println()
 		fmt.Println("Run 'sudo dbbackup install' to install as a systemd service")
 	}
@@ -200,13 +200,13 @@ func runInstallStatus(ctx context.Context) error {
 	// Check for exporter
 	if _, err := os.Stat("/etc/systemd/system/dbbackup-exporter.service"); err == nil {
 		fmt.Println()
-		fmt.Println("🔹 Metrics Exporter:")
+		fmt.Println("  * Metrics Exporter:")
 		// Check if exporter is active using systemctl
 		cmd := exec.CommandContext(ctx, "systemctl", "is-active", "dbbackup-exporter")
 		if err := cmd.Run(); err == nil {
-			fmt.Printf("   Service: ✅ active\n")
+			fmt.Printf("   Service: [OK] active\n")
 		} else {
-			fmt.Printf("   Service: ⚪ inactive\n")
+			fmt.Printf("   Service: [-] inactive\n")
 		}
 	}

@@ -219,9 +219,9 @@ func formatStatus(installed, active bool) string {
 		return "not installed"
 	}
 	if active {
-		return "✅ active"
+		return "[OK] active"
 	}
-	return "⚪ inactive"
+	return "[-] inactive"
 }

 func expandSchedule(schedule string) string {

@@ -203,9 +203,17 @@ func runMigrateCluster(cmd *cobra.Command, args []string) error {
 		migrateTargetUser = migrateSourceUser
 	}

+	// Create source config first to get WorkDir
+	sourceCfg := config.New()
+	sourceCfg.Host = migrateSourceHost
+	sourceCfg.Port = migrateSourcePort
+	sourceCfg.User = migrateSourceUser
+	sourceCfg.Password = migrateSourcePassword
+
 	workdir := migrateWorkdir
 	if workdir == "" {
-		workdir = filepath.Join(os.TempDir(), "dbbackup-migrate")
+		// Use WorkDir from config if available
+		workdir = filepath.Join(sourceCfg.GetEffectiveWorkDir(), "dbbackup-migrate")
 	}

 	// Create working directory
@@ -213,12 +221,7 @@ func runMigrateCluster(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("failed to create working directory: %w", err)
 	}

-	// Create source config
-	sourceCfg := config.New()
-	sourceCfg.Host = migrateSourceHost
-	sourceCfg.Port = migrateSourcePort
-	sourceCfg.User = migrateSourceUser
-	sourceCfg.Password = migrateSourcePassword
+	// Update source config with remaining settings
 	sourceCfg.SSLMode = migrateSourceSSLMode
 	sourceCfg.Database = "postgres" // Default connection database
 	sourceCfg.DatabaseType = cfg.DatabaseType
@@ -342,7 +345,8 @@ func runMigrateSingle(cmd *cobra.Command, args []string) error {

 	workdir := migrateWorkdir
 	if workdir == "" {
-		workdir = filepath.Join(os.TempDir(), "dbbackup-migrate")
+		tempCfg := config.New()
+		workdir = filepath.Join(tempCfg.GetEffectiveWorkDir(), "dbbackup-migrate")
 	}

 	// Create working directory

cmd/pitr.go (76 lines changed)

@@ -436,7 +436,7 @@ func runPITREnable(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("failed to enable PITR: %w", err)
 	}

-	log.Info("✅ PITR enabled successfully!")
+	log.Info("[OK] PITR enabled successfully!")
 	log.Info("")
 	log.Info("Next steps:")
 	log.Info("1. Restart PostgreSQL: sudo systemctl restart postgresql")
@@ -463,7 +463,7 @@ func runPITRDisable(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("failed to disable PITR: %w", err)
 	}

-	log.Info("✅ PITR disabled successfully!")
+	log.Info("[OK] PITR disabled successfully!")
 	log.Info("PostgreSQL restart required: sudo systemctl restart postgresql")

 	return nil
@@ -483,15 +483,15 @@ func runPITRStatus(cmd *cobra.Command, args []string) error {
 	}

 	// Display PITR configuration
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("======================================================")
 	fmt.Println("  Point-in-Time Recovery (PITR) Status")
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("======================================================")
 	fmt.Println()

 	if config.Enabled {
-		fmt.Println("Status: ✅ ENABLED")
+		fmt.Println("Status: [OK] ENABLED")
 	} else {
-		fmt.Println("Status: ❌ DISABLED")
+		fmt.Println("Status: [FAIL] DISABLED")
 	}

 	fmt.Printf("WAL Level: %s\n", config.WALLevel)
@@ -510,7 +510,7 @@ func runPITRStatus(cmd *cobra.Command, args []string) error {
 		// Extract archive dir from command (simple parsing)
 		fmt.Println()
 		fmt.Println("WAL Archive Statistics:")
-		fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+		fmt.Println("======================================================")
 		// TODO: Parse archive dir and show stats
 		fmt.Println("  (Use 'dbbackup wal list --archive-dir <dir>' to view archives)")
 	}
@@ -574,13 +574,13 @@ func runWALList(cmd *cobra.Command, args []string) error {
 	}

 	// Display archives
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("======================================================")
 	fmt.Printf("  WAL Archives (%d files)\n", len(archives))
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("======================================================")
 	fmt.Println()

 	fmt.Printf("%-28s %10s %10s %8s %s\n", "WAL Filename", "Timeline", "Segment", "Size", "Archived At")
-	fmt.Println("────────────────────────────────────────────────────────────────────────────────")
+	fmt.Println("--------------------------------------------------------------------------------")

 	for _, archive := range archives {
 		size := formatWALSize(archive.ArchivedSize)
@@ -644,7 +644,7 @@ func runWALCleanup(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("WAL cleanup failed: %w", err)
 	}

-	log.Info("✅ WAL cleanup completed", "deleted", deleted, "retention_days", archiveConfig.RetentionDays)
+	log.Info("[OK] WAL cleanup completed", "deleted", deleted, "retention_days", archiveConfig.RetentionDays)
 	return nil
 }

@@ -671,7 +671,7 @@ func runWALTimeline(cmd *cobra.Command, args []string) error {
 	// Display timeline details
 	if len(history.Timelines) > 0 {
 		fmt.Println("\nTimeline Details:")
-		fmt.Println("═════════════════")
+		fmt.Println("=================")
 		for _, tl := range history.Timelines {
 			fmt.Printf("\nTimeline %d:\n", tl.TimelineID)
 			if tl.ParentTimeline > 0 {
@@ -690,7 +690,7 @@ func runWALTimeline(cmd *cobra.Command, args []string) error {
 				fmt.Printf("  Created: %s\n", tl.CreatedAt.Format("2006-01-02 15:04:05"))
 			}
 			if tl.TimelineID == history.CurrentTimeline {
-				fmt.Printf("  Status: ⚡ CURRENT\n")
+				fmt.Printf("  Status: [CURR] CURRENT\n")
 			}
 		}
 	}
@@ -759,15 +759,15 @@ func runBinlogList(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Printf("  Binary Log Files (%s)\n", bm.ServerType())
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Println()

 	if len(binlogs) > 0 {
 		fmt.Println("Source Directory:")
 		fmt.Printf("%-24s %10s %-19s %-19s %s\n", "Filename", "Size", "Start Time", "End Time", "Format")
-		fmt.Println("────────────────────────────────────────────────────────────────────────────────")
+		fmt.Println("--------------------------------------------------------------------------------")

 		var totalSize int64
 		for _, b := range binlogs {
@@ -797,7 +797,7 @@ func runBinlogList(cmd *cobra.Command, args []string) error {
 		fmt.Println()
 		fmt.Println("Archived Binlogs:")
 		fmt.Printf("%-24s %10s %-19s %s\n", "Original", "Size", "Archived At", "Flags")
-		fmt.Println("────────────────────────────────────────────────────────────────────────────────")
+		fmt.Println("--------------------------------------------------------------------------------")

 		var totalSize int64
 		for _, a := range archived {
@@ -914,7 +914,7 @@ func runBinlogArchive(cmd *cobra.Command, args []string) error {
 		bm.SaveArchiveMetadata(allArchived)
 	}

-	log.Info("✅ Binlog archiving completed", "archived", len(newArchives))
+	log.Info("[OK] Binlog archiving completed", "archived", len(newArchives))
 	return nil
 }

@@ -1014,15 +1014,15 @@ func runBinlogValidate(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("validating binlog chain: %w", err)
 	}

-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Println("  Binlog Chain Validation")
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Println()

 	if validation.Valid {
-		fmt.Println("Status: ✅ VALID - Binlog chain is complete")
+		fmt.Println("Status: [OK] VALID - Binlog chain is complete")
 	} else {
-		fmt.Println("Status: ❌ INVALID - Binlog chain has gaps")
+		fmt.Println("Status: [FAIL] INVALID - Binlog chain has gaps")
 	}

 	fmt.Printf("Files: %d binlog files\n", validation.LogCount)
@@ -1055,7 +1055,7 @@ func runBinlogValidate(cmd *cobra.Command, args []string) error {
 		fmt.Println()
 		fmt.Println("Errors:")
 		for _, e := range validation.Errors {
-			fmt.Printf("  ✗ %s\n", e)
+			fmt.Printf("  [FAIL] %s\n", e)
 		}
 	}

@@ -1094,9 +1094,9 @@ func runBinlogPosition(cmd *cobra.Command, args []string) error {
 	}
 	defer rows.Close()

-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Println("  Current Binary Log Position")
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Println()

 	if rows.Next() {
@@ -1178,24 +1178,24 @@ func runMySQLPITRStatus(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("getting PITR status: %w", err)
 	}

-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Printf("  MySQL/MariaDB PITR Status (%s)\n", status.DatabaseType)
-	fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	fmt.Println("=============================================================")
 	fmt.Println()

 	if status.Enabled {
-		fmt.Println("PITR Status: ✅ ENABLED")
+		fmt.Println("PITR Status: [OK] ENABLED")
 	} else {
-		fmt.Println("PITR Status: ❌ NOT CONFIGURED")
+		fmt.Println("PITR Status: [FAIL] NOT CONFIGURED")
 	}

 	// Get binary logging status
 	var logBin string
 	db.QueryRowContext(ctx, "SELECT @@log_bin").Scan(&logBin)
 	if logBin == "1" || logBin == "ON" {
-		fmt.Println("Binary Logging: ✅ ENABLED")
+		fmt.Println("Binary Logging: [OK] ENABLED")
 	} else {
-		fmt.Println("Binary Logging: ❌ DISABLED")
+		fmt.Println("Binary Logging: [FAIL] DISABLED")
 	}

 	fmt.Printf("Binlog Format: %s\n", status.LogLevel)
@@ -1205,14 +1205,14 @@ func runMySQLPITRStatus(cmd *cobra.Command, args []string) error {
 	if status.DatabaseType == pitr.DatabaseMariaDB {
 		db.QueryRowContext(ctx, "SELECT @@gtid_current_pos").Scan(&gtidMode)
 		if gtidMode != "" {
-			fmt.Println("GTID Mode: ✅ ENABLED")
+			fmt.Println("GTID Mode: [OK] ENABLED")
 		} else {
-			fmt.Println("GTID Mode: ❌ DISABLED")
+			fmt.Println("GTID Mode: [FAIL] DISABLED")
 		}
 	} else {
 		db.QueryRowContext(ctx, "SELECT @@gtid_mode").Scan(&gtidMode)
 		if gtidMode == "ON" {
-			fmt.Println("GTID Mode: ✅ ENABLED")
+			fmt.Println("GTID Mode: [OK] ENABLED")
 		} else {
 			fmt.Printf("GTID Mode: %s\n", gtidMode)
 		}
@@ -1237,12 +1237,12 @@ func runMySQLPITRStatus(cmd *cobra.Command, args []string) error {
 	fmt.Println()
 	fmt.Println("PITR Requirements:")
 	if logBin == "1" || logBin == "ON" {
-		fmt.Println("  ✅ Binary logging enabled")
+		fmt.Println("  [OK] Binary logging enabled")
 	} else {
-		fmt.Println("  ❌ Binary logging must be enabled (log_bin = mysql-bin)")
+		fmt.Println("  [FAIL] Binary logging must be enabled (log_bin = mysql-bin)")
 	}
 	if status.LogLevel == "ROW" {
-		fmt.Println("  ✅ Row-based logging (recommended)")
+		fmt.Println("  [OK] Row-based logging (recommended)")
 	} else {
 		fmt.Printf("  ⚠ binlog_format = %s (ROW recommended for PITR)\n", status.LogLevel)
 	}
@@ -1299,7 +1299,7 @@ func runMySQLPITREnable(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("enabling PITR: %w", err)
 	}

-	log.Info("✅ MySQL PITR enabled successfully!")
+	log.Info("[OK] MySQL PITR enabled successfully!")
 	log.Info("")
 	log.Info("Next steps:")
 	log.Info("1. Start binlog archiving: dbbackup binlog watch --archive-dir " + mysqlArchiveDir)

@@ -141,7 +141,7 @@ func runList(ctx context.Context) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("📦 %s\n", file.Name)
|
fmt.Printf("[FILE] %s\n", file.Name)
|
||||||
fmt.Printf(" Size: %s\n", formatFileSize(stat.Size()))
|
fmt.Printf(" Size: %s\n", formatFileSize(stat.Size()))
|
||||||
fmt.Printf(" Modified: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
|
fmt.Printf(" Modified: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
|
||||||
fmt.Printf(" Type: %s\n", getBackupType(file.Name))
|
fmt.Printf(" Type: %s\n", getBackupType(file.Name))
|
||||||
@@ -237,56 +237,56 @@ func runPreflight(ctx context.Context) error {
|
|||||||
totalChecks := 6
|
totalChecks := 6
|
||||||
|
|
||||||
// 1. Database connectivity check
|
// 1. Database connectivity check
|
||||||
fmt.Print("🔗 Database connectivity... ")
|
fmt.Print("[1] Database connectivity... ")
|
||||||
if err := testDatabaseConnection(); err != nil {
|
if err := testDatabaseConnection(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. Required tools check
|
// 2. Required tools check
|
||||||
fmt.Print("🛠️ Required tools (pg_dump/pg_restore)... ")
|
fmt.Print("[2] Required tools (pg_dump/pg_restore)... ")
|
||||||
if err := checkRequiredTools(); err != nil {
|
if err := checkRequiredTools(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. Backup directory check
|
// 3. Backup directory check
|
||||||
fmt.Print("📁 Backup directory access... ")
|
fmt.Print("[3] Backup directory access... ")
|
||||||
if err := checkBackupDirectory(); err != nil {
|
if err := checkBackupDirectory(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4. Disk space check
|
// 4. Disk space check
|
||||||
fmt.Print("💾 Available disk space... ")
|
fmt.Print("[4] Available disk space... ")
|
||||||
if err := checkDiskSpace(); err != nil {
|
if err := checkDiskSpace(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 5. Permissions check
|
// 5. Permissions check
|
||||||
fmt.Print("🔐 File permissions... ")
|
fmt.Print("[5] File permissions... ")
|
||||||
if err := checkPermissions(); err != nil {
|
if err := checkPermissions(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 6. CPU/Memory resources check
|
// 6. CPU/Memory resources check
|
||||||
fmt.Print("🖥️ System resources... ")
|
fmt.Print("[6] System resources... ")
|
||||||
if err := checkSystemResources(); err != nil {
|
if err := checkSystemResources(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -294,10 +294,10 @@ func runPreflight(ctx context.Context) error {
|
|||||||
fmt.Printf("Results: %d/%d checks passed\n", checksPassed, totalChecks)
|
fmt.Printf("Results: %d/%d checks passed\n", checksPassed, totalChecks)
|
||||||
|
|
||||||
if checksPassed == totalChecks {
|
if checksPassed == totalChecks {
|
||||||
fmt.Println("🎉 All preflight checks passed! System is ready for backup operations.")
|
fmt.Println("[SUCCESS] All preflight checks passed! System is ready for backup operations.")
|
||||||
return nil
|
return nil
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("⚠️ %d check(s) failed. Please address the issues before running backups.\n", totalChecks-checksPassed)
|
fmt.Printf("[WARN] %d check(s) failed. Please address the issues before running backups.\n", totalChecks-checksPassed)
|
||||||
return fmt.Errorf("preflight checks failed: %d/%d passed", checksPassed, totalChecks)
|
return fmt.Errorf("preflight checks failed: %d/%d passed", checksPassed, totalChecks)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -414,44 +414,44 @@ func runRestore(ctx context.Context, archiveName string) error {
 	fmt.Println()

 	// Show warning
-	fmt.Println("⚠️ WARNING: This will restore data to the target database.")
+	fmt.Println("[WARN] WARNING: This will restore data to the target database.")
 	fmt.Println(" Existing data may be overwritten or merged depending on the restore method.")
 	fmt.Println()

 	// For safety, show what would be done without actually doing it
 	switch archiveType {
 	case "Single Database (.dump)":
-		fmt.Println("🔄 Would execute: pg_restore to restore single database")
+		fmt.Println("[EXEC] Would execute: pg_restore to restore single database")
 		fmt.Printf(" Command: pg_restore -h %s -p %d -U %s -d %s --verbose %s\n",
 			cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
 	case "Single Database (.dump.gz)":
-		fmt.Println("🔄 Would execute: gunzip and pg_restore to restore single database")
+		fmt.Println("[EXEC] Would execute: gunzip and pg_restore to restore single database")
 		fmt.Printf(" Command: gunzip -c %s | pg_restore -h %s -p %d -U %s -d %s --verbose\n",
 			archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
 	case "SQL Script (.sql)":
 		if cfg.IsPostgreSQL() {
-			fmt.Println("🔄 Would execute: psql to run SQL script")
+			fmt.Println("[EXEC] Would execute: psql to run SQL script")
 			fmt.Printf(" Command: psql -h %s -p %d -U %s -d %s -f %s\n",
 				cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
 		} else if cfg.IsMySQL() {
-			fmt.Println("🔄 Would execute: mysql to run SQL script")
+			fmt.Println("[EXEC] Would execute: mysql to run SQL script")
 			fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, false))
 		} else {
-			fmt.Println("🔄 Would execute: SQL client to run script (database type unknown)")
+			fmt.Println("[EXEC] Would execute: SQL client to run script (database type unknown)")
 		}
 	case "SQL Script (.sql.gz)":
 		if cfg.IsPostgreSQL() {
-			fmt.Println("🔄 Would execute: gunzip and psql to run SQL script")
+			fmt.Println("[EXEC] Would execute: gunzip and psql to run SQL script")
 			fmt.Printf(" Command: gunzip -c %s | psql -h %s -p %d -U %s -d %s\n",
 				archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
 		} else if cfg.IsMySQL() {
-			fmt.Println("🔄 Would execute: gunzip and mysql to run SQL script")
+			fmt.Println("[EXEC] Would execute: gunzip and mysql to run SQL script")
 			fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, true))
 		} else {
-			fmt.Println("🔄 Would execute: gunzip and SQL client to run script (database type unknown)")
+			fmt.Println("[EXEC] Would execute: gunzip and SQL client to run script (database type unknown)")
 		}
 	case "Cluster Backup (.tar.gz)":
-		fmt.Println("🔄 Would execute: Extract and restore cluster backup")
+		fmt.Println("[EXEC] Would execute: Extract and restore cluster backup")
 		fmt.Println(" Steps:")
 		fmt.Println(" 1. Extract tar.gz archive")
 		fmt.Println(" 2. Restore global objects (roles, tablespaces)")
@@ -461,7 +461,7 @@ func runRestore(ctx context.Context, archiveName string) error {
 	}

 	fmt.Println()
-	fmt.Println("🛡️ SAFETY MODE: Restore command is in preview mode.")
+	fmt.Println("[SAFETY] SAFETY MODE: Restore command is in preview mode.")
 	fmt.Println(" This shows what would be executed without making changes.")
 	fmt.Println(" To enable actual restore, add --confirm flag (not yet implemented).")

@@ -520,25 +520,25 @@ func runVerify(ctx context.Context, archiveName string) error {
 	checksPassed := 0

 	// Basic file existence and readability
-	fmt.Print("📁 File accessibility... ")
+	fmt.Print("[CHK] File accessibility... ")
 	if file, err := os.Open(archivePath); err != nil {
-		fmt.Printf("❌ FAILED: %v\n", err)
+		fmt.Printf("[FAIL] FAILED: %v\n", err)
 	} else {
 		file.Close()
-		fmt.Println("✅ PASSED")
+		fmt.Println("[OK] PASSED")
 		checksPassed++
 	}
 	checksRun++

 	// File size sanity check
-	fmt.Print("📏 File size check... ")
+	fmt.Print("[CHK] File size check... ")
 	if stat.Size() == 0 {
-		fmt.Println("❌ FAILED: File is empty")
+		fmt.Println("[FAIL] FAILED: File is empty")
 	} else if stat.Size() < 100 {
-		fmt.Println("⚠️ WARNING: File is very small (< 100 bytes)")
+		fmt.Println("[WARN] WARNING: File is very small (< 100 bytes)")
 		checksPassed++
 	} else {
-		fmt.Println("✅ PASSED")
+		fmt.Println("[OK] PASSED")
 		checksPassed++
 	}
 	checksRun++
@@ -546,51 +546,51 @@ func runVerify(ctx context.Context, archiveName string) error {
 	// Type-specific verification
 	switch archiveType {
 	case "Single Database (.dump)":
-		fmt.Print("🔍 PostgreSQL dump format check... ")
+		fmt.Print("[CHK] PostgreSQL dump format check... ")
 		if err := verifyPgDump(archivePath); err != nil {
-			fmt.Printf("❌ FAILED: %v\n", err)
+			fmt.Printf("[FAIL] FAILED: %v\n", err)
 		} else {
-			fmt.Println("✅ PASSED")
+			fmt.Println("[OK] PASSED")
 			checksPassed++
 		}
 		checksRun++

 	case "Single Database (.dump.gz)":
-		fmt.Print("🔍 PostgreSQL dump format check (gzip)... ")
+		fmt.Print("[CHK] PostgreSQL dump format check (gzip)... ")
 		if err := verifyPgDumpGzip(archivePath); err != nil {
-			fmt.Printf("❌ FAILED: %v\n", err)
+			fmt.Printf("[FAIL] FAILED: %v\n", err)
 		} else {
-			fmt.Println("✅ PASSED")
+			fmt.Println("[OK] PASSED")
 			checksPassed++
 		}
 		checksRun++

 	case "SQL Script (.sql)":
-		fmt.Print("📜 SQL script validation... ")
+		fmt.Print("[CHK] SQL script validation... ")
 		if err := verifySqlScript(archivePath); err != nil {
-			fmt.Printf("❌ FAILED: %v\n", err)
+			fmt.Printf("[FAIL] FAILED: %v\n", err)
 		} else {
-			fmt.Println("✅ PASSED")
+			fmt.Println("[OK] PASSED")
 			checksPassed++
 		}
 		checksRun++

 	case "SQL Script (.sql.gz)":
-		fmt.Print("📜 SQL script validation (gzip)... ")
+		fmt.Print("[CHK] SQL script validation (gzip)... ")
 		if err := verifyGzipSqlScript(archivePath); err != nil {
-			fmt.Printf("❌ FAILED: %v\n", err)
+			fmt.Printf("[FAIL] FAILED: %v\n", err)
 		} else {
-			fmt.Println("✅ PASSED")
+			fmt.Println("[OK] PASSED")
 			checksPassed++
 		}
 		checksRun++

 	case "Cluster Backup (.tar.gz)":
-		fmt.Print("📦 Archive extraction test... ")
+		fmt.Print("[CHK] Archive extraction test... ")
 		if err := verifyTarGz(archivePath); err != nil {
-			fmt.Printf("❌ FAILED: %v\n", err)
+			fmt.Printf("[FAIL] FAILED: %v\n", err)
 		} else {
-			fmt.Println("✅ PASSED")
+			fmt.Println("[OK] PASSED")
 			checksPassed++
 		}
 		checksRun++
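The gzip-flavored checks above call `verifyPgDumpGzip` and `verifyGzipSqlScript`, whose bodies are not part of this diff. A minimal sketch of what such a probe can look like using only the standard library; the function name and the 8 KiB prefix read are assumptions for illustration, not the project's actual implementation:

```go
// Illustrative only: the diff calls verifyPgDumpGzip et al. without showing
// their bodies. This sketch validates the gzip layer of a .dump.gz file.
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
)

func verifyGzipReadable(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zr, err := gzip.NewReader(f) // fails fast on a bad magic number or header
	if err != nil {
		return fmt.Errorf("not a valid gzip stream: %w", err)
	}
	defer zr.Close()

	// Decompress a small prefix to catch corruption near the start cheaply.
	if _, err := io.CopyN(io.Discard, zr, 8192); err != nil && err != io.EOF {
		return fmt.Errorf("gzip stream damaged: %w", err)
	}
	return nil
}

func main() {
	if err := verifyGzipReadable("backup.dump.gz"); err != nil {
		fmt.Println("[FAIL]", err)
		return
	}
	fmt.Println("[OK] gzip layer readable")
}
```

A prefix read keeps the check fast on large archives; detecting truncation at the tail would require decompressing the whole stream.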
@@ -598,11 +598,11 @@ func runVerify(ctx context.Context, archiveName string) error {

 	// Check for metadata file
 	metadataPath := archivePath + ".info"
-	fmt.Print("📋 Metadata file check... ")
+	fmt.Print("[CHK] Metadata file check... ")
 	if _, err := os.Stat(metadataPath); os.IsNotExist(err) {
-		fmt.Println("⚠️ WARNING: No metadata file found")
+		fmt.Println("[WARN] WARNING: No metadata file found")
 	} else {
-		fmt.Println("✅ PASSED")
+		fmt.Println("[OK] PASSED")
 		checksPassed++
 	}
 	checksRun++
@@ -611,13 +611,13 @@ func runVerify(ctx context.Context, archiveName string) error {
 	fmt.Printf("Verification Results: %d/%d checks passed\n", checksPassed, checksRun)

 	if checksPassed == checksRun {
-		fmt.Println("🎉 Archive verification completed successfully!")
+		fmt.Println("[SUCCESS] Archive verification completed successfully!")
 		return nil
 	} else if float64(checksPassed)/float64(checksRun) >= 0.8 {
-		fmt.Println("⚠️ Archive verification completed with warnings.")
+		fmt.Println("[WARN] Archive verification completed with warnings.")
 		return nil
 	} else {
-		fmt.Println("❌ Archive verification failed. Archive may be corrupted.")
+		fmt.Println("[FAIL] Archive verification failed. Archive may be corrupted.")
 		return fmt.Errorf("verification failed: %d/%d checks passed", checksPassed, checksRun)
 	}
 }
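Across this file the change is mechanical: every emoji status glyph becomes a bracketed ASCII tag, so the output survives terminals, CI logs, and collectors without Unicode fonts. A sketch of how the repeated print-and-count pattern could be factored into a helper; the helper and constant names are illustrative only, the diff keeps the tags inline at each call site:

```go
// Illustrative helper for the ASCII status tags used above; not part of the diff.
package main

import "fmt"

const (
	tagOK   = "[OK]"
	tagFail = "[FAIL]"
)

// reportCheck prints a PASSED/FAILED line in the same style as runVerify
// and returns 1 on success so callers can accumulate a pass count.
func reportCheck(name string, err error) int {
	fmt.Printf("%s... ", name)
	if err != nil {
		fmt.Printf("%s FAILED: %v\n", tagFail, err)
		return 0
	}
	fmt.Printf("%s PASSED\n", tagOK)
	return 1
}

func main() {
	checksPassed := 0
	checksPassed += reportCheck("[CHK] File accessibility", nil)
	fmt.Printf("Results: %d/1 checks passed\n", checksPassed)
}
```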
@@ -37,9 +37,9 @@ var (
 	restoreSaveDebugLog string // Path to save debug log on failure

 	// Diagnose flags
 	diagnoseJSON     bool
 	diagnoseDeep     bool
 	diagnoseKeepTemp bool

 	// Encryption flags
 	restoreEncryptionKeyFile string
@@ -342,7 +342,7 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("archive not found: %s", archivePath)
 	}

-	log.Info("🔍 Diagnosing backup file", "path", archivePath)
+	log.Info("[DIAG] Diagnosing backup file", "path", archivePath)

 	diagnoser := restore.NewDiagnoser(log, restoreVerbose)

@@ -350,10 +350,11 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
 	format := restore.DetectArchiveFormat(archivePath)

 	if format.IsClusterBackup() && diagnoseDeep {
-		// Create temp directory for extraction
-		tempDir, err := os.MkdirTemp("", "dbbackup-diagnose-*")
+		// Create temp directory for extraction in configured WorkDir
+		workDir := cfg.GetEffectiveWorkDir()
+		tempDir, err := os.MkdirTemp(workDir, "dbbackup-diagnose-*")
 		if err != nil {
-			return fmt.Errorf("failed to create temp directory: %w", err)
+			return fmt.Errorf("failed to create temp directory in %s: %w", workDir, err)
 		}

 		if !diagnoseKeepTemp {
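The functional change in this hunk routes diagnosis scratch space through the configured work directory instead of the system temp dir, which matters when /tmp is small and cluster archives are large. A minimal sketch of the pattern, assuming a `GetEffectiveWorkDir` accessor as in the diff; passing an empty string makes `os.MkdirTemp` fall back to the system default:

```go
// Sketch of the WorkDir-aware temp directory pattern from this hunk.
// cfg.GetEffectiveWorkDir() appears in the diff; this Config is a stand-in.
package main

import (
	"fmt"
	"os"
)

type Config struct{ WorkDir string }

// GetEffectiveWorkDir returns the configured scratch root, or "" so that
// os.MkdirTemp falls back to the system default temp directory.
func (c *Config) GetEffectiveWorkDir() string { return c.WorkDir }

func main() {
	cfg := &Config{WorkDir: "/var/tmp/dbbackup"}

	workDir := cfg.GetEffectiveWorkDir()
	tempDir, err := os.MkdirTemp(workDir, "dbbackup-diagnose-*")
	if err != nil {
		fmt.Printf("failed to create temp directory in %s: %v\n", workDir, err)
		return
	}
	defer os.RemoveAll(tempDir) // the real code skips this when --keep-temp is set

	fmt.Println("extracting into", tempDir)
}
```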
@@ -386,7 +387,7 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
 	// Summary
 	if !diagnoseJSON {
 		fmt.Println("\n" + strings.Repeat("=", 70))
-		fmt.Printf("📊 CLUSTER SUMMARY: %d databases analyzed\n", len(results))
+		fmt.Printf("[SUMMARY] CLUSTER SUMMARY: %d databases analyzed\n", len(results))

 		validCount := 0
 		for _, r := range results {
@@ -396,9 +397,9 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
 		}

 		if validCount == len(results) {
-			fmt.Println("✅ All dumps are valid")
+			fmt.Println("[OK] All dumps are valid")
 		} else {
-			fmt.Printf("❌ %d/%d dumps have issues\n", len(results)-validCount, len(results))
+			fmt.Printf("[FAIL] %d/%d dumps have issues\n", len(results)-validCount, len(results))
 		}
 		fmt.Println(strings.Repeat("=", 70))
 	}
@@ -425,7 +426,7 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("backup file has validation errors")
 	}

-	log.Info("✅ Backup file appears valid")
+	log.Info("[OK] Backup file appears valid")
 	return nil
 }

@@ -544,7 +545,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
 	isDryRun := restoreDryRun || !restoreConfirm

 	if isDryRun {
-		fmt.Println("\n🔍 DRY-RUN MODE - No changes will be made")
+		fmt.Println("\n[DRY-RUN] DRY-RUN MODE - No changes will be made")
 		fmt.Printf("\nWould restore:\n")
 		fmt.Printf(" Archive: %s\n", archivePath)
 		fmt.Printf(" Format: %s\n", format.String())
@@ -564,7 +565,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {

 	// Create restore engine
 	engine := restore.New(cfg, log, db)

 	// Enable debug logging if requested
 	if restoreSaveDebugLog != "" {
 		engine.SetDebugLogPath(restoreSaveDebugLog)
@@ -587,18 +588,18 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {

 	// Run pre-restore diagnosis if requested
 	if restoreDiagnose {
-		log.Info("🔍 Running pre-restore diagnosis...")
+		log.Info("[DIAG] Running pre-restore diagnosis...")

 		diagnoser := restore.NewDiagnoser(log, restoreVerbose)
 		result, err := diagnoser.DiagnoseFile(archivePath)
 		if err != nil {
 			return fmt.Errorf("diagnosis failed: %w", err)
 		}

 		diagnoser.PrintDiagnosis(result)

 		if !result.IsValid {
-			log.Error("❌ Pre-restore diagnosis found issues")
+			log.Error("[FAIL] Pre-restore diagnosis found issues")
 			if result.IsTruncated {
 				log.Error(" The backup file appears to be TRUNCATED")
 			}
@@ -606,13 +607,13 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
 			log.Error(" The backup file appears to be CORRUPTED")
 			}
 			fmt.Println("\nUse --force to attempt restore anyway.")

 			if !restoreForce {
 				return fmt.Errorf("aborting restore due to backup file issues")
 			}
 			log.Warn("Continuing despite diagnosis errors (--force enabled)")
 		} else {
-			log.Info("✅ Backup file passed diagnosis")
+			log.Info("[OK] Backup file passed diagnosis")
 		}
 	}

@@ -632,7 +633,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
 	// Audit log: restore success
 	auditLogger.LogRestoreComplete(user, targetDB, time.Since(startTime))

-	log.Info("✅ Restore completed successfully", "database", targetDB)
+	log.Info("[OK] Restore completed successfully", "database", targetDB)
 	return nil
 }

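Both restore paths close by writing an audit record with the user, target, and elapsed time. The audit package itself is not shown in this diff; a minimal sketch of the shape implied by these `LogRestoreComplete` call sites, with the type and output format assumed for illustration:

```go
// Hypothetical audit logger matching the LogRestoreComplete call sites above.
package main

import (
	"fmt"
	"time"
)

type AuditLogger struct{}

// LogRestoreComplete records who restored what and how long it took.
func (a *AuditLogger) LogRestoreComplete(user, database string, elapsed time.Duration) {
	fmt.Printf("audit: user=%s database=%s action=restore_complete duration=%s\n",
		user, database, elapsed)
}

func main() {
	auditLogger := &AuditLogger{}
	startTime := time.Now().Add(-90 * time.Second) // pretend the restore took 90s
	auditLogger.LogRestoreComplete("postgres", "appdb", time.Since(startTime))
}
```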
@@ -700,7 +701,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
 		}
 	}

-	log.Warn("⚠️ Using alternative working directory for extraction")
+	log.Warn("[WARN] Using alternative working directory for extraction")
 	log.Warn(" This is recommended when system disk space is limited")
 	log.Warn(" Location: " + restoreWorkdir)
 }
@@ -753,7 +754,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
 	isDryRun := restoreDryRun || !restoreConfirm

 	if isDryRun {
-		fmt.Println("\n🔍 DRY-RUN MODE - No changes will be made")
+		fmt.Println("\n[DRY-RUN] DRY-RUN MODE - No changes will be made")
 		fmt.Printf("\nWould restore cluster:\n")
 		fmt.Printf(" Archive: %s\n", archivePath)
 		fmt.Printf(" Parallel Jobs: %d (0 = auto)\n", restoreJobs)
@@ -763,7 +764,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
 		if restoreCleanCluster {
 			fmt.Printf(" Clean Cluster: true (will drop %d existing database(s))\n", len(existingDBs))
 			if len(existingDBs) > 0 {
-				fmt.Printf("\n⚠️ Databases to be dropped:\n")
+				fmt.Printf("\n[WARN] Databases to be dropped:\n")
 				for _, dbName := range existingDBs {
 					fmt.Printf(" - %s\n", dbName)
 				}
@@ -775,7 +776,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {

 	// Warning for clean-cluster
 	if restoreCleanCluster && len(existingDBs) > 0 {
-		log.Warn("🔥 Clean cluster mode enabled")
+		log.Warn("[!!] Clean cluster mode enabled")
 		log.Warn(fmt.Sprintf(" %d existing database(s) will be DROPPED before restore!", len(existingDBs)))
 		for _, dbName := range existingDBs {
 			log.Warn(" - " + dbName)
@@ -784,7 +785,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {

 	// Create restore engine
 	engine := restore.New(cfg, log, db)

 	// Enable debug logging if requested
 	if restoreSaveDebugLog != "" {
 		engine.SetDebugLogPath(restoreSaveDebugLog)
@@ -828,21 +829,22 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {

 	// Run pre-restore diagnosis if requested
 	if restoreDiagnose {
-		log.Info("🔍 Running pre-restore diagnosis...")
+		log.Info("[DIAG] Running pre-restore diagnosis...")

-		// Create temp directory for extraction
-		diagTempDir, err := os.MkdirTemp("", "dbbackup-diagnose-*")
+		// Create temp directory for extraction in configured WorkDir
+		workDir := cfg.GetEffectiveWorkDir()
+		diagTempDir, err := os.MkdirTemp(workDir, "dbbackup-diagnose-*")
 		if err != nil {
-			return fmt.Errorf("failed to create temp directory for diagnosis: %w", err)
+			return fmt.Errorf("failed to create temp directory for diagnosis in %s: %w", workDir, err)
 		}
 		defer os.RemoveAll(diagTempDir)

 		diagnoser := restore.NewDiagnoser(log, restoreVerbose)
 		results, err := diagnoser.DiagnoseClusterDumps(archivePath, diagTempDir)
 		if err != nil {
 			return fmt.Errorf("diagnosis failed: %w", err)
 		}

 		// Check for any invalid dumps
 		var invalidDumps []string
 		for _, result := range results {
@@ -851,24 +853,24 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
 				diagnoser.PrintDiagnosis(result)
 			}
 		}

 		if len(invalidDumps) > 0 {
-			log.Error("❌ Pre-restore diagnosis found issues",
+			log.Error("[FAIL] Pre-restore diagnosis found issues",
 				"invalid_dumps", len(invalidDumps),
 				"total_dumps", len(results))
-			fmt.Println("\n⚠️ The following dumps have issues and will likely fail during restore:")
+			fmt.Println("\n[WARN] The following dumps have issues and will likely fail during restore:")
 			for _, name := range invalidDumps {
 				fmt.Printf(" - %s\n", name)
 			}
 			fmt.Println("\nRun 'dbbackup restore diagnose <archive> --deep' for full details.")
 			fmt.Println("Use --force to attempt restore anyway.")

 			if !restoreForce {
 				return fmt.Errorf("aborting restore due to %d invalid dump(s)", len(invalidDumps))
 			}
 			log.Warn("Continuing despite diagnosis errors (--force enabled)")
 		} else {
-			log.Info("✅ All dumps passed diagnosis", "count", len(results))
+			log.Info("[OK] All dumps passed diagnosis", "count", len(results))
 		}
 	}

@@ -888,7 +890,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
 	// Audit log: restore success
 	auditLogger.LogRestoreComplete(user, "all_databases", time.Since(startTime))

-	log.Info("✅ Cluster restore completed successfully")
+	log.Info("[OK] Cluster restore completed successfully")
 	return nil
 }

@@ -937,7 +939,7 @@ func runRestoreList(cmd *cobra.Command, args []string) error {
 	}

 	// Print header
-	fmt.Printf("\n📦 Available backup archives in %s\n\n", backupDir)
+	fmt.Printf("\n[LIST] Available backup archives in %s\n\n", backupDir)
 	fmt.Printf("%-40s %-25s %-12s %-20s %s\n",
 		"FILENAME", "FORMAT", "SIZE", "MODIFIED", "DATABASE")
 	fmt.Println(strings.Repeat("-", 120))
@@ -1054,9 +1056,9 @@ func runRestorePITR(cmd *cobra.Command, args []string) error {
 	}

 	// Display recovery target info
-	log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	log.Info("=====================================================")
 	log.Info(" Point-in-Time Recovery (PITR)")
-	log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	log.Info("=====================================================")
 	log.Info("")
 	log.Info(target.String())
 	log.Info("")
@@ -1080,6 +1082,6 @@ func runRestorePITR(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("PITR restore failed: %w", err)
 	}

-	log.Info("✅ PITR restore completed successfully")
+	log.Info("[OK] PITR restore completed successfully")
 	return nil
 }
cmd/rto.go (56 changed lines)
@@ -181,13 +181,13 @@ func runRTOStatus(cmd *cobra.Command, args []string) error {

 	// Display status
 	fmt.Println()
-	fmt.Println("╔═══════════════════════════════════════════════════════════╗")
+	fmt.Println("+-----------------------------------------------------------+")
-	fmt.Println("║ RTO/RPO STATUS SUMMARY ║")
+	fmt.Println("| RTO/RPO STATUS SUMMARY |")
-	fmt.Println("╠═══════════════════════════════════════════════════════════╣")
+	fmt.Println("+-----------------------------------------------------------+")
-	fmt.Printf("║ Target RTO: %-15s Target RPO: %-15s ║\n",
+	fmt.Printf("| Target RTO: %-15s Target RPO: %-15s |\n",
 		formatDuration(config.TargetRTO),
 		formatDuration(config.TargetRPO))
-	fmt.Println("╠═══════════════════════════════════════════════════════════╣")
+	fmt.Println("+-----------------------------------------------------------+")

 	// Compliance status
 	rpoRate := 0.0
@@ -199,31 +199,31 @@ func runRTOStatus(cmd *cobra.Command, args []string) error {
 		fullRate = float64(summary.FullyCompliant) / float64(summary.TotalDatabases) * 100
 	}

-	fmt.Printf("║ Databases: %-5d ║\n", summary.TotalDatabases)
+	fmt.Printf("| Databases: %-5d |\n", summary.TotalDatabases)
-	fmt.Printf("║ RPO Compliant: %-5d (%.0f%%) ║\n", summary.RPOCompliant, rpoRate)
+	fmt.Printf("| RPO Compliant: %-5d (%.0f%%) |\n", summary.RPOCompliant, rpoRate)
-	fmt.Printf("║ RTO Compliant: %-5d (%.0f%%) ║\n", summary.RTOCompliant, rtoRate)
+	fmt.Printf("| RTO Compliant: %-5d (%.0f%%) |\n", summary.RTOCompliant, rtoRate)
-	fmt.Printf("║ Fully Compliant: %-3d (%.0f%%) ║\n", summary.FullyCompliant, fullRate)
+	fmt.Printf("| Fully Compliant: %-3d (%.0f%%) |\n", summary.FullyCompliant, fullRate)

 	if summary.CriticalIssues > 0 {
-		fmt.Printf("║ ⚠️ Critical Issues: %-3d ║\n", summary.CriticalIssues)
+		fmt.Printf("| [WARN] Critical Issues: %-3d |\n", summary.CriticalIssues)
 	}

-	fmt.Println("╠═══════════════════════════════════════════════════════════╣")
+	fmt.Println("+-----------------------------------------------------------+")
-	fmt.Printf("║ Average RPO: %-15s Worst: %-15s ║\n",
+	fmt.Printf("| Average RPO: %-15s Worst: %-15s |\n",
 		formatDuration(summary.AverageRPO),
 		formatDuration(summary.WorstRPO))
-	fmt.Printf("║ Average RTO: %-15s Worst: %-15s ║\n",
+	fmt.Printf("| Average RTO: %-15s Worst: %-15s |\n",
 		formatDuration(summary.AverageRTO),
 		formatDuration(summary.WorstRTO))

 	if summary.WorstRPODatabase != "" {
-		fmt.Printf("║ Worst RPO Database: %-38s║\n", summary.WorstRPODatabase)
+		fmt.Printf("| Worst RPO Database: %-38s|\n", summary.WorstRPODatabase)
 	}
 	if summary.WorstRTODatabase != "" {
-		fmt.Printf("║ Worst RTO Database: %-38s║\n", summary.WorstRTODatabase)
+		fmt.Printf("| Worst RTO Database: %-38s|\n", summary.WorstRTODatabase)
 	}

-	fmt.Println("╚═══════════════════════════════════════════════════════════╝")
+	fmt.Println("+-----------------------------------------------------------+")
 	fmt.Println()

 	// Per-database status
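The box-drawing characters become plain `+`, `-`, and `|`, so the summary renders identically in any locale; alignment now rests entirely on fixed-width `Printf` verbs. A small self-contained sketch of the idea, with made-up values and a border length chosen to match the row width:

```go
// Sketch of the fixed-width ASCII box used by the RTO/RPO summary.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// 57 dashes + 2 corners = 59 columns, matching the row below.
	border := "+" + strings.Repeat("-", 57) + "+"
	fmt.Println(border)
	// %-15s left-aligns each duration in a 15-column field so the
	// closing | lands in the same terminal column on every row.
	fmt.Printf("| Target RTO: %-15s Target RPO: %-15s |\n", "4h0m0s", "1h0m0s")
	fmt.Println(border)
}
```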
@@ -234,19 +234,19 @@ func runRTOStatus(cmd *cobra.Command, args []string) error {
 	fmt.Println(strings.Repeat("-", 70))

 	for _, a := range analyses {
-		status := "✅"
+		status := "[OK]"
 		if !a.RPOCompliant || !a.RTOCompliant {
-			status = "❌"
+			status = "[FAIL]"
 		}

 		rpoStr := formatDuration(a.CurrentRPO)
 		rtoStr := formatDuration(a.CurrentRTO)

 		if !a.RPOCompliant {
-			rpoStr = "⚠️ " + rpoStr
+			rpoStr = "[WARN] " + rpoStr
 		}
 		if !a.RTOCompliant {
-			rtoStr = "⚠️ " + rtoStr
+			rtoStr = "[WARN] " + rtoStr
 		}

 		fmt.Printf("%-25s %-12s %-12s %s\n",
@@ -306,21 +306,21 @@ func runRTOCheck(cmd *cobra.Command, args []string) error {
 	exitCode := 0
 	for _, a := range analyses {
 		if !a.RPOCompliant {
-			fmt.Printf("❌ %s: RPO violation - current %s exceeds target %s\n",
+			fmt.Printf("[FAIL] %s: RPO violation - current %s exceeds target %s\n",
 				a.Database,
 				formatDuration(a.CurrentRPO),
 				formatDuration(config.TargetRPO))
 			exitCode = 1
 		}
 		if !a.RTOCompliant {
-			fmt.Printf("❌ %s: RTO violation - estimated %s exceeds target %s\n",
+			fmt.Printf("[FAIL] %s: RTO violation - estimated %s exceeds target %s\n",
 				a.Database,
 				formatDuration(a.CurrentRTO),
 				formatDuration(config.TargetRTO))
 			exitCode = 1
 		}
 		if a.RPOCompliant && a.RTOCompliant {
-			fmt.Printf("✅ %s: Compliant (RPO: %s, RTO: %s)\n",
+			fmt.Printf("[OK] %s: Compliant (RPO: %s, RTO: %s)\n",
 				a.Database,
 				formatDuration(a.CurrentRPO),
 				formatDuration(a.CurrentRTO))
@@ -371,13 +371,13 @@ func outputAnalysisText(analyses []*rto.Analysis) error {
 		fmt.Println(strings.Repeat("=", 60))

 		// Status
-		rpoStatus := "✅ Compliant"
+		rpoStatus := "[OK] Compliant"
 		if !a.RPOCompliant {
-			rpoStatus = "❌ Violation"
+			rpoStatus = "[FAIL] Violation"
 		}
-		rtoStatus := "✅ Compliant"
+		rtoStatus := "[OK] Compliant"
 		if !a.RTOCompliant {
-			rtoStatus = "❌ Violation"
+			rtoStatus = "[FAIL] Violation"
 		}

 		fmt.Println()
@@ -420,7 +420,7 @@ func outputAnalysisText(analyses []*rto.Analysis) error {
 		fmt.Println(" Recommendations:")
 		fmt.Println(strings.Repeat("-", 50))
 		for _, r := range a.Recommendations {
-			icon := "💡"
+			icon := "[TIP]"
 			switch r.Priority {
 			case rto.PriorityCritical:
 				icon = "🔴"
@@ -141,7 +141,7 @@ func testConnection(ctx context.Context) error {

 	// Display results
 	fmt.Println("Connection Test Results:")
-	fmt.Printf(" Status: Connected ✅\n")
+	fmt.Printf(" Status: Connected [OK]\n")
 	fmt.Printf(" Version: %s\n", version)
 	fmt.Printf(" Databases: %d found\n", len(databases))

@@ -167,7 +167,7 @@ func testConnection(ctx context.Context) error {
 	}

 	fmt.Println()
-	fmt.Println("✅ Status check completed successfully!")
+	fmt.Println("[OK] Status check completed successfully!")

 	return nil
 }
@@ -96,17 +96,17 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
 			continue
 		}

-		fmt.Printf("📁 %s\n", filepath.Base(backupFile))
+		fmt.Printf("[FILE] %s\n", filepath.Base(backupFile))

 		if quickVerify {
 			// Quick check: size only
 			err := verification.QuickCheck(backupFile)
 			if err != nil {
-				fmt.Printf(" ❌ FAILED: %v\n\n", err)
+				fmt.Printf(" [FAIL] FAILED: %v\n\n", err)
 				failureCount++
 				continue
 			}
-			fmt.Printf(" ✅ VALID (quick check)\n\n")
+			fmt.Printf(" [OK] VALID (quick check)\n\n")
 			successCount++
 		} else {
 			// Full verification with SHA-256
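The full (non-quick) path verifies a SHA-256 digest; the `verification` package doing that work is not part of this diff. A minimal sketch of such a streaming file checksum with the standard library, with the expected-hash source left as a stated assumption:

```go
// Illustrative SHA-256 file verification; the real logic lives in the
// verification package, which this diff does not show.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func fileSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := sha256.New()
	// Stream the file through the hash; no full read into memory.
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	expected := "..." // assumed to come from the backup's .info metadata
	got, err := fileSHA256("backup.dump")
	if err != nil {
		fmt.Println("[FAIL]", err)
		return
	}
	if got != expected {
		fmt.Println("[FAIL] checksum mismatch")
		return
	}
	fmt.Println("[OK] VALID")
}
```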
@@ -116,7 +116,7 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
 			}

 			if result.Valid {
-				fmt.Printf(" ✅ VALID\n")
+				fmt.Printf(" [OK] VALID\n")
 				if verboseVerify {
 					meta, _ := metadata.Load(backupFile)
 					fmt.Printf(" Size: %s\n", metadata.FormatSize(meta.SizeBytes))
@@ -127,7 +127,7 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
 				fmt.Println()
 				successCount++
 			} else {
-				fmt.Printf(" ❌ FAILED: %v\n", result.Error)
+				fmt.Printf(" [FAIL] FAILED: %v\n", result.Error)
 				if verboseVerify {
 					if !result.FileExists {
 						fmt.Printf(" File does not exist\n")
@@ -147,11 +147,11 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
 	}

 	// Summary
-	fmt.Println(strings.Repeat("─", 50))
+	fmt.Println(strings.Repeat("-", 50))
 	fmt.Printf("Total: %d backups\n", len(backupFiles))
-	fmt.Printf("✅ Valid: %d\n", successCount)
+	fmt.Printf("[OK] Valid: %d\n", successCount)
 	if failureCount > 0 {
-		fmt.Printf("❌ Failed: %d\n", failureCount)
+		fmt.Printf("[FAIL] Failed: %d\n", failureCount)
 		os.Exit(1)
 	}

@@ -195,16 +195,16 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {

 	for _, uri := range args {
 		if !isCloudURI(uri) {
-			fmt.Printf("⚠️ Skipping non-cloud URI: %s\n", uri)
+			fmt.Printf("[WARN] Skipping non-cloud URI: %s\n", uri)
 			continue
 		}

-		fmt.Printf("☁️ %s\n", uri)
+		fmt.Printf("[CLOUD] %s\n", uri)

 		// Download and verify
 		result, err := verifyCloudBackup(cmd.Context(), uri, quickVerify, verboseVerify)
 		if err != nil {
-			fmt.Printf(" ❌ FAILED: %v\n\n", err)
+			fmt.Printf(" [FAIL] FAILED: %v\n\n", err)
 			failureCount++
 			continue
 		}
@@ -212,7 +212,7 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
 		// Cleanup temp file
 		defer result.Cleanup()

-		fmt.Printf(" ✅ VALID\n")
+		fmt.Printf(" [OK] VALID\n")
 		if verboseVerify && result.MetadataPath != "" {
 			meta, _ := metadata.Load(result.MetadataPath)
 			if meta != nil {
@@ -226,7 +226,7 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
 		successCount++
 	}

-	fmt.Printf("\n✅ Summary: %d valid, %d failed\n", successCount, failureCount)
+	fmt.Printf("\n[OK] Summary: %d valid, %d failed\n", successCount, failureCount)

 	if failureCount > 0 {
 		os.Exit(1)
go.mod (41 changed lines)
@@ -5,15 +5,27 @@ go 1.24.0
 toolchain go1.24.9

 require (
-	github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2
+	cloud.google.com/go/storage v1.57.2
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
+	github.com/aws/aws-sdk-go-v2 v1.40.0
+	github.com/aws/aws-sdk-go-v2/config v1.32.2
+	github.com/aws/aws-sdk-go-v2/credentials v1.19.2
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.92.1
 	github.com/charmbracelet/bubbles v0.21.0
 	github.com/charmbracelet/bubbletea v1.3.10
 	github.com/charmbracelet/lipgloss v1.1.0
+	github.com/dustin/go-humanize v1.0.1
 	github.com/go-sql-driver/mysql v1.9.3
 	github.com/jackc/pgx/v5 v5.7.6
+	github.com/mattn/go-sqlite3 v1.14.32
+	github.com/shirou/gopsutil/v3 v3.24.5
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.10.1
 	github.com/spf13/pflag v1.0.9
+	golang.org/x/crypto v0.43.0
+	google.golang.org/api v0.256.0
 )

 require (
@@ -24,20 +36,13 @@ require (
 	cloud.google.com/go/compute/metadata v0.9.0 // indirect
 	cloud.google.com/go/iam v1.5.2 // indirect
 	cloud.google.com/go/monitoring v1.24.2 // indirect
-	cloud.google.com/go/storage v1.57.2 // indirect
 	filippo.io/edwards25519 v1.1.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.19.2 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
@@ -46,47 +51,58 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.5 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14 // indirect
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.92.1 // indirect
 	github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect
 	github.com/aws/smithy-go v1.23.2 // indirect
 	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
 	github.com/charmbracelet/x/ansi v0.10.1 // indirect
 	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
 	github.com/charmbracelet/x/term v0.2.1 // indirect
 	github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
-	github.com/creack/pty v1.1.17 // indirect
 	github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
 	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+	github.com/fatih/color v1.18.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/go-jose/go-jose/v4 v4.1.2 // indirect
 	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/google/s2a-go v0.1.9 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
 	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
+	github.com/hashicorp/errwrap v1.0.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
 	github.com/jackc/puddle/v2 v2.2.2 // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-localereader v0.0.1 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
-	github.com/mattn/go-sqlite3 v1.14.32 // indirect
+	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
 	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
 	github.com/muesli/cancelreader v0.2.2 // indirect
 	github.com/muesli/termenv v0.16.0 // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
+	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
+	github.com/schollz/progressbar/v3 v3.19.0 // indirect
+	github.com/spf13/afero v1.15.0 // indirect
 	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
+	github.com/tklauser/go-sysconf v0.3.12 // indirect
+	github.com/tklauser/numcpus v0.6.1 // indirect
 	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
+	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	github.com/zeebo/errs v1.4.0 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
@@ -97,14 +113,13 @@ require (
 	go.opentelemetry.io/otel/sdk v1.37.0 // indirect
 	go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
 	go.opentelemetry.io/otel/trace v1.37.0 // indirect
-	golang.org/x/crypto v0.43.0 // indirect
 	golang.org/x/net v0.46.0 // indirect
 	golang.org/x/oauth2 v0.33.0 // indirect
 	golang.org/x/sync v0.18.0 // indirect
 	golang.org/x/sys v0.38.0 // indirect
+	golang.org/x/term v0.36.0 // indirect
 	golang.org/x/text v0.30.0 // indirect
 	golang.org/x/time v0.14.0 // indirect
-	google.golang.org/api v0.256.0 // indirect
 	google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
go.sum (122 changed lines)
@@ -10,36 +10,44 @@ cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdB
 cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
 cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
 cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
+cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
+cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
+cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E=
+cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY=
 cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
 cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
 cloud.google.com/go/storage v1.57.2 h1:sVlym3cHGYhrp6XZKkKb+92I1V42ks2qKKpB0CF5Mb4=
 cloud.google.com/go/storage v1.57.2/go.mod h1:n5ijg4yiRXXpCu0sJTD6k+eMf7GRrJmPyr9YxLXGHOk=
+cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
+cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo=
-github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
-github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
 github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
 github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y=
-github.com/aws/aws-sdk-go-v2/config v1.32.1 h1:iODUDLgk3q8/flEC7ymhmxjfoAnBDwEEYEVyKZ9mzjU=
-github.com/aws/aws-sdk-go-v2/config v1.32.1/go.mod h1:xoAgo17AGrPpJBSLg81W+ikM0cpOZG8ad04T2r+d5P0=
 github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk=
 github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.1 h1:JeW+EwmtTE0yXFK8SmklrFh/cGTTXsQJumgMZNlbxfM=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.1/go.mod h1:BOoXiStwTF+fT2XufhO0Efssbi1CNIO/ZXpZu87N0pw=
 github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4=
 github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
|
github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
|
||||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
|
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
|
||||||
@@ -62,30 +70,22 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE
|
|||||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14 h1:FzQE21lNtUor0Fb7QNgnEyiRCBlolLTX/Z1j65S7teM=
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14 h1:FzQE21lNtUor0Fb7QNgnEyiRCBlolLTX/Z1j65S7teM=
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14/go.mod h1:s1ydyWG9pm3ZwmmYN21HKyG9WzAZhYVW85wMHs5FV6w=
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.14/go.mod h1:s1ydyWG9pm3ZwmmYN21HKyG9WzAZhYVW85wMHs5FV6w=
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.0 h1:8FshVvnV2sr9kOSAbOnc/vwVmmAwMjOedKH6JW2ddPM=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.0/go.mod h1:wYNqY3L02Z3IgRYxOBPH9I1zD9Cjh9hI5QOy/eOjQvw=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.1 h1:OgQy/+0+Kc3khtqiEOk23xQAglXi3Tj0y5doOxbi5tg=
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.1 h1:OgQy/+0+Kc3khtqiEOk23xQAglXi3Tj0y5doOxbi5tg=
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.1/go.mod h1:wYNqY3L02Z3IgRYxOBPH9I1zD9Cjh9hI5QOy/eOjQvw=
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.92.1/go.mod h1:wYNqY3L02Z3IgRYxOBPH9I1zD9Cjh9hI5QOy/eOjQvw=
|
||||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.1 h1:BDgIUYGEo5TkayOWv/oBLPphWwNm/A91AebUjAu5L5g=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.1/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
|
||||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
|
github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.4 h1:U//SlnkE1wOQiIImxzdY5PXat4Wq+8rlfVEw4Y7J8as=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.4/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
|
github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.9 h1:LU8S9W/mPDAU9q0FjCLi0TrCheLMGwzbRpvUMwYspcA=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.9/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
|
||||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
|
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.1 h1:GdGmKtG+/Krag7VfyOXV17xjTCz0i9NT+JnqLTOI5nA=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.1/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
|
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
|
||||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
|
github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
|
||||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||||
|
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||||
|
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
|
github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
|
||||||
@@ -105,17 +105,24 @@ github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNE
|
|||||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
|
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
|
||||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
|
|
||||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||||
|
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||||
|
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||||
|
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||||
|
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||||
github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI=
|
github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI=
|
||||||
@@ -125,8 +132,19 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
|||||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
|
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||||
|
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||||
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
|
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
|
||||||
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||||
|
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||||
|
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||||
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
|
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
|
github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
|
||||||
|
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
|
||||||
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
|
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
|
||||||
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
|
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
@@ -135,6 +153,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAV
|
|||||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||||
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
|
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
|
||||||
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
|
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
|
||||||
|
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||||
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||||
@@ -145,8 +167,15 @@ github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
|
|||||||
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
||||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||||
|
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||||
|
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||||
|
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||||
|
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||||
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
|
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||||
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||||
@@ -155,22 +184,35 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
|
|||||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||||
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
|
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
|
||||||
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||||
|
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
|
||||||
|
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
|
||||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||||
|
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||||
|
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
|
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
|
||||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
|
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||||
|
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/schollz/progressbar/v3 v3.19.0 h1:Ea18xuIRQXLAUidVDox3AbwfUhD0/1IvohyTutOIFoc=
|
||||||
|
github.com/schollz/progressbar/v3 v3.19.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
|
||||||
|
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
|
||||||
|
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
|
||||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
|
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||||
|
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||||
@@ -179,13 +221,17 @@ github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8W
|
|||||||
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
|
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||||
|
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||||
|
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||||
|
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
||||||
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||||
@@ -198,6 +244,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6h
|
|||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
|
||||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||||
|
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||||
|
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||||
@@ -206,43 +254,35 @@ go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFh
|
|||||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
|
||||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
|
||||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
|
||||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
|
||||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||||
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
|
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
|
||||||
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
|
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
|
||||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
|
||||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
|
||||||
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
||||||
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
||||||
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
|
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
|
||||||
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
|
||||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
|
||||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
|
||||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
|
||||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
|
||||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
|
||||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
|
||||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
|
||||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||||
|
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||||
google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI=
|
google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI=
|
||||||
google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964=
|
google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964=
|
||||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
|
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
|
||||||
|
|||||||
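The dashboard added below reads a handful of Prometheus series exported by dbbackup: dbbackup_rpo_seconds, dbbackup_backup_total (with a status label of success or failure), dbbackup_last_backup_size_bytes, dbbackup_last_backup_duration_seconds, and dbbackup_dedup_ratio, each keyed by a database label. As a minimal sketch of the producing side, the snippet below registers metrics with those names using prometheus/client_golang. The metric names and labels are taken from the dashboard queries; the metric types, help strings, sample values, and the :9187 port are illustrative assumptions, not dbbackup's actual exporter code.

    // metrics_sketch.go: a hypothetical exporter for the dbbackup_* series
    // queried by grafana/dbbackup-dashboard.json (names and labels from the
    // dashboard; everything else is assumed for illustration).
    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    var (
        rpoSeconds = promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "dbbackup_rpo_seconds",
            Help: "Seconds since the last successful backup.",
        }, []string{"database"})
        backupTotal = promauto.NewCounterVec(prometheus.CounterOpts{
            Name: "dbbackup_backup_total",
            Help: "Backups attempted, labelled success or failure.",
        }, []string{"database", "status"})
        lastBackupSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "dbbackup_last_backup_size_bytes",
            Help: "Size of the most recent backup.",
        }, []string{"database"})
        lastBackupDuration = promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "dbbackup_last_backup_duration_seconds",
            Help: "Duration of the most recent backup.",
        }, []string{"database"})
        dedupRatio = promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "dbbackup_dedup_ratio",
            Help: "Deduplicated size as a fraction of logical size (0-1).",
        }, []string{"database"})
    )

    func main() {
        // Hypothetical sample values for one database ("appdb") so the
        // /metrics endpoint is scrapeable end to end.
        rpoSeconds.WithLabelValues("appdb").Set(120)
        backupTotal.WithLabelValues("appdb", "success").Inc()
        lastBackupSize.WithLabelValues("appdb").Set(42 << 20)
        lastBackupDuration.WithLabelValues("appdb").Set(37.5)
        dedupRatio.WithLabelValues("appdb").Set(0.35)

        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":9187", nil))
    }

With such an exporter scraped by Prometheus, the dashboard's stat panels reduce each series with lastNotNull, and the RPO thresholds (yellow at 43200 s, red at 86400 s) turn panels amber after 12 hours and red after 24 hours without a backup.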
grafana/dbbackup-dashboard.json (new file, 1303 lines)
@@ -0,0 +1,1303 @@
{
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": { "type": "grafana", "uid": "-- Grafana --" },
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "type": "dashboard"
      }
    ]
  },
  "editable": true,
  "fiscalYearStartMonth": 0,
  "graphTooltip": 0,
  "id": null,
  "links": [],
  "liveNow": false,
  "panels": [
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [
            {
              "options": {
                "0": { "color": "red", "index": 1, "text": "FAILED" },
                "1": { "color": "green", "index": 0, "text": "SUCCESS" }
              },
              "type": "value"
            }
          ],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "red", "value": null },
              { "color": "green", "value": 1 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 0 },
      "id": 1,
      "options": {
        "colorMode": "background",
        "graphMode": "none",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_rpo_seconds{instance=~\"$instance\"} < 86400",
          "legendFormat": "{{database}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Last Backup Status",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 43200 },
              { "color": "red", "value": 86400 }
            ]
          },
          "unit": "s"
        },
        "overrides": []
      },
      "gridPos": { "h": 4, "w": 6, "x": 6, "y": 0 },
      "id": 2,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_rpo_seconds{instance=~\"$instance\"}",
          "legendFormat": "{{database}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Time Since Last Backup",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [{ "color": "green", "value": null }]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 4, "w": 6, "x": 12, "y": 0 },
      "id": 3,
      "options": {
        "colorMode": "value",
        "graphMode": "none",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_backup_total{instance=~\"$instance\", status=\"success\"}",
          "legendFormat": "{{database}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Total Successful Backups",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 1 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 4, "w": 6, "x": 18, "y": 0 },
      "id": 4,
      "options": {
        "colorMode": "value",
        "graphMode": "none",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_backup_total{instance=~\"$instance\", status=\"failure\"}",
          "legendFormat": "{{database}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Total Failed Backups",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 10,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "insertNulls": false,
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "line" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 86400 }
            ]
          },
          "unit": "s"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 },
      "id": 5,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_rpo_seconds{instance=~\"$instance\"}",
          "legendFormat": "{{instance}} - {{database}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "RPO Over Time",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "bars",
            "fillOpacity": 100,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "insertNulls": false,
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "never",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [{ "color": "green", "value": null }]
          },
          "unit": "bytes"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 },
      "id": 6,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_last_backup_size_bytes{instance=~\"$instance\"}",
          "legendFormat": "{{instance}} - {{database}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Backup Size",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 10,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "insertNulls": false,
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [{ "color": "green", "value": null }]
          },
          "unit": "s"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 12 },
      "id": 7,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_last_backup_duration_seconds{instance=~\"$instance\"}",
          "legendFormat": "{{instance}} - {{database}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Backup Duration",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "custom": { "align": "auto", "cellOptions": { "type": "auto" }, "inspect": false },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [{ "color": "green", "value": null }]
          }
        },
        "overrides": [
          {
            "matcher": { "id": "byName", "options": "Status" },
            "properties": [
              {
                "id": "mappings",
                "value": [
                  {
                    "options": {
                      "0": { "color": "red", "index": 1, "text": "FAILED" },
                      "1": { "color": "green", "index": 0, "text": "SUCCESS" }
                    },
                    "type": "value"
                  }
                ]
              },
              {
                "id": "custom.cellOptions",
                "value": { "mode": "basic", "type": "color-background" }
              }
            ]
          },
          {
            "matcher": { "id": "byName", "options": "RPO" },
            "properties": [
              { "id": "unit", "value": "s" },
              {
                "id": "thresholds",
                "value": {
                  "mode": "absolute",
                  "steps": [
                    { "color": "green", "value": null },
                    { "color": "yellow", "value": 43200 },
                    { "color": "red", "value": 86400 }
                  ]
                }
              },
              {
                "id": "custom.cellOptions",
                "value": { "mode": "basic", "type": "color-background" }
              }
            ]
          },
          {
            "matcher": { "id": "byName", "options": "Size" },
            "properties": [{ "id": "unit", "value": "bytes" }]
          }
        ]
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 12 },
      "id": 8,
      "options": {
        "cellHeight": "sm",
        "footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false },
        "showHeader": true
      },
      "pluginVersion": "10.2.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_rpo_seconds{instance=~\"$instance\"} < 86400",
          "format": "table",
          "instant": true,
          "legendFormat": "__auto",
          "range": false,
          "refId": "Status"
        },
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_rpo_seconds{instance=~\"$instance\"}",
          "format": "table",
          "hide": false,
          "instant": true,
          "legendFormat": "__auto",
          "range": false,
          "refId": "RPO"
        },
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_last_backup_size_bytes{instance=~\"$instance\"}",
          "format": "table",
          "hide": false,
          "instant": true,
          "legendFormat": "__auto",
          "range": false,
          "refId": "Size"
        }
      ],
      "title": "Backup Status Overview",
      "transformations": [
        { "id": "joinByField", "options": { "byField": "database", "mode": "outer" } },
        {
          "id": "organize",
          "options": {
            "excludeByName": {
              "Time": true,
              "Time 1": true,
              "Time 2": true,
              "Time 3": true,
              "__name__": true,
              "__name__ 1": true,
              "__name__ 2": true,
              "__name__ 3": true,
              "instance 1": true,
              "instance 2": true,
              "instance 3": true,
              "job": true,
              "job 1": true,
              "job 2": true,
              "job 3": true
            },
            "indexByName": {},
            "renameByName": {
              "Value #RPO": "RPO",
              "Value #Size": "Size",
              "Value #Status": "Status",
              "database": "Database",
              "instance": "Instance"
            }
          }
        }
      ],
      "type": "table"
    },
    {
      "collapsed": false,
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 30 },
      "id": 100,
      "panels": [],
      "title": "Deduplication Statistics",
      "type": "row"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [{ "color": "blue", "value": null }]
          },
          "unit": "percentunit"
        },
        "overrides": []
      },
      "gridPos": { "h": 5, "w": 6, "x": 0, "y": 31 },
      "id": 101,
      "options": {
        "colorMode": "background",
        "graphMode": "none",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "pluginVersion": "10.2.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "editorMode": "code",
          "expr": "dbbackup_dedup_ratio{instance=~\"$instance\"}",
          "legendFormat": "__auto",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Dedup Ratio",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [{ "color": "green", "value": null }]
          },
          "unit": "bytes"
        },
        "overrides": []
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 5,
|
||||||
|
"w": 6,
|
||||||
|
"x": 6,
|
||||||
|
"y": 31
|
||||||
|
},
|
||||||
|
"id": 102,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "none",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["lastNotNull"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_space_saved_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Space Saved",
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "thresholds"
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "yellow",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "bytes"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 5,
|
||||||
|
"w": 6,
|
||||||
|
"x": 12,
|
||||||
|
"y": 31
|
||||||
|
},
|
||||||
|
"id": 103,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "none",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["lastNotNull"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_disk_usage_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Disk Usage",
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "thresholds"
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "purple",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "short"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 5,
|
||||||
|
"w": 6,
|
||||||
|
"x": 18,
|
||||||
|
"y": 31
|
||||||
|
},
|
||||||
|
"id": 104,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "none",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["lastNotNull"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_chunks_total{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Total Chunks",
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisBorderShow": false,
|
||||||
|
"axisCenteredZero": false,
|
||||||
|
"axisColorMode": "text",
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 10,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"insertNulls": false,
|
||||||
|
"lineInterpolation": "linear",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {
|
||||||
|
"group": "A",
|
||||||
|
"mode": "none"
|
||||||
|
},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "percentunit"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 0,
|
||||||
|
"y": 36
|
||||||
|
},
|
||||||
|
"id": 105,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom",
|
||||||
|
"showLegend": true
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single",
|
||||||
|
"sort": "none"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_database_ratio{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "{{database}}",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Dedup Ratio by Database",
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisBorderShow": false,
|
||||||
|
"axisCenteredZero": false,
|
||||||
|
"axisColorMode": "text",
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 10,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"insertNulls": false,
|
||||||
|
"lineInterpolation": "linear",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {
|
||||||
|
"group": "A",
|
||||||
|
"mode": "none"
|
||||||
|
},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "bytes"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 12,
|
||||||
|
"y": 36
|
||||||
|
},
|
||||||
|
"id": 106,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom",
|
||||||
|
"showLegend": true
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single",
|
||||||
|
"sort": "none"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_space_saved_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "Space Saved",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_disk_usage_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "Disk Usage",
|
||||||
|
"range": true,
|
||||||
|
"refId": "B"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Dedup Storage Over Time",
|
||||||
|
"type": "timeseries"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"refresh": "30s",
|
||||||
|
"schemaVersion": 38,
|
||||||
|
"tags": [
|
||||||
|
"dbbackup",
|
||||||
|
"backup",
|
||||||
|
"database",
|
||||||
|
"dedup"
|
||||||
|
],
|
||||||
|
"templating": {
|
||||||
|
"list": [
|
||||||
|
{
|
||||||
|
"current": {
|
||||||
|
"selected": false,
|
||||||
|
"text": "All",
|
||||||
|
"value": "$__all"
|
||||||
|
},
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"definition": "label_values(dbbackup_rpo_seconds, instance)",
|
||||||
|
"hide": 0,
|
||||||
|
"includeAll": true,
|
||||||
|
"label": "Instance",
|
||||||
|
"multi": true,
|
||||||
|
"name": "instance",
|
||||||
|
"options": [],
|
||||||
|
"query": {
|
||||||
|
"query": "label_values(dbbackup_rpo_seconds, instance)",
|
||||||
|
"refId": "StandardVariableQuery"
|
||||||
|
},
|
||||||
|
"refresh": 1,
|
||||||
|
"regex": "",
|
||||||
|
"skipUrlSync": false,
|
||||||
|
"sort": 1,
|
||||||
|
"type": "query"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hide": 2,
|
||||||
|
"name": "DS_PROMETHEUS",
|
||||||
|
"query": "prometheus",
|
||||||
|
"skipUrlSync": false,
|
||||||
|
"type": "datasource"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"time": {
|
||||||
|
"from": "now-24h",
|
||||||
|
"to": "now"
|
||||||
|
},
|
||||||
|
"timepicker": {},
|
||||||
|
"timezone": "",
|
||||||
|
"title": "DBBackup Overview",
|
||||||
|
"uid": "dbbackup-overview",
|
||||||
|
"version": 1,
|
||||||
|
"weekStart": ""
|
||||||
|
}
|
||||||
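Note: the table and stat panels above all key off a small set of dbbackup_* gauges (dbbackup_rpo_seconds, dbbackup_last_backup_size_bytes, dbbackup_dedup_*). As a rough sketch of the exporter side — the metric names and the "database" label come from the panel queries and the joinByField transformation above, but this registration code is an assumption, not the project's actual exporter — this is how those gauges would be exposed with the standard Prometheus Go client:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Gauge names match the dashboard expressions; the label set matches
// the joinByField transformation key.
var (
	rpoSeconds = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "dbbackup_rpo_seconds",
		Help: "Seconds since the last successful backup per database.",
	}, []string{"database"})

	lastBackupSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "dbbackup_last_backup_size_bytes",
		Help: "Size of the most recent backup per database.",
	}, []string{"database"})
)

func main() {
	prometheus.MustRegister(rpoSeconds, lastBackupSize)

	// Example values; a real exporter would update these after each run.
	rpoSeconds.WithLabelValues("appdb").Set(3600)
	lastBackupSize.WithLabelValues("appdb").Set(42 << 20)

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}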
@@ -2,12 +2,14 @@ package auth

 import (
 	"bufio"
+	"context"
 	"fmt"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"strconv"
 	"strings"
+	"time"

 	"dbbackup/internal/config"
 )
@@ -69,7 +71,10 @@ func checkPgHbaConf(user string) AuthMethod {

 // findHbaFileViaPostgres asks PostgreSQL for the hba_file location
 func findHbaFileViaPostgres() string {
-	cmd := exec.Command("psql", "-U", "postgres", "-t", "-c", "SHOW hba_file;")
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, "psql", "-U", "postgres", "-t", "-c", "SHOW hba_file;")
 	output, err := cmd.Output()
 	if err != nil {
 		return ""
@@ -82,8 +87,11 @@ func parsePgHbaConf(path string, user string) AuthMethod {
 	// Try with sudo if we can't read directly
 	file, err := os.Open(path)
 	if err != nil {
-		// Try with sudo
-		cmd := exec.Command("sudo", "cat", path)
+		// Try with sudo (with timeout)
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+
+		cmd := exec.CommandContext(ctx, "sudo", "cat", path)
 		output, err := cmd.Output()
 		if err != nil {
 			return AuthUnknown
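Both hunks above apply the same fix: replace a bare exec.Command with exec.CommandContext bound to a 10-second deadline, so a hung psql call or an interactive sudo prompt cannot stall authentication detection forever. A minimal standalone sketch of the pattern (the helper name is hypothetical, not part of the package):

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runWithTimeout runs an external command but gives up after the
// deadline; CommandContext kills the process when ctx expires.
func runWithTimeout(timeout time.Duration, name string, args ...string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return exec.CommandContext(ctx, name, args...).Output()
}

func main() {
	out, err := runWithTimeout(10*time.Second, "psql", "-U", "postgres", "-t", "-c", "SHOW hba_file;")
	fmt.Printf("%s err=%v\n", out, err)
}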
@@ -196,13 +204,13 @@ func CheckAuthenticationMismatch(cfg *config.Config) (bool, string) {
 func buildAuthMismatchMessage(osUser, dbUser string, method AuthMethod) string {
 	var msg strings.Builder

-	msg.WriteString("\n⚠️  Authentication Mismatch Detected\n")
+	msg.WriteString("\n[WARN] Authentication Mismatch Detected\n")
 	msg.WriteString(strings.Repeat("=", 60) + "\n\n")

 	msg.WriteString(fmt.Sprintf(" PostgreSQL is using '%s' authentication\n", method))
 	msg.WriteString(fmt.Sprintf(" OS user '%s' cannot authenticate as DB user '%s'\n\n", osUser, dbUser))

-	msg.WriteString("💡 Solutions (choose one):\n\n")
+	msg.WriteString("[TIP] Solutions (choose one):\n\n")

 	msg.WriteString(fmt.Sprintf(" 1. Run as matching user:\n"))
 	msg.WriteString(fmt.Sprintf("    sudo -u %s %s\n\n", dbUser, getCommandLine()))
@@ -218,7 +226,7 @@ func buildAuthMismatchMessage(osUser, dbUser string, method AuthMethod) string {
 	msg.WriteString(" 4. Provide password via flag:\n")
 	msg.WriteString(fmt.Sprintf("    %s --password your_password\n\n", getCommandLine()))

-	msg.WriteString("📝 Note: For production use, ~/.pgpass or PGPASSWORD are recommended\n")
+	msg.WriteString("[NOTE] Note: For production use, ~/.pgpass or PGPASSWORD are recommended\n")
 	msg.WriteString("    to avoid exposing passwords in command history.\n\n")

 	msg.WriteString(strings.Repeat("=", 60) + "\n")
@@ -87,20 +87,46 @@ func IsBackupEncrypted(backupPath string) bool {
 		return meta.Encrypted
 	}

-	// Fallback: check if file starts with encryption nonce
+	// No metadata found - check file format to determine if encrypted
+	// Known unencrypted formats have specific magic bytes:
+	// - Gzip: 1f 8b
+	// - PGDMP (PostgreSQL custom): 50 47 44 4d 50 (PGDMP)
+	// - Plain SQL: starts with text (-- or SET or CREATE)
+	// - Tar: 75 73 74 61 72 (ustar) at offset 257
+	//
+	// If file doesn't match any known format, it MIGHT be encrypted,
+	// but we return false to avoid false positives. User must provide
+	// metadata file or use --encrypt flag explicitly.
 	file, err := os.Open(backupPath)
 	if err != nil {
 		return false
 	}
 	defer file.Close()

-	// Try to read nonce - if it succeeds, likely encrypted
-	nonce := make([]byte, crypto.NonceSize)
-	if n, err := file.Read(nonce); err != nil || n != crypto.NonceSize {
+	header := make([]byte, 6)
+	if n, err := file.Read(header); err != nil || n < 2 {
 		return false
 	}

-	return true
+	// Check for known unencrypted formats
+	// Gzip magic: 1f 8b
+	if header[0] == 0x1f && header[1] == 0x8b {
+		return false // Gzip compressed - not encrypted
+	}
+
+	// PGDMP magic (PostgreSQL custom format)
+	if len(header) >= 5 && string(header[:5]) == "PGDMP" {
+		return false // PostgreSQL custom dump - not encrypted
+	}
+
+	// Plain text SQL (starts with --, SET, CREATE, etc.)
+	if header[0] == '-' || header[0] == 'S' || header[0] == 'C' || header[0] == '/' {
+		return false // Plain text SQL - not encrypted
+	}
+
+	// Without metadata, we cannot reliably determine encryption status
+	// Return false to avoid blocking restores with false positives
+	return false
 }

 // DecryptBackupFile decrypts an encrypted backup file
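The magic-byte checks above generalize naturally to a table of known signatures. A sketch of the same idea, covering only the formats named in the comments (this table-driven form is an illustration, not the code the diff adds):

package main

import (
	"bytes"
	"fmt"
	"os"
)

// knownPlaintext lists magic bytes of formats that are definitely
// not encrypted: gzip streams and PostgreSQL custom-format dumps.
var knownPlaintext = [][]byte{
	{0x1f, 0x8b},    // gzip
	[]byte("PGDMP"), // pg_dump custom format
}

func looksUnencrypted(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	header := make([]byte, 6)
	n, err := f.Read(header)
	if err != nil || n < 2 {
		return false, err
	}
	for _, magic := range knownPlaintext {
		if bytes.HasPrefix(header[:n], magic) {
			return true, nil
		}
	}
	// Plain-text SQL dumps start with "--", "SET", "CREATE", or "/*".
	switch header[0] {
	case '-', 'S', 'C', '/':
		return true, nil
	}
	return false, nil
}

func main() {
	ok, err := looksUnencrypted("backup.sql.gz")
	fmt.Println(ok, err)
}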
@@ -443,6 +443,14 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
 			defer wg.Done()
 			defer func() { <-semaphore }() // Release

+			// Panic recovery - prevent one database failure from crashing entire cluster backup
+			defer func() {
+				if r := recover(); r != nil {
+					e.log.Error("Panic in database backup goroutine", "database", name, "panic", r)
+					atomic.AddInt32(&failCount, 1)
+				}
+			}()
+
 			// Check for cancellation at start of goroutine
 			select {
 			case <-ctx.Done():
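The recover() guard added here is the standard defense when fanning work out to goroutines: an unrecovered panic in any single worker would otherwise tear down the whole process mid-cluster-backup. Sketched in isolation (the worker and counter names are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func backupOne(name string) {
	if name == "audit" {
		panic("simulated driver bug")
	}
}

func main() {
	var wg sync.WaitGroup
	var failCount int32

	for _, name := range []string{"appdb", "metrics", "audit"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			// Convert a panic into a counted failure instead of a crash.
			defer func() {
				if r := recover(); r != nil {
					fmt.Printf("panic backing up %s: %v\n", name, r)
					atomic.AddInt32(&failCount, 1)
				}
			}()
			backupOne(name) // may panic
		}(name)
	}
	wg.Wait()
	fmt.Printf("%d failures\n", failCount)
}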
@@ -465,7 +473,7 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
 				mu.Lock()
 				e.printf(" Database size: %s\n", sizeStr)
 				if size > 10*1024*1024*1024 { // > 10GB
-					e.printf(" ⚠️  Large database detected - this may take a while\n")
+					e.printf(" [WARN] Large database detected - this may take a while\n")
 				}
 				mu.Unlock()
 			}
@@ -502,40 +510,24 @@ func (e *Engine) BackupCluster(ctx context.Context) error {

 			cmd := e.db.BuildBackupCommand(name, dumpFile, options)

-			// Calculate timeout based on database size:
-			// - Minimum 2 hours for small databases
-			// - Add 1 hour per 20GB for large databases
-			// - This allows ~69GB database to take up to 5+ hours
-			timeout := 2 * time.Hour
-			if size, err := e.db.GetDatabaseSize(ctx, name); err == nil {
-				sizeGB := size / (1024 * 1024 * 1024)
-				if sizeGB > 20 {
-					extraHours := (sizeGB / 20) + 1
-					timeout = time.Duration(2+extraHours) * time.Hour
-					mu.Lock()
-					e.printf(" Extended timeout: %v (for %dGB database)\n", timeout, sizeGB)
-					mu.Unlock()
-				}
-			}
-
-			dbCtx, cancel := context.WithTimeout(ctx, timeout)
-			defer cancel()
-			err := e.executeCommand(dbCtx, cmd, dumpFile)
-			cancel()
-
+			// NO TIMEOUT for individual database backups
+			// Large databases with large objects can take many hours
+			// The parent context handles cancellation if needed
+			err := e.executeCommand(ctx, cmd, dumpFile)
 			if err != nil {
 				e.log.Warn("Failed to backup database", "database", name, "error", err)
 				mu.Lock()
-				e.printf(" ⚠️  WARNING: Failed to backup %s: %v\n", name, err)
+				e.printf(" [WARN] WARNING: Failed to backup %s: %v\n", name, err)
 				mu.Unlock()
 				atomic.AddInt32(&failCount, 1)
 			} else {
 				compressedCandidate := strings.TrimSuffix(dumpFile, ".dump") + ".sql.gz"
 				mu.Lock()
 				if info, err := os.Stat(compressedCandidate); err == nil {
-					e.printf(" ✅ Completed %s (%s)\n", name, formatBytes(info.Size()))
+					e.printf(" [OK] Completed %s (%s)\n", name, formatBytes(info.Size()))
 				} else if info, err := os.Stat(dumpFile); err == nil {
-					e.printf(" ✅ Completed %s (%s)\n", name, formatBytes(info.Size()))
+					e.printf(" [OK] Completed %s (%s)\n", name, formatBytes(info.Size()))
 				}
 				mu.Unlock()
 				atomic.AddInt32(&successCount, 1)
@@ -614,12 +606,36 @@ func (e *Engine) executeCommandWithProgress(ctx context.Context, cmdArgs []strin
 		return fmt.Errorf("failed to start command: %w", err)
 	}

-	// Monitor progress via stderr
-	go e.monitorCommandProgress(stderr, tracker)
+	// Monitor progress via stderr in goroutine
+	stderrDone := make(chan struct{})
+	go func() {
+		defer close(stderrDone)
+		e.monitorCommandProgress(stderr, tracker)
+	}()

-	// Wait for command to complete
-	if err := cmd.Wait(); err != nil {
-		return fmt.Errorf("backup command failed: %w", err)
+	// Wait for command to complete with proper context handling
+	cmdDone := make(chan error, 1)
+	go func() {
+		cmdDone <- cmd.Wait()
+	}()
+
+	var cmdErr error
+	select {
+	case cmdErr = <-cmdDone:
+		// Command completed (success or failure)
+	case <-ctx.Done():
+		// Context cancelled - kill process to unblock
+		e.log.Warn("Backup cancelled - killing process")
+		cmd.Process.Kill()
+		<-cmdDone // Wait for goroutine to finish
+		cmdErr = ctx.Err()
+	}
+
+	// Wait for stderr reader to finish
+	<-stderrDone
+
+	if cmdErr != nil {
+		return fmt.Errorf("backup command failed: %w", cmdErr)
 	}

 	return nil
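This Wait-in-a-goroutine / select-on-ctx.Done shape recurs in several hunks below. Plain cmd.Wait() blocks until the child exits, so a hung dump could ignore cancellation forever; the kill-then-drain sequence both unblocks the caller and reaps the process. Reduced to a reusable sketch (the helper is illustrative, not part of the engine):

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// waitWithContext waits for cmd, but kills it and returns ctx.Err()
// if the context is cancelled first. Draining the channel guarantees
// the Wait goroutine never leaks and the child is reaped.
func waitWithContext(ctx context.Context, cmd *exec.Cmd) error {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		cmd.Process.Kill()
		<-done // reap the process; ignore its error
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(waitWithContext(ctx, cmd)) // context deadline exceeded
}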
@@ -696,8 +712,12 @@ func (e *Engine) executeMySQLWithProgressAndCompression(ctx context.Context, cmd
 		return fmt.Errorf("failed to get stderr pipe: %w", err)
 	}

-	// Start monitoring progress
-	go e.monitorCommandProgress(stderr, tracker)
+	// Start monitoring progress in goroutine
+	stderrDone := make(chan struct{})
+	go func() {
+		defer close(stderrDone)
+		e.monitorCommandProgress(stderr, tracker)
+	}()

 	// Start both commands
 	if err := gzipCmd.Start(); err != nil {
@@ -705,20 +725,41 @@ func (e *Engine) executeMySQLWithProgressAndCompression(ctx context.Context, cmd
 	}

 	if err := dumpCmd.Start(); err != nil {
+		gzipCmd.Process.Kill()
 		return fmt.Errorf("failed to start mysqldump: %w", err)
 	}

-	// Wait for mysqldump to complete
-	if err := dumpCmd.Wait(); err != nil {
-		return fmt.Errorf("mysqldump failed: %w", err)
+	// Wait for mysqldump with context handling
+	dumpDone := make(chan error, 1)
+	go func() {
+		dumpDone <- dumpCmd.Wait()
+	}()
+
+	var dumpErr error
+	select {
+	case dumpErr = <-dumpDone:
+		// mysqldump completed
+	case <-ctx.Done():
+		e.log.Warn("Backup cancelled - killing mysqldump")
+		dumpCmd.Process.Kill()
+		gzipCmd.Process.Kill()
+		<-dumpDone
+		return ctx.Err()
 	}

+	// Wait for stderr reader
+	<-stderrDone
+
 	// Close pipe and wait for gzip
 	pipe.Close()
 	if err := gzipCmd.Wait(); err != nil {
 		return fmt.Errorf("gzip failed: %w", err)
 	}

+	if dumpErr != nil {
+		return fmt.Errorf("mysqldump failed: %w", dumpErr)
+	}
+
 	return nil
 }

@@ -749,19 +790,45 @@ func (e *Engine) executeMySQLWithCompression(ctx context.Context, cmdArgs []stri
 	gzipCmd.Stdin = stdin
 	gzipCmd.Stdout = outFile

-	// Start both commands
+	// Start gzip first
 	if err := gzipCmd.Start(); err != nil {
 		return fmt.Errorf("failed to start gzip: %w", err)
 	}

-	if err := dumpCmd.Run(); err != nil {
-		return fmt.Errorf("mysqldump failed: %w", err)
+	// Start mysqldump
+	if err := dumpCmd.Start(); err != nil {
+		gzipCmd.Process.Kill()
+		return fmt.Errorf("failed to start mysqldump: %w", err)
 	}

+	// Wait for mysqldump with context handling
+	dumpDone := make(chan error, 1)
+	go func() {
+		dumpDone <- dumpCmd.Wait()
+	}()
+
+	var dumpErr error
+	select {
+	case dumpErr = <-dumpDone:
+		// mysqldump completed
+	case <-ctx.Done():
+		e.log.Warn("Backup cancelled - killing mysqldump")
+		dumpCmd.Process.Kill()
+		gzipCmd.Process.Kill()
+		<-dumpDone
+		return ctx.Err()
+	}
+
+	// Close pipe and wait for gzip
+	stdin.Close()
 	if err := gzipCmd.Wait(); err != nil {
 		return fmt.Errorf("gzip failed: %w", err)
 	}

+	if dumpErr != nil {
+		return fmt.Errorf("mysqldump failed: %w", dumpErr)
+	}
+
 	return nil
 }

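Both MySQL paths above wire a dump process into a gzip process by hand. The plumbing, stripped of the engine's progress and cancellation bookkeeping, looks like this sketch (commands and file names are placeholders):

package main

import (
	"os"
	"os/exec"
)

func main() {
	out, err := os.Create("dump.sql.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	dump := exec.Command("mysqldump", "--all-databases")
	gz := exec.Command("gzip", "-c")

	// Connect dump's stdout to gzip's stdin, and gzip's stdout to the file.
	pipe, err := dump.StdoutPipe()
	if err != nil {
		panic(err)
	}
	gz.Stdin = pipe
	gz.Stdout = out

	if err := gz.Start(); err != nil {
		panic(err)
	}
	if err := dump.Run(); err != nil { // starts and waits; closes the pipe on exit
		gz.Process.Kill()
		panic(err)
	}
	if err := gz.Wait(); err != nil { // gzip sees EOF and flushes its output
		panic(err)
	}
}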
@@ -898,15 +965,46 @@ func (e *Engine) createArchive(ctx context.Context, sourceDir, outputFile string
 		goto regularTar
 	}

-	// Wait for tar to finish
-	if err := cmd.Wait(); err != nil {
-		pigzCmd.Process.Kill()
-		return fmt.Errorf("tar failed: %w", err)
+	// Wait for tar with proper context handling
+	tarDone := make(chan error, 1)
+	go func() {
+		tarDone <- cmd.Wait()
+	}()
+
+	var tarErr error
+	select {
+	case tarErr = <-tarDone:
+		// tar completed
+	case <-ctx.Done():
+		e.log.Warn("Archive creation cancelled - killing processes")
+		cmd.Process.Kill()
+		pigzCmd.Process.Kill()
+		<-tarDone
+		return ctx.Err()
 	}

-	// Wait for pigz to finish
-	if err := pigzCmd.Wait(); err != nil {
-		return fmt.Errorf("pigz compression failed: %w", err)
+	if tarErr != nil {
+		pigzCmd.Process.Kill()
+		return fmt.Errorf("tar failed: %w", tarErr)
+	}
+
+	// Wait for pigz with proper context handling
+	pigzDone := make(chan error, 1)
+	go func() {
+		pigzDone <- pigzCmd.Wait()
+	}()
+
+	var pigzErr error
+	select {
+	case pigzErr = <-pigzDone:
+	case <-ctx.Done():
+		pigzCmd.Process.Kill()
+		<-pigzDone
+		return ctx.Err()
+	}
+
+	if pigzErr != nil {
+		return fmt.Errorf("pigz compression failed: %w", pigzErr)
 	}
 	return nil
 }
@@ -1144,23 +1242,29 @@ func (e *Engine) uploadToCloud(ctx context.Context, backupFile string, tracker *
 	filename := filepath.Base(backupFile)
 	e.log.Info("Uploading backup to cloud", "file", filename, "size", cloud.FormatSize(info.Size()))

-	// Progress callback
-	var lastPercent int
+	// Create schollz progressbar for visual upload progress
+	bar := progress.NewSchollzBar(info.Size(), fmt.Sprintf("Uploading %s", filename))
+
+	// Progress callback with schollz progressbar
+	var lastBytes int64
 	progressCallback := func(transferred, total int64) {
-		percent := int(float64(transferred) / float64(total) * 100)
-		if percent != lastPercent && percent%10 == 0 {
-			e.log.Debug("Upload progress", "percent", percent, "transferred", cloud.FormatSize(transferred), "total", cloud.FormatSize(total))
-			lastPercent = percent
+		delta := transferred - lastBytes
+		if delta > 0 {
+			_ = bar.Add64(delta)
 		}
+		lastBytes = transferred
 	}

 	// Upload to cloud
 	err = backend.Upload(ctx, backupFile, filename, progressCallback)
 	if err != nil {
+		bar.Fail("Upload failed")
 		uploadStep.Fail(fmt.Errorf("cloud upload failed: %w", err))
 		return err
 	}

+	_ = bar.Finish()
+
 	// Also upload metadata file
 	metaFile := backupFile + ".meta.json"
 	if _, err := os.Stat(metaFile); err == nil {
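progress.NewSchollzBar is the project's own wrapper, so its API here is taken from the call site above. A sketch with the underlying github.com/schollz/progressbar/v3 package shows why the callback tracks a delta: the bar's Add64 is incremental, while the cloud SDK reports cumulative bytes transferred (the wrapper is assumed; the bar library calls are real):

package main

import (
	"time"

	"github.com/schollz/progressbar/v3"
)

func main() {
	total := int64(100 << 20) // 100 MiB upload
	bar := progressbar.DefaultBytes(total, "uploading")

	// The SDK callback reports cumulative bytes; convert to deltas.
	var last int64
	onProgress := func(transferred, totalBytes int64) {
		if d := transferred - last; d > 0 {
			_ = bar.Add64(d)
		}
		last = transferred
	}

	// Simulate an upload reporting cumulative progress.
	for sent := int64(0); sent <= total; sent += 10 << 20 {
		onProgress(sent, total)
		time.Sleep(50 * time.Millisecond)
	}
	_ = bar.Finish()
}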
@@ -1251,8 +1355,10 @@ func (e *Engine) executeCommand(ctx context.Context, cmdArgs []string, outputFil
 		return fmt.Errorf("failed to start backup command: %w", err)
 	}

-	// Stream stderr output (don't buffer it all in memory)
+	// Stream stderr output in goroutine (don't buffer it all in memory)
+	stderrDone := make(chan struct{})
 	go func() {
+		defer close(stderrDone)
 		scanner := bufio.NewScanner(stderr)
 		scanner.Buffer(make([]byte, 64*1024), 1024*1024) // 1MB max line size
 		for scanner.Scan() {
@@ -1263,10 +1369,30 @@ func (e *Engine) executeCommand(ctx context.Context, cmdArgs []string, outputFil
 		}
 	}()

-	// Wait for command to complete
-	if err := cmd.Wait(); err != nil {
-		e.log.Error("Backup command failed", "error", err, "database", filepath.Base(outputFile))
-		return fmt.Errorf("backup command failed: %w", err)
+	// Wait for command to complete with proper context handling
+	cmdDone := make(chan error, 1)
+	go func() {
+		cmdDone <- cmd.Wait()
+	}()
+
+	var cmdErr error
+	select {
+	case cmdErr = <-cmdDone:
+		// Command completed (success or failure)
+	case <-ctx.Done():
+		// Context cancelled - kill process to unblock
+		e.log.Warn("Backup cancelled - killing pg_dump process")
+		cmd.Process.Kill()
+		<-cmdDone // Wait for goroutine to finish
+		cmdErr = ctx.Err()
+	}
+
+	// Wait for stderr reader to finish
+	<-stderrDone
+
+	if cmdErr != nil {
+		e.log.Error("Backup command failed", "error", cmdErr, "database", filepath.Base(outputFile))
+		return fmt.Errorf("backup command failed: %w", cmdErr)
 	}

 	return nil
@@ -242,7 +242,7 @@ func TestIncrementalBackupRestore(t *testing.T) {
 		t.Errorf("Unchanged file base/12345/1235 not found in restore: %v", err)
 	}

-	t.Log("✅ Incremental backup and restore test completed successfully")
+	t.Log("[OK] Incremental backup and restore test completed successfully")
 }

 // TestIncrementalBackupErrors tests error handling
@@ -75,16 +75,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {

 	if check.Critical {
 		status = "CRITICAL"
-		icon = "❌"
+		icon = "[X]"
 	} else if check.Warning {
 		status = "WARNING"
-		icon = "⚠️ "
+		icon = "[!]"
 	} else {
 		status = "OK"
-		icon = "✓"
+		icon = "[+]"
 	}

-	msg := fmt.Sprintf(`📊 Disk Space Check (%s):
+	msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
 Path: %s
 Total: %s
 Available: %s (%.1f%% used)
@@ -98,13 +98,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
 		status)

 	if check.Critical {
-		msg += "\n \n ⚠️  CRITICAL: Insufficient disk space!"
+		msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
 		msg += "\n Operation blocked. Free up space before continuing."
 	} else if check.Warning {
-		msg += "\n \n ⚠️  WARNING: Low disk space!"
+		msg += "\n \n [!] WARNING: Low disk space!"
 		msg += "\n Backup may fail if database is larger than estimated."
 	} else {
-		msg += "\n \n ✓ Sufficient space available"
+		msg += "\n \n [+] Sufficient space available"
 	}

 	return msg
@@ -75,16 +75,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {

 	if check.Critical {
 		status = "CRITICAL"
-		icon = "❌"
+		icon = "[X]"
 	} else if check.Warning {
 		status = "WARNING"
-		icon = "⚠️ "
+		icon = "[!]"
 	} else {
 		status = "OK"
-		icon = "✓"
+		icon = "[+]"
 	}

-	msg := fmt.Sprintf(`📊 Disk Space Check (%s):
+	msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
 Path: %s
 Total: %s
 Available: %s (%.1f%% used)
@@ -98,13 +98,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
 		status)

 	if check.Critical {
-		msg += "\n \n ⚠️  CRITICAL: Insufficient disk space!"
+		msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
 		msg += "\n Operation blocked. Free up space before continuing."
 	} else if check.Warning {
-		msg += "\n \n ⚠️  WARNING: Low disk space!"
+		msg += "\n \n [!] WARNING: Low disk space!"
 		msg += "\n Backup may fail if database is larger than estimated."
 	} else {
-		msg += "\n \n ✓ Sufficient space available"
+		msg += "\n \n [+] Sufficient space available"
 	}

 	return msg
@@ -58,16 +58,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {

 	if check.Critical {
 		status = "CRITICAL"
-		icon = "❌"
+		icon = "[X]"
 	} else if check.Warning {
 		status = "WARNING"
-		icon = "⚠️ "
+		icon = "[!]"
 	} else {
 		status = "OK"
-		icon = "✓"
+		icon = "[+]"
 	}

-	msg := fmt.Sprintf(`📊 Disk Space Check (%s):
+	msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
 Path: %s
 Total: %s
 Available: %s (%.1f%% used)
@@ -81,13 +81,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
 		status)

 	if check.Critical {
-		msg += "\n \n ⚠️  CRITICAL: Insufficient disk space!"
+		msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
 		msg += "\n Operation blocked. Free up space before continuing."
 	} else if check.Warning {
-		msg += "\n \n ⚠️  WARNING: Low disk space!"
+		msg += "\n \n [!] WARNING: Low disk space!"
 		msg += "\n Backup may fail if database is larger than estimated."
 	} else {
-		msg += "\n \n ✓ Sufficient space available"
+		msg += "\n \n [+] Sufficient space available"
 	}

 	return msg
@@ -94,16 +94,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {

 	if check.Critical {
 		status = "CRITICAL"
-		icon = "❌"
+		icon = "[X]"
 	} else if check.Warning {
 		status = "WARNING"
-		icon = "⚠️ "
+		icon = "[!]"
 	} else {
 		status = "OK"
-		icon = "✓"
+		icon = "[+]"
 	}

-	msg := fmt.Sprintf(`📊 Disk Space Check (%s):
+	msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
 Path: %s
 Total: %s
 Available: %s (%.1f%% used)
@@ -117,13 +117,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
 		status)

 	if check.Critical {
-		msg += "\n \n ⚠️  CRITICAL: Insufficient disk space!"
+		msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
 		msg += "\n Operation blocked. Free up space before continuing."
 	} else if check.Warning {
-		msg += "\n \n ⚠️  WARNING: Low disk space!"
+		msg += "\n \n [!] WARNING: Low disk space!"
 		msg += "\n Backup may fail if database is larger than estimated."
 	} else {
-		msg += "\n \n ✓ Sufficient space available"
+		msg += "\n \n [+] Sufficient space available"
 	}

 	return msg
@@ -68,8 +68,8 @@ func ClassifyError(errorMsg string) *ErrorClassification {
 			Type:     "critical",
 			Category: "locks",
 			Message:  errorMsg,
-			Hint:     "Lock table exhausted - typically caused by large objects in parallel restore",
-			Action:   "Increase max_locks_per_transaction in postgresql.conf to 512 or higher",
+			Hint:     "Lock table exhausted - typically caused by large objects (BLOBs) during restore",
+			Action:   "Option 1: Increase max_locks_per_transaction to 1024+ in postgresql.conf (requires restart). Option 2: Update dbbackup and retry - phased restore now auto-enabled for BLOB databases",
 			Severity: 2,
 		}
 	case "permission_denied":
@@ -142,8 +142,8 @@ func ClassifyError(errorMsg string) *ErrorClassification {
 			Type:     "critical",
 			Category: "locks",
 			Message:  errorMsg,
-			Hint:     "Lock table exhausted - typically caused by large objects in parallel restore",
-			Action:   "Increase max_locks_per_transaction in postgresql.conf to 512 or higher",
+			Hint:     "Lock table exhausted - typically caused by large objects (BLOBs) during restore",
+			Action:   "Option 1: Increase max_locks_per_transaction to 1024+ in postgresql.conf (requires restart). Option 2: Update dbbackup and retry - phased restore now auto-enabled for BLOB databases",
 			Severity: 2,
 		}
 	}
@@ -234,22 +234,22 @@ func FormatErrorWithHint(errorMsg string) string {
 	var icon string
 	switch classification.Type {
 	case "ignorable":
-		icon = "ℹ️ "
+		icon = "[i]"
 	case "warning":
-		icon = "⚠️ "
+		icon = "[!]"
 	case "critical":
-		icon = "❌"
+		icon = "[X]"
 	case "fatal":
-		icon = "🛑"
+		icon = "[!!]"
 	default:
-		icon = "⚠️ "
+		icon = "[!]"
 	}

 	output := fmt.Sprintf("%s %s Error\n\n", icon, strings.ToUpper(classification.Type))
 	output += fmt.Sprintf("Category: %s\n", classification.Category)
 	output += fmt.Sprintf("Message: %s\n\n", classification.Message)
-	output += fmt.Sprintf("💡 Hint: %s\n\n", classification.Hint)
-	output += fmt.Sprintf("🔧 Action: %s\n", classification.Action)
+	output += fmt.Sprintf("[HINT] Hint: %s\n\n", classification.Hint)
+	output += fmt.Sprintf("[ACTION] Action: %s\n", classification.Action)

 	return output
 }
@@ -257,7 +257,7 @@ func FormatErrorWithHint(errorMsg string) string {
 // FormatMultipleErrors formats multiple errors with classification
 func FormatMultipleErrors(errors []string) string {
 	if len(errors) == 0 {
-		return "✓ No errors"
+		return "[+] No errors"
 	}

 	ignorable := 0
@@ -285,22 +285,22 @@ func FormatMultipleErrors(errors []string) string {
 		}
 	}

-	output := "📊 Error Summary:\n\n"
+	output := "[SUMMARY] Error Summary:\n\n"
 	if ignorable > 0 {
-		output += fmt.Sprintf(" ℹ️  %d ignorable (objects already exist)\n", ignorable)
+		output += fmt.Sprintf(" [i] %d ignorable (objects already exist)\n", ignorable)
 	}
 	if warnings > 0 {
-		output += fmt.Sprintf(" ⚠️  %d warnings\n", warnings)
+		output += fmt.Sprintf(" [!] %d warnings\n", warnings)
 	}
 	if critical > 0 {
-		output += fmt.Sprintf(" ❌ %d critical errors\n", critical)
+		output += fmt.Sprintf(" [X] %d critical errors\n", critical)
 	}
 	if fatal > 0 {
-		output += fmt.Sprintf(" 🛑 %d fatal errors\n", fatal)
+		output += fmt.Sprintf(" [!!] %d fatal errors\n", fatal)
 	}

 	if len(criticalErrors) > 0 {
-		output += "\n📝 Critical Issues:\n\n"
+		output += "\n[CRITICAL] Critical Issues:\n\n"
 		for i, err := range criticalErrors {
 			class := ClassifyError(err)
 			output += fmt.Sprintf("%d. %s\n", i+1, class.Hint)
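ClassifyError above pattern-matches server error text into a hint/action pair. Reduced to its skeleton — the types, the table entries, and the "out of shared memory" trigger are illustrative assumptions, though that is the PostgreSQL message usually associated with lock-table exhaustion:

package main

import (
	"fmt"
	"strings"
)

type classification struct {
	Category, Hint, Action string
}

// Substring match table; first hit wins.
var rules = []struct {
	needle string
	class  classification
}{
	{"out of shared memory", classification{
		Category: "locks",
		Hint:     "Lock table exhausted - typically caused by large objects (BLOBs) during restore",
		Action:   "Increase max_locks_per_transaction in postgresql.conf",
	}},
	{"permission denied", classification{
		Category: "permissions",
		Hint:     "Role lacks privileges on the target object",
		Action:   "Grant the required privileges or restore as a superuser",
	}},
}

func classify(msg string) classification {
	for _, r := range rules {
		if strings.Contains(msg, r.needle) {
			return r.class
		}
	}
	return classification{Category: "unknown", Hint: msg, Action: "Inspect the full server log"}
}

func main() {
	c := classify("ERROR: out of shared memory")
	fmt.Printf("%s: %s\n", c.Category, c.Action)
}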
@@ -49,15 +49,15 @@ func (s CheckStatus) String() string {
 func (s CheckStatus) Icon() string {
 	switch s {
 	case StatusPassed:
-		return "✓"
+		return "[+]"
 	case StatusWarning:
-		return "⚠"
+		return "[!]"
 	case StatusFailed:
-		return "✗"
+		return "[-]"
 	case StatusSkipped:
-		return "○"
+		return "[ ]"
 	default:
-		return "?"
+		return "[?]"
 	}
 }

@@ -11,9 +11,9 @@ func FormatPreflightReport(result *PreflightResult, dbName string, verbose bool)
 	var sb strings.Builder

 	sb.WriteString("\n")
-	sb.WriteString("╔══════════════════════════════════════════════════════════════╗\n")
-	sb.WriteString("║              [DRY RUN] Preflight Check Results               ║\n")
-	sb.WriteString("╚══════════════════════════════════════════════════════════════╝\n")
+	sb.WriteString("+==============================================================+\n")
+	sb.WriteString("|              [DRY RUN] Preflight Check Results               |\n")
+	sb.WriteString("+==============================================================+\n")
 	sb.WriteString("\n")

 	// Database info
@@ -29,7 +29,7 @@ func FormatPreflightReport(result *PreflightResult, dbName string, verbose bool)

 	// Check results
 	sb.WriteString(" Checks:\n")
-	sb.WriteString(" ─────────────────────────────────────────────────────────────\n")
+	sb.WriteString(" --------------------------------------------------------------\n")

 	for _, check := range result.Checks {
 		icon := check.Status.Icon()
@@ -40,26 +40,26 @@ func FormatPreflightReport(result *PreflightResult, dbName string, verbose bool)
 			color, icon, reset, check.Name+":", check.Message))

 		if verbose && check.Details != "" {
-			sb.WriteString(fmt.Sprintf(" └─ %s\n", check.Details))
+			sb.WriteString(fmt.Sprintf(" +- %s\n", check.Details))
 		}
 	}

-	sb.WriteString(" ─────────────────────────────────────────────────────────────\n")
+	sb.WriteString(" --------------------------------------------------------------\n")
 	sb.WriteString("\n")

 	// Summary
 	if result.AllPassed {
 		if result.HasWarnings {
-			sb.WriteString(" ⚠️  All checks passed with warnings\n")
+			sb.WriteString(" [!] All checks passed with warnings\n")
 			sb.WriteString("\n")
 			sb.WriteString(" Ready to backup. Remove --dry-run to execute.\n")
 		} else {
-			sb.WriteString(" ✅ All checks passed\n")
+			sb.WriteString(" [OK] All checks passed\n")
 			sb.WriteString("\n")
 			sb.WriteString(" Ready to backup. Remove --dry-run to execute.\n")
 		}
 	} else {
-		sb.WriteString(fmt.Sprintf(" ❌ %d check(s) failed\n", result.FailureCount))
+		sb.WriteString(fmt.Sprintf(" [FAIL] %d check(s) failed\n", result.FailureCount))
 		sb.WriteString("\n")
 		sb.WriteString(" Fix the issues above before running backup.\n")
 	}
@@ -96,7 +96,7 @@ func FormatPreflightReportPlain(result *PreflightResult, dbName string) string {
 		status := fmt.Sprintf("[%s]", check.Status.String())
 		sb.WriteString(fmt.Sprintf(" %-10s %-25s %s\n", status, check.Name+":", check.Message))
 		if check.Details != "" {
-			sb.WriteString(fmt.Sprintf(" └─ %s\n", check.Details))
+			sb.WriteString(fmt.Sprintf(" +- %s\n", check.Details))
 		}
 	}

@@ -12,6 +12,7 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"

 	"dbbackup/internal/logger"
 )
@@ -116,8 +117,11 @@ func KillOrphanedProcesses(log logger.Logger) error {

 // findProcessesByName returns PIDs of processes matching the given name
 func findProcessesByName(name string, excludePID int) ([]int, error) {
-	// Use pgrep for efficient process searching
-	cmd := exec.Command("pgrep", "-x", name)
+	// Use pgrep for efficient process searching with timeout
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, "pgrep", "-x", name)
 	output, err := cmd.Output()
 	if err != nil {
 		// Exit code 1 means no processes found (not an error)
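pgrep -x prints one PID per line and exits 1 when nothing matches, which is why the exit-code comment above matters. Parsing its output into the []int the function returns is a few lines (a sketch; the excludePID handling is assumed from the signature):

package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

func pidsOf(name string, excludePID int) ([]int, error) {
	output, err := exec.Command("pgrep", "-x", name).Output()
	if err != nil {
		// pgrep exits 1 when no processes match: treat as empty, not fatal.
		if ee, ok := err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
			return nil, nil
		}
		return nil, err
	}
	var pids []int
	for _, field := range strings.Fields(string(output)) {
		pid, err := strconv.Atoi(field)
		if err != nil || pid == excludePID {
			continue
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

func main() {
	pids, err := pidsOf("pg_dump", 0)
	fmt.Println(pids, err)
}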
|||||||
@@ -90,7 +90,7 @@ func NewAzureBackend(cfg *Config) (*AzureBackend, error) {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Use default Azure credential (managed identity, environment variables, etc.)
|
// Use default Azure credential (managed identity, environment variables, etc.)
|
||||||
return nil, fmt.Errorf("Azure authentication requires account name and key, or use AZURE_STORAGE_CONNECTION_STRING environment variable")
|
return nil, fmt.Errorf("azure authentication requires account name and key, or use AZURE_STORAGE_CONNECTION_STRING environment variable")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,37 +151,46 @@ func (a *AzureBackend) Upload(ctx context.Context, localPath, remotePath string,
 	return a.uploadSimple(ctx, file, blobName, fileSize, progress)
 }
 
-// uploadSimple uploads a file using simple upload (single request)
+// uploadSimple uploads a file using simple upload (single request) with retry
 func (a *AzureBackend) uploadSimple(ctx context.Context, file *os.File, blobName string, fileSize int64, progress ProgressCallback) error {
-	blockBlobClient := a.client.ServiceClient().NewContainerClient(a.containerName).NewBlockBlobClient(blobName)
+	return RetryOperationWithNotify(ctx, DefaultRetryConfig(), func() error {
+		// Reset file position for retry
+		if _, err := file.Seek(0, 0); err != nil {
+			return fmt.Errorf("failed to reset file position: %w", err)
+		}
 
-	// Wrap reader with progress tracking
-	reader := NewProgressReader(file, fileSize, progress)
+		blockBlobClient := a.client.ServiceClient().NewContainerClient(a.containerName).NewBlockBlobClient(blobName)
 
-	// Calculate MD5 hash for integrity
-	hash := sha256.New()
-	teeReader := io.TeeReader(reader, hash)
+		// Wrap reader with progress tracking
+		reader := NewProgressReader(file, fileSize, progress)
 
-	_, err := blockBlobClient.UploadStream(ctx, teeReader, &blockblob.UploadStreamOptions{
-		BlockSize: 4 * 1024 * 1024, // 4MB blocks
-	})
-	if err != nil {
-		return fmt.Errorf("failed to upload blob: %w", err)
-	}
+		// Calculate SHA-256 hash for integrity
+		hash := sha256.New()
+		teeReader := io.TeeReader(reader, hash)
 
-	// Store checksum as metadata
-	checksum := hex.EncodeToString(hash.Sum(nil))
-	metadata := map[string]*string{
-		"sha256": &checksum,
-	}
+		_, err := blockBlobClient.UploadStream(ctx, teeReader, &blockblob.UploadStreamOptions{
+			BlockSize: 4 * 1024 * 1024, // 4MB blocks
+		})
+		if err != nil {
+			return fmt.Errorf("failed to upload blob: %w", err)
+		}
 
-	_, err = blockBlobClient.SetMetadata(ctx, metadata, nil)
-	if err != nil {
-		// Non-fatal: upload succeeded but metadata failed
-		fmt.Fprintf(os.Stderr, "Warning: failed to set blob metadata: %v\n", err)
-	}
+		// Store checksum as metadata
+		checksum := hex.EncodeToString(hash.Sum(nil))
+		metadata := map[string]*string{
+			"sha256": &checksum,
+		}
 
-	return nil
+		_, err = blockBlobClient.SetMetadata(ctx, metadata, nil)
+		if err != nil {
+			// Non-fatal: upload succeeded but metadata failed
+			fmt.Fprintf(os.Stderr, "Warning: failed to set blob metadata: %v\n", err)
+		}
+
+		return nil
+	}, func(err error, duration time.Duration) {
+		fmt.Printf("[Azure] Upload retry in %v: %v\n", duration, err)
+	})
 }
 
 // uploadBlocks uploads a file using block blob staging (for large files)
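Because the upload records the SHA-256 digest as blob metadata, verifying a restored file only requires recomputing the digest locally. A small stdlib-only sketch; the helper name is hypothetical and not part of this repository:

	// fileSHA256 recomputes a file's digest for comparison against the
	// "sha256" metadata stored at upload time.
	func fileSHA256(path string) (string, error) {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()

		h := sha256.New()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
		return hex.EncodeToString(h.Sum(nil)), nil
	}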
@@ -251,7 +260,7 @@ func (a *AzureBackend) uploadBlocks(ctx context.Context, file *os.File, blobName
 	return nil
 }
 
-// Download downloads a file from Azure Blob Storage
+// Download downloads a file from Azure Blob Storage with retry
 func (a *AzureBackend) Download(ctx context.Context, remotePath, localPath string, progress ProgressCallback) error {
 	blobName := strings.TrimPrefix(remotePath, "/")
 	blockBlobClient := a.client.ServiceClient().NewContainerClient(a.containerName).NewBlockBlobClient(blobName)

@@ -264,30 +273,34 @@ func (a *AzureBackend) Download(ctx context.Context, remotePath, localPath strin
 
 	fileSize := *props.ContentLength
 
-	// Download blob
-	resp, err := blockBlobClient.DownloadStream(ctx, nil)
-	if err != nil {
-		return fmt.Errorf("failed to download blob: %w", err)
-	}
-	defer resp.Body.Close()
+	return RetryOperationWithNotify(ctx, DefaultRetryConfig(), func() error {
+		// Download blob
+		resp, err := blockBlobClient.DownloadStream(ctx, nil)
+		if err != nil {
+			return fmt.Errorf("failed to download blob: %w", err)
+		}
+		defer resp.Body.Close()
 
-	// Create local file
-	file, err := os.Create(localPath)
-	if err != nil {
-		return fmt.Errorf("failed to create file: %w", err)
-	}
-	defer file.Close()
+		// Create/truncate local file
+		file, err := os.Create(localPath)
+		if err != nil {
+			return fmt.Errorf("failed to create file: %w", err)
+		}
+		defer file.Close()
 
-	// Wrap reader with progress tracking
-	reader := NewProgressReader(resp.Body, fileSize, progress)
+		// Wrap reader with progress tracking
+		reader := NewProgressReader(resp.Body, fileSize, progress)
 
-	// Copy with progress
-	_, err = io.Copy(file, reader)
-	if err != nil {
-		return fmt.Errorf("failed to write file: %w", err)
-	}
+		// Copy with progress
+		_, err = io.Copy(file, reader)
+		if err != nil {
+			return fmt.Errorf("failed to write file: %w", err)
+		}
 
-	return nil
+		return nil
+	}, func(err error, duration time.Duration) {
+		fmt.Printf("[Azure] Download retry in %v: %v\n", duration, err)
+	})
 }
 
 // Delete deletes a file from Azure Blob Storage
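The comment change to "Create/truncate local file" is the detail that makes the download closure safe to re-run: os.Create opens with O_TRUNC, so a retried attempt overwrites any partial file instead of appending to it.

	// What os.Create(localPath) expands to in the standard library:
	file, err := os.OpenFile(localPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)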
@@ -89,7 +89,7 @@ func (g *GCSBackend) Name() string {
 	return "gcs"
 }
 
-// Upload uploads a file to Google Cloud Storage
+// Upload uploads a file to Google Cloud Storage with retry
 func (g *GCSBackend) Upload(ctx context.Context, localPath, remotePath string, progress ProgressCallback) error {
 	file, err := os.Open(localPath)
 	if err != nil {

@@ -106,45 +106,54 @@ func (g *GCSBackend) Upload(ctx context.Context, localPath, remotePath string, p
 	// Remove leading slash from remote path
 	objectName := strings.TrimPrefix(remotePath, "/")
 
-	bucket := g.client.Bucket(g.bucketName)
-	object := bucket.Object(objectName)
+	return RetryOperationWithNotify(ctx, DefaultRetryConfig(), func() error {
+		// Reset file position for retry
+		if _, err := file.Seek(0, 0); err != nil {
+			return fmt.Errorf("failed to reset file position: %w", err)
+		}
 
-	// Create writer with automatic chunking for large files
-	writer := object.NewWriter(ctx)
-	writer.ChunkSize = 16 * 1024 * 1024 // 16MB chunks for streaming
+		bucket := g.client.Bucket(g.bucketName)
+		object := bucket.Object(objectName)
 
-	// Wrap reader with progress tracking and hash calculation
-	hash := sha256.New()
-	reader := NewProgressReader(io.TeeReader(file, hash), fileSize, progress)
+		// Create writer with automatic chunking for large files
+		writer := object.NewWriter(ctx)
+		writer.ChunkSize = 16 * 1024 * 1024 // 16MB chunks for streaming
 
-	// Upload with progress tracking
-	_, err = io.Copy(writer, reader)
-	if err != nil {
-		writer.Close()
-		return fmt.Errorf("failed to upload object: %w", err)
-	}
+		// Wrap reader with progress tracking and hash calculation
+		hash := sha256.New()
+		reader := NewProgressReader(io.TeeReader(file, hash), fileSize, progress)
 
-	// Close writer (finalizes upload)
-	if err := writer.Close(); err != nil {
-		return fmt.Errorf("failed to finalize upload: %w", err)
-	}
+		// Upload with progress tracking
+		_, err = io.Copy(writer, reader)
+		if err != nil {
+			writer.Close()
+			return fmt.Errorf("failed to upload object: %w", err)
+		}
 
-	// Store checksum as metadata
-	checksum := hex.EncodeToString(hash.Sum(nil))
-	_, err = object.Update(ctx, storage.ObjectAttrsToUpdate{
-		Metadata: map[string]string{
-			"sha256": checksum,
-		},
-	})
-	if err != nil {
-		// Non-fatal: upload succeeded but metadata failed
-		fmt.Fprintf(os.Stderr, "Warning: failed to set object metadata: %v\n", err)
-	}
+		// Close writer (finalizes upload)
+		if err := writer.Close(); err != nil {
+			return fmt.Errorf("failed to finalize upload: %w", err)
+		}
 
-	return nil
+		// Store checksum as metadata
+		checksum := hex.EncodeToString(hash.Sum(nil))
+		_, err = object.Update(ctx, storage.ObjectAttrsToUpdate{
+			Metadata: map[string]string{
+				"sha256": checksum,
+			},
+		})
+		if err != nil {
+			// Non-fatal: upload succeeded but metadata failed
+			fmt.Fprintf(os.Stderr, "Warning: failed to set object metadata: %v\n", err)
+		}
+
+		return nil
+	}, func(err error, duration time.Duration) {
+		fmt.Printf("[GCS] Upload retry in %v: %v\n", duration, err)
+	})
 }
 
-// Download downloads a file from Google Cloud Storage
+// Download downloads a file from Google Cloud Storage with retry
 func (g *GCSBackend) Download(ctx context.Context, remotePath, localPath string, progress ProgressCallback) error {
 	objectName := strings.TrimPrefix(remotePath, "/")
 

@@ -159,30 +168,34 @@ func (g *GCSBackend) Download(ctx context.Context, remotePath, localPath string,
 
 	fileSize := attrs.Size
 
-	// Create reader
-	reader, err := object.NewReader(ctx)
-	if err != nil {
-		return fmt.Errorf("failed to download object: %w", err)
-	}
-	defer reader.Close()
+	return RetryOperationWithNotify(ctx, DefaultRetryConfig(), func() error {
+		// Create reader
+		reader, err := object.NewReader(ctx)
+		if err != nil {
+			return fmt.Errorf("failed to download object: %w", err)
+		}
+		defer reader.Close()
 
-	// Create local file
-	file, err := os.Create(localPath)
-	if err != nil {
-		return fmt.Errorf("failed to create file: %w", err)
-	}
-	defer file.Close()
+		// Create/truncate local file
+		file, err := os.Create(localPath)
+		if err != nil {
+			return fmt.Errorf("failed to create file: %w", err)
+		}
+		defer file.Close()
 
-	// Wrap reader with progress tracking
-	progressReader := NewProgressReader(reader, fileSize, progress)
+		// Wrap reader with progress tracking
+		progressReader := NewProgressReader(reader, fileSize, progress)
 
-	// Copy with progress
-	_, err = io.Copy(file, progressReader)
-	if err != nil {
-		return fmt.Errorf("failed to write file: %w", err)
-	}
+		// Copy with progress
+		_, err = io.Copy(file, progressReader)
+		if err != nil {
+			return fmt.Errorf("failed to write file: %w", err)
+		}
 
-	return nil
+		return nil
+	}, func(err error, duration time.Duration) {
+		fmt.Printf("[GCS] Download retry in %v: %v\n", duration, err)
+	})
 }
 
 // Delete deletes a file from Google Cloud Storage
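NewProgressReader appears in every backend but is defined outside this diff. A hedged sketch of what such a wrapper typically looks like; the callback signature here is an assumption, not the repository's actual ProgressCallback:

	// progressReader wraps an io.Reader and reports cumulative bytes read.
	type progressReader struct {
		r     io.Reader
		total int64
		read  int64
		cb    func(read, total int64) // assumed callback shape
	}

	func (p *progressReader) Read(b []byte) (int, error) {
		n, err := p.r.Read(b)
		p.read += int64(n)
		if p.cb != nil {
			p.cb(p.read, p.total)
		}
		return n, err
	}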
internal/cloud/retry.go (new file, 257 lines)
@@ -0,0 +1,257 @@
package cloud

import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// RetryConfig configures retry behavior
type RetryConfig struct {
	MaxRetries      int           // Maximum number of retries (0 = unlimited)
	InitialInterval time.Duration // Initial backoff interval
	MaxInterval     time.Duration // Maximum backoff interval
	MaxElapsedTime  time.Duration // Maximum total time for retries
	Multiplier      float64       // Backoff multiplier
}

// DefaultRetryConfig returns sensible defaults for cloud operations
func DefaultRetryConfig() *RetryConfig {
	return &RetryConfig{
		MaxRetries:      5,
		InitialInterval: 500 * time.Millisecond,
		MaxInterval:     30 * time.Second,
		MaxElapsedTime:  5 * time.Minute,
		Multiplier:      2.0,
	}
}

// AggressiveRetryConfig returns config for critical operations that need more retries
func AggressiveRetryConfig() *RetryConfig {
	return &RetryConfig{
		MaxRetries:      10,
		InitialInterval: 1 * time.Second,
		MaxInterval:     60 * time.Second,
		MaxElapsedTime:  15 * time.Minute,
		Multiplier:      1.5,
	}
}

// QuickRetryConfig returns config for operations that should fail fast
func QuickRetryConfig() *RetryConfig {
	return &RetryConfig{
		MaxRetries:      3,
		InitialInterval: 100 * time.Millisecond,
		MaxInterval:     5 * time.Second,
		MaxElapsedTime:  30 * time.Second,
		Multiplier:      2.0,
	}
}

// RetryOperation executes an operation with exponential backoff retry
func RetryOperation(ctx context.Context, cfg *RetryConfig, operation func() error) error {
	if cfg == nil {
		cfg = DefaultRetryConfig()
	}

	// Create exponential backoff
	expBackoff := backoff.NewExponentialBackOff()
	expBackoff.InitialInterval = cfg.InitialInterval
	expBackoff.MaxInterval = cfg.MaxInterval
	expBackoff.MaxElapsedTime = cfg.MaxElapsedTime
	expBackoff.Multiplier = cfg.Multiplier
	expBackoff.Reset()

	// Wrap with max retries if specified
	var b backoff.BackOff = expBackoff
	if cfg.MaxRetries > 0 {
		b = backoff.WithMaxRetries(expBackoff, uint64(cfg.MaxRetries))
	}

	// Add context support
	b = backoff.WithContext(b, ctx)

	// Track attempts for logging
	attempt := 0

	// Wrap operation to handle permanent vs retryable errors
	wrappedOp := func() error {
		attempt++
		err := operation()
		if err == nil {
			return nil
		}

		// Check if error is permanent (should not retry)
		if IsPermanentError(err) {
			return backoff.Permanent(err)
		}

		return err
	}

	return backoff.Retry(wrappedOp, b)
}

// RetryOperationWithNotify executes an operation with retry and calls notify on each retry
func RetryOperationWithNotify(ctx context.Context, cfg *RetryConfig, operation func() error, notify func(err error, duration time.Duration)) error {
	if cfg == nil {
		cfg = DefaultRetryConfig()
	}

	// Create exponential backoff
	expBackoff := backoff.NewExponentialBackOff()
	expBackoff.InitialInterval = cfg.InitialInterval
	expBackoff.MaxInterval = cfg.MaxInterval
	expBackoff.MaxElapsedTime = cfg.MaxElapsedTime
	expBackoff.Multiplier = cfg.Multiplier
	expBackoff.Reset()

	// Wrap with max retries if specified
	var b backoff.BackOff = expBackoff
	if cfg.MaxRetries > 0 {
		b = backoff.WithMaxRetries(expBackoff, uint64(cfg.MaxRetries))
	}

	// Add context support
	b = backoff.WithContext(b, ctx)

	// Wrap operation to handle permanent vs retryable errors
	wrappedOp := func() error {
		err := operation()
		if err == nil {
			return nil
		}

		// Check if error is permanent (should not retry)
		if IsPermanentError(err) {
			return backoff.Permanent(err)
		}

		return err
	}

	return backoff.RetryNotify(wrappedOp, b, notify)
}

// IsPermanentError returns true if the error should not be retried
func IsPermanentError(err error) bool {
	if err == nil {
		return false
	}

	errStr := strings.ToLower(err.Error())

	// Authentication/authorization errors - don't retry
	permanentPatterns := []string{
		"access denied",
		"forbidden",
		"unauthorized",
		"invalid credentials",
		"invalid access key",
		"invalid secret",
		"no such bucket",
		"bucket not found",
		"container not found",
		"nosuchbucket",
		"nosuchkey",
		"invalid argument",
		"malformed",
		"invalid request",
		"permission denied",
		"access control",
		"policy",
	}

	for _, pattern := range permanentPatterns {
		if strings.Contains(errStr, pattern) {
			return true
		}
	}

	return false
}

// IsRetryableError returns true if the error is transient and should be retried
func IsRetryableError(err error) bool {
	if err == nil {
		return false
	}

	// Network errors are typically retryable
	var netErr net.Error
	if ok := isNetError(err, &netErr); ok {
		return netErr.Timeout() || netErr.Temporary()
	}

	errStr := strings.ToLower(err.Error())

	// Transient errors - should retry
	retryablePatterns := []string{
		"timeout",
		"connection reset",
		"connection refused",
		"connection closed",
		"eof",
		"broken pipe",
		"temporary failure",
		"service unavailable",
		"internal server error",
		"bad gateway",
		"gateway timeout",
		"too many requests",
		"rate limit",
		"throttl",
		"slowdown",
		"try again",
		"retry",
	}

	for _, pattern := range retryablePatterns {
		if strings.Contains(errStr, pattern) {
			return true
		}
	}

	return false
}

// isNetError checks if err wraps a net.Error
func isNetError(err error, target *net.Error) bool {
	for err != nil {
		if ne, ok := err.(net.Error); ok {
			*target = ne
			return true
		}
		// Try to unwrap
		if unwrapper, ok := err.(interface{ Unwrap() error }); ok {
			err = unwrapper.Unwrap()
		} else {
			break
		}
	}
	return false
}

// WithRetry is a helper that wraps a function with default retry logic
func WithRetry(ctx context.Context, operationName string, fn func() error) error {
	notify := func(err error, duration time.Duration) {
		// Log retry attempts (caller can provide their own logger if needed)
		fmt.Printf("[RETRY] %s failed, retrying in %v: %v\n", operationName, duration, err)
	}

	return RetryOperationWithNotify(ctx, DefaultRetryConfig(), fn, notify)
}

// WithRetryConfig is a helper that wraps a function with custom retry config
func WithRetryConfig(ctx context.Context, cfg *RetryConfig, operationName string, fn func() error) error {
	notify := func(err error, duration time.Duration) {
		fmt.Printf("[RETRY] %s failed, retrying in %v: %v\n", operationName, duration, err)
	}

	return RetryOperationWithNotify(ctx, cfg, fn, notify)
}
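A usage sketch for the helpers above, choosing a policy per call site. Only functions defined in retry.go are used; the wrapped operations are stand-ins:

	// Fail fast for a cheap existence probe...
	err := WithRetryConfig(ctx, QuickRetryConfig(), "probe bucket", func() error {
		return probeBucket(ctx) // stand-in for any transient-prone call
	})

	// ...retry hard for the transfer itself. The closure must be safe to
	// re-run, which is why the backends above rewind their files first.
	err = RetryOperationWithNotify(ctx, AggressiveRetryConfig(), func() error {
		return transfer(ctx) // stand-in
	}, func(err error, d time.Duration) {
		fmt.Printf("[transfer] retry in %v: %v\n", d, err)
	})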
@@ -7,6 +7,7 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/config"

@@ -123,63 +124,81 @@ func (s *S3Backend) Upload(ctx context.Context, localPath, remotePath string, pr
 	return s.uploadSimple(ctx, file, key, fileSize, progress)
 }
 
-// uploadSimple performs a simple single-part upload
+// uploadSimple performs a simple single-part upload with retry
 func (s *S3Backend) uploadSimple(ctx context.Context, file *os.File, key string, fileSize int64, progress ProgressCallback) error {
-	// Create progress reader
-	var reader io.Reader = file
-	if progress != nil {
-		reader = NewProgressReader(file, fileSize, progress)
-	}
+	return RetryOperationWithNotify(ctx, DefaultRetryConfig(), func() error {
+		// Reset file position for retry
+		if _, err := file.Seek(0, 0); err != nil {
+			return fmt.Errorf("failed to reset file position: %w", err)
+		}
 
-	// Upload to S3
-	_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
-		Bucket: aws.String(s.bucket),
-		Key:    aws.String(key),
-		Body:   reader,
-	})
-	if err != nil {
-		return fmt.Errorf("failed to upload to S3: %w", err)
-	}
+		// Create progress reader
+		var reader io.Reader = file
+		if progress != nil {
+			reader = NewProgressReader(file, fileSize, progress)
+		}
 
-	return nil
+		// Upload to S3
+		_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
+			Bucket: aws.String(s.bucket),
+			Key:    aws.String(key),
+			Body:   reader,
+		})
+		if err != nil {
+			return fmt.Errorf("failed to upload to S3: %w", err)
+		}
+
+		return nil
+	}, func(err error, duration time.Duration) {
+		fmt.Printf("[S3] Upload retry in %v: %v\n", duration, err)
+	})
 }
 
-// uploadMultipart performs a multipart upload for large files
+// uploadMultipart performs a multipart upload for large files with retry
 func (s *S3Backend) uploadMultipart(ctx context.Context, file *os.File, key string, fileSize int64, progress ProgressCallback) error {
-	// Create uploader with custom options
-	uploader := manager.NewUploader(s.client, func(u *manager.Uploader) {
-		// Part size: 10MB
-		u.PartSize = 10 * 1024 * 1024
-
-		// Upload up to 10 parts concurrently
-		u.Concurrency = 10
-
-		// Leave parts on failure for debugging
-		u.LeavePartsOnError = false
-	})
-
-	// Wrap file with progress reader
-	var reader io.Reader = file
-	if progress != nil {
-		reader = NewProgressReader(file, fileSize, progress)
-	}
-
-	// Upload with multipart
-	_, err := uploader.Upload(ctx, &s3.PutObjectInput{
-		Bucket: aws.String(s.bucket),
-		Key:    aws.String(key),
-		Body:   reader,
-	})
-
-	if err != nil {
-		return fmt.Errorf("multipart upload failed: %w", err)
-	}
-
-	return nil
+	return RetryOperationWithNotify(ctx, AggressiveRetryConfig(), func() error {
+		// Reset file position for retry
+		if _, err := file.Seek(0, 0); err != nil {
+			return fmt.Errorf("failed to reset file position: %w", err)
+		}
+
+		// Create uploader with custom options
+		uploader := manager.NewUploader(s.client, func(u *manager.Uploader) {
+			// Part size: 10MB
+			u.PartSize = 10 * 1024 * 1024
+
+			// Upload up to 10 parts concurrently
+			u.Concurrency = 10
+
+			// Do not leave orphaned parts behind on failure
+			u.LeavePartsOnError = false
+		})
+
+		// Wrap file with progress reader
+		var reader io.Reader = file
+		if progress != nil {
+			reader = NewProgressReader(file, fileSize, progress)
+		}
+
+		// Upload with multipart
+		_, err := uploader.Upload(ctx, &s3.PutObjectInput{
+			Bucket: aws.String(s.bucket),
+			Key:    aws.String(key),
+			Body:   reader,
+		})
+		if err != nil {
+			return fmt.Errorf("multipart upload failed: %w", err)
+		}
+
+		return nil
+	}, func(err error, duration time.Duration) {
+		fmt.Printf("[S3] Multipart upload retry in %v: %v\n", duration, err)
+	})
 }
 
-// Download downloads a file from S3
+// Download downloads a file from S3 with retry
 func (s *S3Backend) Download(ctx context.Context, remotePath, localPath string, progress ProgressCallback) error {
 	// Build S3 key
 	key := s.buildKey(remotePath)

@@ -190,39 +209,44 @@ func (s *S3Backend) Download(ctx context.Context, remotePath, localPath string,
 		return fmt.Errorf("failed to get object size: %w", err)
 	}
 
-	// Download from S3
-	result, err := s.client.GetObject(ctx, &s3.GetObjectInput{
-		Bucket: aws.String(s.bucket),
-		Key:    aws.String(key),
-	})
-	if err != nil {
-		return fmt.Errorf("failed to download from S3: %w", err)
-	}
-	defer result.Body.Close()
-
-	// Create local file
+	// Create directory for local file
 	if err := os.MkdirAll(filepath.Dir(localPath), 0755); err != nil {
 		return fmt.Errorf("failed to create directory: %w", err)
 	}
 
-	outFile, err := os.Create(localPath)
-	if err != nil {
-		return fmt.Errorf("failed to create local file: %w", err)
-	}
-	defer outFile.Close()
+	return RetryOperationWithNotify(ctx, DefaultRetryConfig(), func() error {
+		// Download from S3
+		result, err := s.client.GetObject(ctx, &s3.GetObjectInput{
+			Bucket: aws.String(s.bucket),
+			Key:    aws.String(key),
+		})
+		if err != nil {
+			return fmt.Errorf("failed to download from S3: %w", err)
+		}
+		defer result.Body.Close()
 
-	// Copy with progress tracking
-	var reader io.Reader = result.Body
-	if progress != nil {
-		reader = NewProgressReader(result.Body, size, progress)
-	}
+		// Create/truncate local file
+		outFile, err := os.Create(localPath)
+		if err != nil {
+			return fmt.Errorf("failed to create local file: %w", err)
+		}
+		defer outFile.Close()
 
-	_, err = io.Copy(outFile, reader)
-	if err != nil {
-		return fmt.Errorf("failed to write file: %w", err)
-	}
+		// Copy with progress tracking
+		var reader io.Reader = result.Body
+		if progress != nil {
+			reader = NewProgressReader(result.Body, size, progress)
+		}
 
-	return nil
+		_, err = io.Copy(outFile, reader)
+		if err != nil {
+			return fmt.Errorf("failed to write file: %w", err)
+		}
+
+		return nil
+	}, func(err error, duration time.Duration) {
+		fmt.Printf("[S3] Download retry in %v: %v\n", duration, err)
+	})
 }
 
 // List lists all backup files in S3
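One sizing caveat on the fixed settings above: S3 allows at most 10,000 parts per multipart upload, so the 10 MiB part size puts a ceiling on how large a single backup object can be:

	// 10,000 parts x 10 MiB/part ~ 97.7 GiB with the settings above;
	// dumps beyond that would need a larger PartSize.
	const maxMultipartObject = 10000 * 10 * 1024 * 1024 // bytes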
@@ -217,14 +217,17 @@ func New() *Config {
 		SingleDBName:  getEnvString("SINGLE_DB_NAME", ""),
 		RestoreDBName: getEnvString("RESTORE_DB_NAME", ""),
 
-		// Timeouts
-		ClusterTimeoutMinutes: getEnvInt("CLUSTER_TIMEOUT_MIN", 240),
+		// Timeouts - default 24 hours (1440 min) to handle very large databases with large objects
+		ClusterTimeoutMinutes: getEnvInt("CLUSTER_TIMEOUT_MIN", 1440),
 
 		// Cluster parallelism (default: 2 concurrent operations for faster cluster backup/restore)
 		ClusterParallelism: getEnvInt("CLUSTER_PARALLELISM", 2),
 
+		// Working directory for large operations (default: system temp)
+		WorkDir: getEnvString("WORK_DIR", ""),
+
 		// Swap file management
-		SwapFilePath:   getEnvString("SWAP_FILE_PATH", "/tmp/dbbackup_swap"),
+		SwapFilePath:   "", // Will be set after WorkDir is initialized
 		SwapFileSizeGB: getEnvInt("SWAP_FILE_SIZE_GB", 0), // 0 = disabled by default
 		AutoSwap:       getEnvBool("AUTO_SWAP", false),

@@ -264,6 +267,13 @@ func New() *Config {
 		cfg.SSLMode = "prefer"
 	}
 
+	// Set SwapFilePath using WorkDir if not explicitly set via env var
+	if envSwap := os.Getenv("SWAP_FILE_PATH"); envSwap != "" {
+		cfg.SwapFilePath = envSwap
+	} else {
+		cfg.SwapFilePath = filepath.Join(cfg.GetEffectiveWorkDir(), "dbbackup_swap")
+	}
+
 	return cfg
 }

@@ -499,6 +509,14 @@ func GetCurrentOSUser() string {
 	return getCurrentUser()
 }
 
+// GetEffectiveWorkDir returns the configured WorkDir or system temp as fallback
+func (c *Config) GetEffectiveWorkDir() string {
+	if c.WorkDir != "" {
+		return c.WorkDir
+	}
+	return os.TempDir()
+}
+
 func getDefaultBackupDir() string {
 	// Try to create a sensible default backup directory
 	homeDir, _ := os.UserHomeDir()

@@ -516,7 +534,7 @@ func getDefaultBackupDir() string {
 		return "/var/lib/pgsql/pg_backups"
 	}
 
-	return "/tmp/db_backups"
+	return filepath.Join(os.TempDir(), "db_backups")
 }
 
 // CPU-related helper functions
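Read together, these config hunks resolve like this when no environment variables or config file overrides are present (a walk-through of the code above, not captured program output):

	cfg := New()
	// cfg.ClusterTimeoutMinutes == 1440   (24 h; was 240)
	// cfg.WorkDir == ""                   so GetEffectiveWorkDir() falls back to os.TempDir()
	// cfg.SwapFilePath == filepath.Join(os.TempDir(), "dbbackup_swap")
	// getDefaultBackupDir()'s last resort is likewise filepath.Join(os.TempDir(), "db_backups")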
@@ -28,8 +28,9 @@ type LocalConfig struct {
 	DumpJobs int
 
 	// Performance settings
 	CPUWorkload string
 	MaxCores    int
+	ClusterTimeout int // Cluster operation timeout in minutes (default: 1440 = 24 hours)
 
 	// Security settings
 	RetentionDays int

@@ -121,6 +122,10 @@ func LoadLocalConfig() (*LocalConfig, error) {
 			if mc, err := strconv.Atoi(value); err == nil {
 				cfg.MaxCores = mc
 			}
+		case "cluster_timeout":
+			if ct, err := strconv.Atoi(value); err == nil {
+				cfg.ClusterTimeout = ct
+			}
 		}
 	case "security":
 		switch key {

@@ -199,6 +204,9 @@ func SaveLocalConfig(cfg *LocalConfig) error {
 	if cfg.MaxCores != 0 {
 		sb.WriteString(fmt.Sprintf("max_cores = %d\n", cfg.MaxCores))
 	}
+	if cfg.ClusterTimeout != 0 {
+		sb.WriteString(fmt.Sprintf("cluster_timeout = %d\n", cfg.ClusterTimeout))
+	}
 	sb.WriteString("\n")
 
 	// Security section

@@ -268,6 +276,10 @@ func ApplyLocalConfig(cfg *Config, local *LocalConfig) {
 	if local.MaxCores != 0 {
 		cfg.MaxCores = local.MaxCores
 	}
+	// Apply cluster timeout from config file (overrides default)
+	if local.ClusterTimeout != 0 {
+		cfg.ClusterTimeoutMinutes = local.ClusterTimeout
+	}
 	if cfg.RetentionDays == 30 && local.RetentionDays != 0 {
 		cfg.RetentionDays = local.RetentionDays
 	}

@@ -282,21 +294,22 @@ func ApplyLocalConfig(cfg *Config, local *LocalConfig) {
 // ConfigFromConfig creates a LocalConfig from a Config
 func ConfigFromConfig(cfg *Config) *LocalConfig {
 	return &LocalConfig{
 		DBType:      cfg.DatabaseType,
 		Host:        cfg.Host,
 		Port:        cfg.Port,
 		User:        cfg.User,
 		Database:    cfg.Database,
 		SSLMode:     cfg.SSLMode,
 		BackupDir:   cfg.BackupDir,
 		WorkDir:     cfg.WorkDir,
 		Compression: cfg.CompressionLevel,
 		Jobs:        cfg.Jobs,
 		DumpJobs:    cfg.DumpJobs,
 		CPUWorkload: cfg.CPUWorkloadType,
 		MaxCores:    cfg.MaxCores,
+		ClusterTimeout: cfg.ClusterTimeoutMinutes,
 		RetentionDays: cfg.RetentionDays,
 		MinBackups:    cfg.MinBackups,
 		MaxRetries:    cfg.MaxRetries,
 	}
 }
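Based on the format strings in SaveLocalConfig above, a saved config now carries the timeout alongside max_cores. A sample of the affected section; the [performance] header name is inferred from the parser's section switch and is an assumption:

	[performance]
	max_cores = 8
	cluster_timeout = 1440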
@@ -15,7 +15,6 @@ import (
 
 	"github.com/jackc/pgx/v5/pgxpool"
 	"github.com/jackc/pgx/v5/stdlib"
-	_ "github.com/jackc/pgx/v5/stdlib" // PostgreSQL driver (pgx)
 )
 
 // PostgreSQL implements Database interface for PostgreSQL
internal/dedup/chunker.go (new file, 228 lines)
@@ -0,0 +1,228 @@
// Package dedup provides content-defined chunking and deduplication
// for database backups, similar to restic/borgbackup but with native
// database dump support.
package dedup

import (
	"crypto/sha256"
	"encoding/hex"
	"io"
)

// Chunker constants for content-defined chunking
const (
	// DefaultMinChunkSize is the minimum chunk size (4KB)
	DefaultMinChunkSize = 4 * 1024

	// DefaultAvgChunkSize is the target average chunk size (8KB)
	DefaultAvgChunkSize = 8 * 1024

	// DefaultMaxChunkSize is the maximum chunk size (32KB)
	DefaultMaxChunkSize = 32 * 1024

	// WindowSize for the rolling hash
	WindowSize = 48

	// ChunkMask determines average chunk size.
	// For 8KB average: we look for hash % 8192 == 0
	ChunkMask = DefaultAvgChunkSize - 1
)

// Gear hash table - random values for each byte.
// This is used for the Gear rolling hash, which is simpler and faster than Buzhash.
var gearTable = [256]uint64{
	0x5c95c078, 0x22408989, 0x2d48a214, 0x12842087, 0x530f8afb, 0x474536b9, 0x2963b4f1, 0x44cb738b,
	0x4ea7403d, 0x4d606b6e, 0x074ec5d3, 0x3f7e82f4, 0x4e3d26e7, 0x5cb4e82f, 0x7b0a1ef5, 0x3d4e7c92,
	0x2a81ed69, 0x7f853df8, 0x452c8cf7, 0x0f4f3c9d, 0x3a5e81b7, 0x6cb2d819, 0x2e4c5f93, 0x7e8a1c57,
	0x1f9d3e8c, 0x4b7c2a5d, 0x3c8f1d6e, 0x5d2a7b4f, 0x6e9c3f8a, 0x7a4d1e5c, 0x2b8c4f7d, 0x4f7d2c9e,
	0x5a1e3d7c, 0x6b4f8a2d, 0x3e7c9d5a, 0x7d2a4f8b, 0x4c9e7d3a, 0x5b8a1c6e, 0x2d5f4a9c, 0x7a3c8d6b,
	0x6e2a7b4d, 0x3f8c5d9a, 0x4a7d3e5b, 0x5c9a2d7e, 0x7b4e8f3c, 0x2a6d9c5b, 0x3e4a7d8c, 0x5d7b2e9a,
	0x4c8a3d7b, 0x6e9d5c8a, 0x7a3e4d9c, 0x2b5c8a7d, 0x4d7e3a9c, 0x5a9c7d3e, 0x3c8b5a7d, 0x7d4e9c2a,
	0x6a3d8c5b, 0x4e7a9d3c, 0x5c2a7b9e, 0x3a9d4e7c, 0x7b8c5a2d, 0x2d7e4a9c, 0x4a3c9d7b, 0x5e9a7c3d,
	0x6c4d8a5b, 0x3b7e9c4a, 0x7a5c2d8b, 0x4d9a3e7c, 0x5b7c4a9e, 0x2e8a5d3c, 0x3c9e7a4d, 0x7d4a8c5b,
	0x6b2d9a7c, 0x4a8c3e5d, 0x5d7a9c2e, 0x3e4c7b9a, 0x7c9d5a4b, 0x2a7e8c3d, 0x4c5a9d7e, 0x5a3e7c4b,
	0x6d8a2c9e, 0x3c7b4a8d, 0x7e2d9c5a, 0x4b9a7e3c, 0x5c4d8a7b, 0x2d9e3c5a, 0x3a7c9d4e, 0x7b5a4c8d,
	0x6a9c2e7b, 0x4d3e8a9c, 0x5e7b4d2a, 0x3b9a7c5d, 0x7c4e8a3b, 0x2e7d9c4a, 0x4a8b3e7d, 0x5d2c9a7e,
	0x6c7a5d3e, 0x3e9c4a7b, 0x7a8d2c5e, 0x4c3e9a7d, 0x5b9c7e2a, 0x2a4d7c9e, 0x3d8a5c4b, 0x7e7b9a3c,
	0x6b4a8d9e, 0x4e9c3b7a, 0x5a7d4e9c, 0x3c2a8b7d, 0x7d9e5c4a, 0x2b8a7d3e, 0x4d5c9a2b, 0x5e3a7c8d,
	0x6a9d4b7c, 0x3b7a9c5e, 0x7c4b8a2d, 0x4a9e7c3b, 0x5d2b9a4e, 0x2e7c4d9a, 0x3a9b7e4c, 0x7e5a3c8b,
	0x6c8a9d4e, 0x4b7c2a5e, 0x5a3e9c7d, 0x3d9a4b7c, 0x7a2d5e9c, 0x2c8b7a3d, 0x4e9c5a2b, 0x5b4d7e9a,
	0x6d7a3c8b, 0x3e2b9a5d, 0x7c9d4a7e, 0x4a5e3c9b, 0x5e7a9d2c, 0x2b3c7e9a, 0x3a9e4b7d, 0x7d8a5c3e,
	0x6b9c2d4a, 0x4c7e9a3b, 0x5a2c8b7e, 0x3b4d9a5c, 0x7e9b3a4d, 0x2d5a7c9e, 0x4b8d3e7a, 0x5c9a4b2d,
	0x6a7c8d9e, 0x3c9e5a7b, 0x7b4a2c9d, 0x4d3b7e9a, 0x5e9c4a3b, 0x2a7b9d4e, 0x3e5c8a7b, 0x7a9d3e5c,
	0x6c2a7b8d, 0x4e9a5c3b, 0x5b7d2a9e, 0x3a4e9c7b, 0x7d8b3a5c, 0x2c9e7a4b, 0x4a3d5e9c, 0x5d7b8a2e,
	0x6b9a4c7d, 0x3d5a9e4b, 0x7e2c7b9a, 0x4b9d3a5e, 0x5c4e7a9d, 0x2e8a3c7b, 0x3b7c9e5a, 0x7a4d8b3e,
	0x6d9c5a2b, 0x4a7e3d9c, 0x5e2a9b7d, 0x3c9a7e4b, 0x7b3e5c9a, 0x2a4b8d7e, 0x4d9c2a5b, 0x5a7d9e3c,
	0x6c3b8a7d, 0x3e9d4a5c, 0x7d5c2b9e, 0x4c8a7d3b, 0x5b9e3c7a, 0x2d7a9c4e, 0x3a5e7b9d, 0x7e8b4a3c,
	0x6a2d9e7b, 0x4b3e5a9d, 0x5d9c7b2a, 0x3b7d4e9c, 0x7c9a3b5e, 0x2e5c8a7d, 0x4a7b9d3e, 0x5c3a7e9b,
	0x6d9e5c4a, 0x3c4a7b9e, 0x7a9d2e5c, 0x4e7c9a3d, 0x5a8b4e7c, 0x2b9a3d7e, 0x3d5b8a9c, 0x7b4e9a2d,
	0x6c7d3a9e, 0x4a9c5e3b, 0x5e2b7d9a, 0x3a8d4c7b, 0x7d3e9a5c, 0x2c7a8b9e, 0x4b5d3a7c, 0x5c9a7e2b,
	0x6a4b9d3e, 0x3e7c2a9d, 0x7c8a5b4e, 0x4d9e3c7a, 0x5b3a9e7c, 0x2e9c7b4a, 0x3b4e8a9d, 0x7a9c4e3b,
	0x6d2a7c9e, 0x4c8b9a5d, 0x5a9e2b7c, 0x3c3d7a9e, 0x7e5a9c4b, 0x2a8d3e7c, 0x4e7a5c9b, 0x5d9b8a2e,
	0x6b4c9e7a, 0x3a9d5b4e, 0x7b2e8a9c, 0x4a5c3e9b, 0x5c9a4d7e, 0x2d7e9a3c, 0x3e8b7c5a, 0x7c9e2a4d,
	0x6a3b7d9c, 0x4d9a8b3e, 0x5e5c2a7b, 0x3b4a9d7c, 0x7a7c5e9b, 0x2c9b4a8d, 0x4b3e7c9a, 0x5a9d3b7e,
	0x6c8a4e9d, 0x3d7b9c5a, 0x7e2a4b9c, 0x4c9e5d3a, 0x5b7a9c4e, 0x2e4d8a7b, 0x3a9c7e5d, 0x7b8d3a9e,
	0x6d5c9a4b, 0x4a2e7b9d, 0x5d9b4c8a, 0x3c7a9e2b, 0x7d4b8c9e, 0x2b9a5c4d, 0x4e7d3a9c, 0x5c8a9e7b,
}

// Chunk represents a single deduplicated chunk
type Chunk struct {
	// Hash is the SHA-256 hash of the chunk data (content-addressed)
	Hash string

	// Data is the raw chunk bytes
	Data []byte

	// Offset is the byte offset in the original file
	Offset int64

	// Length is the size of this chunk
	Length int
}

// ChunkerConfig holds configuration for the chunker
type ChunkerConfig struct {
	MinSize int // Minimum chunk size
	AvgSize int // Target average chunk size
	MaxSize int // Maximum chunk size
}

// DefaultChunkerConfig returns sensible defaults
func DefaultChunkerConfig() ChunkerConfig {
	return ChunkerConfig{
		MinSize: DefaultMinChunkSize,
		AvgSize: DefaultAvgChunkSize,
		MaxSize: DefaultMaxChunkSize,
	}
}

// Chunker performs content-defined chunking using Gear hash
type Chunker struct {
	reader io.Reader
	config ChunkerConfig

	// Rolling hash state
	hash uint64

	// Current chunk state
	buf    []byte
	offset int64
	mask   uint64
}

// NewChunker creates a new chunker for the given reader
func NewChunker(r io.Reader, config ChunkerConfig) *Chunker {
	// Calculate mask for target average size.
	// We want: avg_size = 1 / P(boundary)
	// With mask, P(boundary) = 1 / (mask + 1)
	// So mask = avg_size - 1
	mask := uint64(config.AvgSize - 1)

	return &Chunker{
		reader: r,
		config: config,
		buf:    make([]byte, 0, config.MaxSize),
		mask:   mask,
	}
}

// Next returns the next chunk from the input stream.
// Returns io.EOF when no more data is available.
func (c *Chunker) Next() (*Chunk, error) {
	c.buf = c.buf[:0]
	c.hash = 0

	// Read bytes until we find a chunk boundary or hit max size
	singleByte := make([]byte, 1)

	for {
		n, err := c.reader.Read(singleByte)
		if n == 0 {
			if err == io.EOF {
				// Return remaining data as final chunk
				if len(c.buf) > 0 {
					return c.makeChunk(), nil
				}
				return nil, io.EOF
			}
			if err != nil {
				return nil, err
			}
			continue
		}

		b := singleByte[0]
		c.buf = append(c.buf, b)

		// Update Gear rolling hash:
		// hash = (hash << 1) + gear_table[byte]
		c.hash = (c.hash << 1) + gearTable[b]

		// Check for chunk boundary after minimum size
		if len(c.buf) >= c.config.MinSize {
			// Check if we hit a boundary (hash matches mask pattern)
			if (c.hash & c.mask) == 0 {
				return c.makeChunk(), nil
			}
		}

		// Force boundary at max size
		if len(c.buf) >= c.config.MaxSize {
			return c.makeChunk(), nil
		}
	}
}

// makeChunk creates a Chunk from the current buffer
func (c *Chunker) makeChunk() *Chunk {
	// Compute SHA-256 hash
	h := sha256.Sum256(c.buf)
	hash := hex.EncodeToString(h[:])

	// Copy data
	data := make([]byte, len(c.buf))
	copy(data, c.buf)

	chunk := &Chunk{
		Hash:   hash,
		Data:   data,
		Offset: c.offset,
		Length: len(data),
	}

	c.offset += int64(len(data))
	return chunk
}

// ChunkReader splits a reader into content-defined chunks
// and returns them via a channel for concurrent processing
func ChunkReader(r io.Reader, config ChunkerConfig) (<-chan *Chunk, <-chan error) {
	chunks := make(chan *Chunk, 100)
	errs := make(chan error, 1)

	go func() {
		defer close(chunks)
		defer close(errs)

		chunker := NewChunker(r, config)
		for {
			chunk, err := chunker.Next()
			if err == io.EOF {
				return
			}
			if err != nil {
				errs <- err
				return
			}
			chunks <- chunk
		}
	}()

	return chunks, errs
}

// HashData computes SHA-256 hash of data
func HashData(data []byte) string {
	h := sha256.Sum256(data)
	return hex.EncodeToString(h[:])
}
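A quick sanity check on these constants: a boundary fires when hash & mask == 0 with mask = AvgSize - 1 = 8191, so once MinSize bytes are buffered each further byte ends the chunk with probability about 1/8192. The expected chunk size is therefore roughly MinSize plus AvgSize, not AvgSize alone (ignoring the MaxSize cap):

	// Rough expected chunk size with the defaults above:
	// 4 KiB (minimum, boundary checks suppressed) + ~8 KiB (expected run to a boundary),
	// hard-capped at 32 KiB by DefaultMaxChunkSize.
	expected := DefaultMinChunkSize + DefaultAvgChunkSize
	_ = expected // ~12288 bytes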
internal/dedup/chunker_test.go (new file, 217 lines)
@@ -0,0 +1,217 @@
package dedup

import (
	"bytes"
	"crypto/rand"
	"io"
	"testing"
)

func TestChunker_Basic(t *testing.T) {
	// Create test data
	data := make([]byte, 100*1024) // 100KB
	rand.Read(data)

	chunker := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())

	var chunks []*Chunk
	var totalBytes int

	for {
		chunk, err := chunker.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("Chunker.Next() error: %v", err)
		}

		chunks = append(chunks, chunk)
		totalBytes += chunk.Length

		// Verify chunk properties
		if chunk.Length < DefaultMinChunkSize && len(chunks) < 10 {
			// Only the last chunk can be smaller than min
			// (unless file is smaller than min)
		}
		if chunk.Length > DefaultMaxChunkSize {
			t.Errorf("Chunk %d exceeds max size: %d > %d", len(chunks), chunk.Length, DefaultMaxChunkSize)
		}
		if chunk.Hash == "" {
			t.Errorf("Chunk %d has empty hash", len(chunks))
		}
		if len(chunk.Hash) != 64 { // SHA-256 hex length
			t.Errorf("Chunk %d has invalid hash length: %d", len(chunks), len(chunk.Hash))
		}
	}

	if totalBytes != len(data) {
		t.Errorf("Total bytes mismatch: got %d, want %d", totalBytes, len(data))
	}

	t.Logf("Chunked %d bytes into %d chunks", totalBytes, len(chunks))
	t.Logf("Average chunk size: %d bytes", totalBytes/len(chunks))
}

func TestChunker_Deterministic(t *testing.T) {
	// Same data should produce same chunks
	data := make([]byte, 50*1024)
	rand.Read(data)

	// First pass
	chunker1 := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
	var hashes1 []string
	for {
		chunk, err := chunker1.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		hashes1 = append(hashes1, chunk.Hash)
	}

	// Second pass
	chunker2 := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
	var hashes2 []string
	for {
		chunk, err := chunker2.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		hashes2 = append(hashes2, chunk.Hash)
	}

	// Compare
	if len(hashes1) != len(hashes2) {
		t.Fatalf("Different chunk counts: %d vs %d", len(hashes1), len(hashes2))
	}

	for i := range hashes1 {
		if hashes1[i] != hashes2[i] {
			t.Errorf("Hash mismatch at chunk %d: %s vs %s", i, hashes1[i], hashes2[i])
		}
	}
}

func TestChunker_ShiftedData(t *testing.T) {
	// Test that shifted data still shares chunks (the key CDC benefit)
	original := make([]byte, 100*1024)
	rand.Read(original)

	// Create shifted version (prepend some bytes)
	prefix := make([]byte, 1000)
	rand.Read(prefix)
	shifted := append(prefix, original...)

	// Chunk both
	config := DefaultChunkerConfig()

	chunker1 := NewChunker(bytes.NewReader(original), config)
	hashes1 := make(map[string]bool)
	for {
		chunk, err := chunker1.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		hashes1[chunk.Hash] = true
	}

	chunker2 := NewChunker(bytes.NewReader(shifted), config)
	var matched, total int
	for {
		chunk, err := chunker2.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		total++
		if hashes1[chunk.Hash] {
			matched++
		}
	}

	// Should have significant overlap despite the shift
	overlapRatio := float64(matched) / float64(total)
	t.Logf("Chunk overlap after %d-byte shift: %.1f%% (%d/%d chunks)",
		len(prefix), overlapRatio*100, matched, total)

	// We expect at least 50% overlap for content-defined chunking
	if overlapRatio < 0.5 {
		t.Errorf("Low chunk overlap: %.1f%% (expected >50%%)", overlapRatio*100)
	}
}

func TestChunker_SmallFile(t *testing.T) {
	// File smaller than min chunk size
	data := []byte("hello world")
	chunker := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())

	chunk, err := chunker.Next()
	if err != nil {
		t.Fatal(err)
	}

	if chunk.Length != len(data) {
		t.Errorf("Expected chunk length %d, got %d", len(data), chunk.Length)
	}

	// Should be EOF after
	_, err = chunker.Next()
	if err != io.EOF {
		t.Errorf("Expected EOF, got %v", err)
	}
}

func TestChunker_EmptyFile(t *testing.T) {
	chunker := NewChunker(bytes.NewReader(nil), DefaultChunkerConfig())

	_, err := chunker.Next()
	if err != io.EOF {
		t.Errorf("Expected EOF for empty file, got %v", err)
	}
}

func TestHashData(t *testing.T) {
	hash := HashData([]byte("test"))
	if len(hash) != 64 {
		t.Errorf("Expected 64-char hash, got %d", len(hash))
	}

	// Known SHA-256 of "test"
	expected := "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
	if hash != expected {
		t.Errorf("Hash mismatch: got %s, want %s", hash, expected)
	}
}

func BenchmarkChunker(b *testing.B) {
	// 1MB of random data
	data := make([]byte, 1024*1024)
	rand.Read(data)

	b.ResetTimer()
	b.SetBytes(int64(len(data)))

	for i := 0; i < b.N; i++ {
		chunker := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
		for {
			_, err := chunker.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				b.Fatal(err)
			}
		}
	}
}
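The tests above drive Chunker.Next directly; the channel-based ChunkReader is consumed like this (a fragment using only identifiers defined in chunker.go):

	chunks, errs := ChunkReader(input, DefaultChunkerConfig())
	for chunk := range chunks {
		fmt.Printf("%s  %6d bytes @ %d\n", chunk.Hash[:12], chunk.Length, chunk.Offset)
	}
	// errs is closed by the producer goroutine; a receive yields nil on clean EOF
	if err := <-errs; err != nil {
		return err
	}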
internal/dedup/index.go (new file, 306 lines; listing truncated)
@@ -0,0 +1,306 @@
package dedup

import (
	"database/sql"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	_ "github.com/mattn/go-sqlite3" // SQLite driver
)

// ChunkIndex provides fast chunk lookups using SQLite
type ChunkIndex struct {
	db     *sql.DB
	dbPath string
}

// NewChunkIndex opens or creates a chunk index database at the default location
func NewChunkIndex(basePath string) (*ChunkIndex, error) {
	dbPath := filepath.Join(basePath, "chunks.db")
	return NewChunkIndexAt(dbPath)
}

// NewChunkIndexAt opens or creates a chunk index database at a specific path.
// Use this to put the SQLite index on local storage when chunks are on NFS/CIFS.
func NewChunkIndexAt(dbPath string) (*ChunkIndex, error) {
	// Ensure parent directory exists
	if err := os.MkdirAll(filepath.Dir(dbPath), 0700); err != nil {
		return nil, fmt.Errorf("failed to create index directory: %w", err)
	}

	// Add busy_timeout to handle lock contention gracefully
	db, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000")
	if err != nil {
		return nil, fmt.Errorf("failed to open chunk index: %w", err)
	}

	// Test the connection and check for locking issues
	if err := db.Ping(); err != nil {
		db.Close()
		if isNFSLockingError(err) {
			return nil, fmt.Errorf("database locked (common on NFS/CIFS): %w\n\n"+
				"HINT: Use --index-db to put the SQLite index on local storage:\n"+
				"  dbbackup dedup ... --index-db /var/lib/dbbackup/dedup-index.db", err)
		}
		return nil, fmt.Errorf("failed to connect to chunk index: %w", err)
	}

	idx := &ChunkIndex{db: db, dbPath: dbPath}
	if err := idx.migrate(); err != nil {
		db.Close()
		if isNFSLockingError(err) {
			return nil, fmt.Errorf("database locked during migration (common on NFS/CIFS): %w\n\n"+
				"HINT: Use --index-db to put the SQLite index on local storage:\n"+
				"  dbbackup dedup ... --index-db /var/lib/dbbackup/dedup-index.db", err)
		}
		return nil, err
	}

	return idx, nil
}

// isNFSLockingError checks if an error is likely due to NFS/CIFS locking issues
func isNFSLockingError(err error) bool {
	if err == nil {
		return false
	}
	errStr := err.Error()
	return strings.Contains(errStr, "database is locked") ||
		strings.Contains(errStr, "SQLITE_BUSY") ||
		strings.Contains(errStr, "cannot lock") ||
		strings.Contains(errStr, "lock protocol")
}

// migrate creates the schema if needed
func (idx *ChunkIndex) migrate() error {
	schema := `
	CREATE TABLE IF NOT EXISTS chunks (
		hash TEXT PRIMARY KEY,
		size_raw INTEGER NOT NULL,
		size_stored INTEGER NOT NULL,
		created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
		last_accessed DATETIME,
		ref_count INTEGER DEFAULT 1
	);

	CREATE TABLE IF NOT EXISTS manifests (
		id TEXT PRIMARY KEY,
		database_type TEXT,
		database_name TEXT,
		database_host TEXT,
		created_at DATETIME,
		original_size INTEGER,
		stored_size INTEGER,
		chunk_count INTEGER,
		new_chunks INTEGER,
		dedup_ratio REAL,
		sha256 TEXT,
		verified_at DATETIME
	);

	CREATE INDEX IF NOT EXISTS idx_chunks_created ON chunks(created_at);
	CREATE INDEX IF NOT EXISTS idx_chunks_accessed ON chunks(last_accessed);
	CREATE INDEX IF NOT EXISTS idx_manifests_created ON manifests(created_at);
	CREATE INDEX IF NOT EXISTS idx_manifests_database ON manifests(database_name);
	`

	_, err := idx.db.Exec(schema)
	return err
}

// Close closes the database
func (idx *ChunkIndex) Close() error {
	return idx.db.Close()
}

// AddChunk records a chunk in the index
func (idx *ChunkIndex) AddChunk(hash string, sizeRaw, sizeStored int) error {
	_, err := idx.db.Exec(`
		INSERT INTO chunks (hash, size_raw, size_stored, created_at, last_accessed, ref_count)
		VALUES (?, ?, ?, ?, ?, 1)
		ON CONFLICT(hash) DO UPDATE SET
			ref_count = ref_count + 1,
			last_accessed = ?
	`, hash, sizeRaw, sizeStored, time.Now(), time.Now(), time.Now())
	return err
}

// HasChunk checks if a chunk exists in the index
func (idx *ChunkIndex) HasChunk(hash string) (bool, error) {
	var count int
	err := idx.db.QueryRow("SELECT COUNT(*) FROM chunks WHERE hash = ?", hash).Scan(&count)
	return count > 0, err
}

// GetChunk retrieves chunk metadata
func (idx *ChunkIndex) GetChunk(hash string) (*ChunkMeta, error) {
	var m ChunkMeta
	err := idx.db.QueryRow(`
		SELECT hash, size_raw, size_stored, created_at, ref_count
		FROM chunks WHERE hash = ?
	`, hash).Scan(&m.Hash, &m.SizeRaw, &m.SizeStored, &m.CreatedAt, &m.RefCount)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &m, nil
}

// ChunkMeta holds metadata about a chunk
type ChunkMeta struct {
	Hash       string
	SizeRaw    int64
	SizeStored int64
	CreatedAt  time.Time
	RefCount   int
}

// DecrementRef decreases the reference count for a chunk.
// Returns true if the chunk should be deleted (ref_count <= 0).
func (idx *ChunkIndex) DecrementRef(hash string) (shouldDelete bool, err error) {
	result, err := idx.db.Exec(`
		UPDATE chunks SET ref_count = ref_count - 1 WHERE hash = ?
	`, hash)
	if err != nil {
		return false, err
	}

	affected, _ := result.RowsAffected()
	if affected == 0 {
		return false, nil
	}

	var refCount int
	err = idx.db.QueryRow("SELECT ref_count FROM chunks WHERE hash = ?", hash).Scan(&refCount)
	if err != nil {
		return false, err
	}

	return refCount <= 0, nil
}

// RemoveChunk removes a chunk from the index
func (idx *ChunkIndex) RemoveChunk(hash string) error {
	_, err := idx.db.Exec("DELETE FROM chunks WHERE hash = ?", hash)
	return err
}

// AddManifest records a manifest in the index
func (idx *ChunkIndex) AddManifest(m *Manifest) error {
	_, err := idx.db.Exec(`
		INSERT OR REPLACE INTO manifests
		(id, database_type, database_name, database_host, created_at,
		 original_size, stored_size, chunk_count, new_chunks, dedup_ratio, sha256)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`, m.ID, m.DatabaseType, m.DatabaseName, m.DatabaseHost, m.CreatedAt,
		m.OriginalSize, m.StoredSize, m.ChunkCount, m.NewChunks, m.DedupRatio, m.SHA256)
	return err
}

// RemoveManifest removes a manifest from the index
func (idx *ChunkIndex) RemoveManifest(id string) error {
	_, err := idx.db.Exec("DELETE FROM manifests WHERE id = ?", id)
	return err
}

// UpdateManifestVerified updates the verified timestamp for a manifest
|
||||||
|
func (idx *ChunkIndex) UpdateManifestVerified(id string, verifiedAt time.Time) error {
|
||||||
|
_, err := idx.db.Exec("UPDATE manifests SET verified_at = ? WHERE id = ?", verifiedAt, id)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndexStats holds statistics about the dedup index
|
||||||
|
type IndexStats struct {
|
||||||
|
TotalChunks int64
|
||||||
|
TotalManifests int64
|
||||||
|
TotalSizeRaw int64 // Uncompressed, undeduplicated (per-chunk)
|
||||||
|
TotalSizeStored int64 // On-disk after dedup+compression (per-chunk)
|
||||||
|
DedupRatio float64 // Based on manifests (real dedup ratio)
|
||||||
|
OldestChunk time.Time
|
||||||
|
NewestChunk time.Time
|
||||||
|
|
||||||
|
// Manifest-based stats (accurate dedup calculation)
|
||||||
|
TotalBackupSize int64 // Sum of all backup original sizes
|
||||||
|
TotalNewData int64 // Sum of all new chunks stored
|
||||||
|
SpaceSaved int64 // Difference = what dedup saved
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats returns statistics about the index
|
||||||
|
func (idx *ChunkIndex) Stats() (*IndexStats, error) {
|
||||||
|
stats := &IndexStats{}
|
||||||
|
|
||||||
|
var oldestStr, newestStr string
|
||||||
|
err := idx.db.QueryRow(`
|
||||||
|
SELECT
|
||||||
|
COUNT(*),
|
||||||
|
COALESCE(SUM(size_raw), 0),
|
||||||
|
COALESCE(SUM(size_stored), 0),
|
||||||
|
COALESCE(MIN(created_at), ''),
|
||||||
|
COALESCE(MAX(created_at), '')
|
||||||
|
FROM chunks
|
||||||
|
`).Scan(&stats.TotalChunks, &stats.TotalSizeRaw, &stats.TotalSizeStored,
|
||||||
|
&oldestStr, &newestStr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse time strings
|
||||||
|
if oldestStr != "" {
|
||||||
|
stats.OldestChunk, _ = time.Parse("2006-01-02 15:04:05", oldestStr)
|
||||||
|
}
|
||||||
|
if newestStr != "" {
|
||||||
|
stats.NewestChunk, _ = time.Parse("2006-01-02 15:04:05", newestStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
idx.db.QueryRow("SELECT COUNT(*) FROM manifests").Scan(&stats.TotalManifests)
|
||||||
|
|
||||||
|
// Calculate accurate dedup ratio from manifests
|
||||||
|
// Sum all backup original sizes and all new data stored
|
||||||
|
err = idx.db.QueryRow(`
|
||||||
|
SELECT
|
||||||
|
COALESCE(SUM(original_size), 0),
|
||||||
|
COALESCE(SUM(stored_size), 0)
|
||||||
|
FROM manifests
|
||||||
|
`).Scan(&stats.TotalBackupSize, &stats.TotalNewData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate real dedup ratio: how much data was deduplicated across all backups
|
||||||
|
if stats.TotalBackupSize > 0 {
|
||||||
|
stats.DedupRatio = 1.0 - float64(stats.TotalNewData)/float64(stats.TotalBackupSize)
|
||||||
|
stats.SpaceSaved = stats.TotalBackupSize - stats.TotalNewData
|
||||||
|
}
|
||||||
|
|
||||||
|
return stats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListOrphanedChunks returns chunks that have ref_count <= 0
|
||||||
|
func (idx *ChunkIndex) ListOrphanedChunks() ([]string, error) {
|
||||||
|
rows, err := idx.db.Query("SELECT hash FROM chunks WHERE ref_count <= 0")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var hashes []string
|
||||||
|
for rows.Next() {
|
||||||
|
var hash string
|
||||||
|
if err := rows.Scan(&hash); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
hashes = append(hashes, hash)
|
||||||
|
}
|
||||||
|
return hashes, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Vacuum cleans up the database
|
||||||
|
func (idx *ChunkIndex) Vacuum() error {
|
||||||
|
_, err := idx.db.Exec("VACUUM")
|
||||||
|
return err
|
||||||
|
}
|
||||||
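The dedup ratio in Stats comes straight from the manifests table: 1 minus new bytes stored divided by logical backup bytes, so 100 GiB of backups stored as 12 GiB of new chunks gives 0.88. Below is a minimal usage sketch for the ChunkIndex API above; it is illustrative only, and the dbbackup/internal/dedup import path is an assumption about the module layout.

	package main

	import (
		"fmt"

		"dbbackup/internal/dedup" // assumed import path, not confirmed by the diff
	)

	func main() {
		// Open the index on local storage (WAL mode, busy_timeout set by the constructor)
		idx, err := dedup.NewChunkIndex("/var/lib/dbbackup/dedup")
		if err != nil {
			panic(err)
		}
		defer idx.Close()

		stats, err := idx.Stats()
		if err != nil {
			panic(err)
		}
		// DedupRatio = 1 - TotalNewData/TotalBackupSize, SpaceSaved is the byte difference
		fmt.Printf("chunks=%d manifests=%d ratio=%.2f saved=%d bytes\n",
			stats.TotalChunks, stats.TotalManifests, stats.DedupRatio, stats.SpaceSaved)
	}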
189	internal/dedup/manifest.go	Normal file
@@ -0,0 +1,189 @@
package dedup

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// Manifest describes a single backup as a list of chunks
type Manifest struct {
	// ID is the unique identifier (typically timestamp-based)
	ID string `json:"id"`

	// Name is an optional human-readable name
	Name string `json:"name,omitempty"`

	// CreatedAt is when this backup was created
	CreatedAt time.Time `json:"created_at"`

	// Database information
	DatabaseType string `json:"database_type"` // postgres, mysql
	DatabaseName string `json:"database_name"`
	DatabaseHost string `json:"database_host"`

	// Chunks is the ordered list of chunk hashes.
	// The file is reconstructed by concatenating chunks in order.
	Chunks []ChunkRef `json:"chunks"`

	// Stats about the backup
	OriginalSize int64   `json:"original_size"` // Size before deduplication
	StoredSize   int64   `json:"stored_size"`   // Size after dedup (new chunks only)
	ChunkCount   int     `json:"chunk_count"`   // Total chunks
	NewChunks    int     `json:"new_chunks"`    // Chunks that weren't deduplicated
	DedupRatio   float64 `json:"dedup_ratio"`   // 1.0 = no dedup, 0.0 = 100% dedup

	// Encryption and compression settings used
	Encrypted    bool `json:"encrypted"`
	Compressed   bool `json:"compressed"`
	Decompressed bool `json:"decompressed,omitempty"` // Input was auto-decompressed before chunking

	// Verification
	SHA256     string    `json:"sha256"` // Hash of reconstructed file
	VerifiedAt time.Time `json:"verified_at,omitempty"`
}

// ChunkRef references a chunk in the manifest
type ChunkRef struct {
	Hash   string `json:"h"` // SHA-256 hash (64 chars)
	Offset int64  `json:"o"` // Offset in original file
	Length int    `json:"l"` // Chunk length
}

// ManifestStore manages backup manifests
type ManifestStore struct {
	basePath string
}

// NewManifestStore creates a new manifest store
func NewManifestStore(basePath string) (*ManifestStore, error) {
	manifestDir := filepath.Join(basePath, "manifests")
	if err := os.MkdirAll(manifestDir, 0700); err != nil {
		return nil, fmt.Errorf("failed to create manifest directory: %w", err)
	}
	return &ManifestStore{basePath: basePath}, nil
}

// manifestPath returns the path for a manifest ID
func (s *ManifestStore) manifestPath(id string) string {
	return filepath.Join(s.basePath, "manifests", id+".manifest.json")
}

// Save writes a manifest to disk
func (s *ManifestStore) Save(m *Manifest) error {
	path := s.manifestPath(m.ID)

	data, err := json.MarshalIndent(m, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal manifest: %w", err)
	}

	// Atomic write
	tmpPath := path + ".tmp"
	if err := os.WriteFile(tmpPath, data, 0600); err != nil {
		return fmt.Errorf("failed to write manifest: %w", err)
	}

	if err := os.Rename(tmpPath, path); err != nil {
		os.Remove(tmpPath)
		return fmt.Errorf("failed to commit manifest: %w", err)
	}

	return nil
}

// Load reads a manifest from disk
func (s *ManifestStore) Load(id string) (*Manifest, error) {
	path := s.manifestPath(id)

	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read manifest %s: %w", id, err)
	}

	var m Manifest
	if err := json.Unmarshal(data, &m); err != nil {
		return nil, fmt.Errorf("failed to parse manifest %s: %w", id, err)
	}

	return &m, nil
}

// Delete removes a manifest
func (s *ManifestStore) Delete(id string) error {
	path := s.manifestPath(id)
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to delete manifest %s: %w", id, err)
	}
	return nil
}

// List returns all manifest IDs
func (s *ManifestStore) List() ([]string, error) {
	manifestDir := filepath.Join(s.basePath, "manifests")
	entries, err := os.ReadDir(manifestDir)
	if err != nil {
		return nil, fmt.Errorf("failed to list manifests: %w", err)
	}

	var ids []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		name := e.Name()
		if len(name) > 14 && name[len(name)-14:] == ".manifest.json" {
			ids = append(ids, name[:len(name)-14])
		}
	}

	return ids, nil
}

// ListAll returns all manifests sorted by creation time (newest first)
func (s *ManifestStore) ListAll() ([]*Manifest, error) {
	ids, err := s.List()
	if err != nil {
		return nil, err
	}

	var manifests []*Manifest
	for _, id := range ids {
		m, err := s.Load(id)
		if err != nil {
			continue // Skip corrupted manifests
		}
		manifests = append(manifests, m)
	}

	// Sort by creation time (newest first)
	for i := 0; i < len(manifests)-1; i++ {
		for j := i + 1; j < len(manifests); j++ {
			if manifests[j].CreatedAt.After(manifests[i].CreatedAt) {
				manifests[i], manifests[j] = manifests[j], manifests[i]
			}
		}
	}

	return manifests, nil
}

// GetChunkHashes returns all unique chunk hashes referenced by manifests
func (s *ManifestStore) GetChunkHashes() (map[string]int, error) {
	manifests, err := s.ListAll()
	if err != nil {
		return nil, err
	}

	// Map hash -> reference count
	refs := make(map[string]int)
	for _, m := range manifests {
		for _, c := range m.Chunks {
			refs[c.Hash]++
		}
	}

	return refs, nil
}
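Save writes through a temp file and renames, so readers never see a half-written manifest. A minimal round-trip sketch of the ManifestStore API above; illustrative only, with the import path assumed as in the previous example.

	package main

	import (
		"fmt"
		"time"

		"dbbackup/internal/dedup" // assumed import path
	)

	func main() {
		store, err := dedup.NewManifestStore("/var/lib/dbbackup/dedup")
		if err != nil {
			panic(err)
		}

		m := &dedup.Manifest{
			ID:           "20250101T120000", // timestamp-based ID, as the field comment suggests
			DatabaseType: "postgres",
			DatabaseName: "appdb",
			CreatedAt:    time.Now(),
		}
		if err := store.Save(m); err != nil { // atomic: .tmp write + rename
			panic(err)
		}

		loaded, err := store.Load(m.ID)
		if err != nil {
			panic(err)
		}
		fmt.Println(loaded.DatabaseName) // appdb
	}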
235	internal/dedup/metrics.go	Normal file
@@ -0,0 +1,235 @@
package dedup

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// DedupMetrics holds deduplication statistics for Prometheus
type DedupMetrics struct {
	// Global stats
	TotalChunks     int64
	TotalManifests  int64
	TotalBackupSize int64   // Sum of all backup original sizes
	TotalNewData    int64   // Sum of all new chunks stored
	SpaceSaved      int64   // Bytes saved by deduplication
	DedupRatio      float64 // Overall dedup ratio (0-1)
	DiskUsage       int64   // Actual bytes on disk

	// Per-database stats
	ByDatabase map[string]*DatabaseDedupMetrics
}

// DatabaseDedupMetrics holds per-database dedup stats
type DatabaseDedupMetrics struct {
	Database       string
	BackupCount    int
	TotalSize      int64
	StoredSize     int64
	DedupRatio     float64
	LastBackupTime time.Time
	LastVerified   time.Time
}

// CollectMetrics gathers dedup statistics from the index and store
func CollectMetrics(basePath string, indexPath string) (*DedupMetrics, error) {
	var idx *ChunkIndex
	var err error

	if indexPath != "" {
		idx, err = NewChunkIndexAt(indexPath)
	} else {
		idx, err = NewChunkIndex(basePath)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to open chunk index: %w", err)
	}
	defer idx.Close()

	store, err := NewChunkStore(StoreConfig{BasePath: basePath})
	if err != nil {
		return nil, fmt.Errorf("failed to open chunk store: %w", err)
	}

	// Get index stats
	stats, err := idx.Stats()
	if err != nil {
		return nil, fmt.Errorf("failed to get index stats: %w", err)
	}

	// Get store stats
	storeStats, err := store.Stats()
	if err != nil {
		return nil, fmt.Errorf("failed to get store stats: %w", err)
	}

	metrics := &DedupMetrics{
		TotalChunks:     stats.TotalChunks,
		TotalManifests:  stats.TotalManifests,
		TotalBackupSize: stats.TotalBackupSize,
		TotalNewData:    stats.TotalNewData,
		SpaceSaved:      stats.SpaceSaved,
		DedupRatio:      stats.DedupRatio,
		DiskUsage:       storeStats.TotalSize,
		ByDatabase:      make(map[string]*DatabaseDedupMetrics),
	}

	// Collect per-database metrics from manifest store
	manifestStore, err := NewManifestStore(basePath)
	if err != nil {
		return metrics, nil // Return partial metrics
	}

	manifests, err := manifestStore.ListAll()
	if err != nil {
		return metrics, nil // Return partial metrics
	}

	for _, m := range manifests {
		dbKey := m.DatabaseName
		if dbKey == "" {
			dbKey = "_default"
		}

		dbMetrics, ok := metrics.ByDatabase[dbKey]
		if !ok {
			dbMetrics = &DatabaseDedupMetrics{
				Database: dbKey,
			}
			metrics.ByDatabase[dbKey] = dbMetrics
		}

		dbMetrics.BackupCount++
		dbMetrics.TotalSize += m.OriginalSize
		dbMetrics.StoredSize += m.StoredSize

		if m.CreatedAt.After(dbMetrics.LastBackupTime) {
			dbMetrics.LastBackupTime = m.CreatedAt
		}
		if !m.VerifiedAt.IsZero() && m.VerifiedAt.After(dbMetrics.LastVerified) {
			dbMetrics.LastVerified = m.VerifiedAt
		}
	}

	// Calculate per-database dedup ratios
	for _, dbMetrics := range metrics.ByDatabase {
		if dbMetrics.TotalSize > 0 {
			dbMetrics.DedupRatio = 1.0 - float64(dbMetrics.StoredSize)/float64(dbMetrics.TotalSize)
		}
	}

	return metrics, nil
}

// WritePrometheusTextfile writes dedup metrics in Prometheus format
func WritePrometheusTextfile(path string, instance string, basePath string, indexPath string) error {
	metrics, err := CollectMetrics(basePath, indexPath)
	if err != nil {
		return err
	}

	output := FormatPrometheusMetrics(metrics, instance)

	// Atomic write
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}

	tmpPath := path + ".tmp"
	if err := os.WriteFile(tmpPath, []byte(output), 0644); err != nil {
		return fmt.Errorf("failed to write temp file: %w", err)
	}

	if err := os.Rename(tmpPath, path); err != nil {
		os.Remove(tmpPath)
		return fmt.Errorf("failed to rename temp file: %w", err)
	}

	return nil
}

// FormatPrometheusMetrics formats dedup metrics in Prometheus exposition format
func FormatPrometheusMetrics(m *DedupMetrics, instance string) string {
	var b strings.Builder
	now := time.Now().Unix()

	b.WriteString("# DBBackup Deduplication Prometheus Metrics\n")
	b.WriteString(fmt.Sprintf("# Generated at: %s\n", time.Now().Format(time.RFC3339)))
	b.WriteString(fmt.Sprintf("# Instance: %s\n", instance))
	b.WriteString("\n")

	// Global dedup metrics
	b.WriteString("# HELP dbbackup_dedup_chunks_total Total number of unique chunks stored\n")
	b.WriteString("# TYPE dbbackup_dedup_chunks_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_chunks_total{instance=%q} %d\n", instance, m.TotalChunks))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_manifests_total Total number of deduplicated backups\n")
	b.WriteString("# TYPE dbbackup_dedup_manifests_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_manifests_total{instance=%q} %d\n", instance, m.TotalManifests))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_backup_bytes_total Total logical size of all backups in bytes\n")
	b.WriteString("# TYPE dbbackup_dedup_backup_bytes_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_backup_bytes_total{instance=%q} %d\n", instance, m.TotalBackupSize))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_stored_bytes_total Total unique data stored in bytes (after dedup)\n")
	b.WriteString("# TYPE dbbackup_dedup_stored_bytes_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_stored_bytes_total{instance=%q} %d\n", instance, m.TotalNewData))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_space_saved_bytes Bytes saved by deduplication\n")
	b.WriteString("# TYPE dbbackup_dedup_space_saved_bytes gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_space_saved_bytes{instance=%q} %d\n", instance, m.SpaceSaved))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_ratio Deduplication ratio (0-1, higher is better)\n")
	b.WriteString("# TYPE dbbackup_dedup_ratio gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_ratio{instance=%q} %.4f\n", instance, m.DedupRatio))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_disk_usage_bytes Actual disk usage of chunk store\n")
	b.WriteString("# TYPE dbbackup_dedup_disk_usage_bytes gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_disk_usage_bytes{instance=%q} %d\n", instance, m.DiskUsage))
	b.WriteString("\n")

	// Per-database metrics
	if len(m.ByDatabase) > 0 {
		b.WriteString("# HELP dbbackup_dedup_database_backup_count Number of deduplicated backups per database\n")
		b.WriteString("# TYPE dbbackup_dedup_database_backup_count gauge\n")
		for _, db := range m.ByDatabase {
			b.WriteString(fmt.Sprintf("dbbackup_dedup_database_backup_count{instance=%q,database=%q} %d\n",
				instance, db.Database, db.BackupCount))
		}
		b.WriteString("\n")

		b.WriteString("# HELP dbbackup_dedup_database_ratio Deduplication ratio per database (0-1)\n")
		b.WriteString("# TYPE dbbackup_dedup_database_ratio gauge\n")
		for _, db := range m.ByDatabase {
			b.WriteString(fmt.Sprintf("dbbackup_dedup_database_ratio{instance=%q,database=%q} %.4f\n",
				instance, db.Database, db.DedupRatio))
		}
		b.WriteString("\n")

		b.WriteString("# HELP dbbackup_dedup_database_last_backup_timestamp Last backup timestamp per database\n")
		b.WriteString("# TYPE dbbackup_dedup_database_last_backup_timestamp gauge\n")
		for _, db := range m.ByDatabase {
			if !db.LastBackupTime.IsZero() {
				b.WriteString(fmt.Sprintf("dbbackup_dedup_database_last_backup_timestamp{instance=%q,database=%q} %d\n",
					instance, db.Database, db.LastBackupTime.Unix()))
			}
		}
		b.WriteString("\n")
	}

	b.WriteString("# HELP dbbackup_dedup_scrape_timestamp Unix timestamp when dedup metrics were collected\n")
	b.WriteString("# TYPE dbbackup_dedup_scrape_timestamp gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_scrape_timestamp{instance=%q} %d\n", instance, now))

	return b.String()
}
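For reference, a textfile produced by WritePrometheusTextfile looks like the excerpt below; the metric names and label shapes follow the format strings above exactly, while the timestamps and values are illustrative.

	# DBBackup Deduplication Prometheus Metrics
	# Generated at: 2025-01-01T12:00:00Z
	# Instance: db01

	# HELP dbbackup_dedup_chunks_total Total number of unique chunks stored
	# TYPE dbbackup_dedup_chunks_total gauge
	dbbackup_dedup_chunks_total{instance="db01"} 4213

	# HELP dbbackup_dedup_ratio Deduplication ratio (0-1, higher is better)
	# TYPE dbbackup_dedup_ratio gauge
	dbbackup_dedup_ratio{instance="db01"} 0.8800

	# HELP dbbackup_dedup_database_ratio Deduplication ratio per database (0-1)
	# TYPE dbbackup_dedup_database_ratio gauge
	dbbackup_dedup_database_ratio{instance="db01",database="appdb"} 0.9012

The file is written to a temp path and renamed, which matters for the node_exporter textfile collector: it only ever scrapes complete files.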
367	internal/dedup/store.go	Normal file
@@ -0,0 +1,367 @@
package dedup

import (
	"compress/gzip"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
)

// ChunkStore manages content-addressed chunk storage.
// Chunks are stored as: <base>/<prefix>/<hash>.chunk[.gz][.enc]
type ChunkStore struct {
	basePath       string
	compress       bool
	encryptionKey  []byte // 32 bytes for AES-256
	mu             sync.RWMutex
	existingChunks map[string]bool // Cache of known chunks
}

// StoreConfig holds configuration for the chunk store
type StoreConfig struct {
	BasePath      string
	Compress      bool   // Enable gzip compression
	EncryptionKey string // Optional: hex-encoded 32-byte key for AES-256-GCM
}

// NewChunkStore creates a new chunk store
func NewChunkStore(config StoreConfig) (*ChunkStore, error) {
	store := &ChunkStore{
		basePath:       config.BasePath,
		compress:       config.Compress,
		existingChunks: make(map[string]bool),
	}

	// Parse encryption key if provided
	if config.EncryptionKey != "" {
		key, err := hex.DecodeString(config.EncryptionKey)
		if err != nil {
			return nil, fmt.Errorf("invalid encryption key: %w", err)
		}
		if len(key) != 32 {
			return nil, fmt.Errorf("encryption key must be 32 bytes (got %d)", len(key))
		}
		store.encryptionKey = key
	}

	// Create base directory structure
	if err := os.MkdirAll(config.BasePath, 0700); err != nil {
		return nil, fmt.Errorf("failed to create chunk store: %w", err)
	}

	// Create chunks and manifests directories
	for _, dir := range []string{"chunks", "manifests"} {
		if err := os.MkdirAll(filepath.Join(config.BasePath, dir), 0700); err != nil {
			return nil, fmt.Errorf("failed to create %s directory: %w", dir, err)
		}
	}

	return store, nil
}

// chunkPath returns the filesystem path for a chunk hash.
// Uses 2-character prefix for directory sharding (256 subdirs).
func (s *ChunkStore) chunkPath(hash string) string {
	if len(hash) < 2 {
		return filepath.Join(s.basePath, "chunks", "xx", hash+s.chunkExt())
	}
	prefix := hash[:2]
	return filepath.Join(s.basePath, "chunks", prefix, hash+s.chunkExt())
}

// chunkExt returns the file extension based on compression/encryption settings
func (s *ChunkStore) chunkExt() string {
	ext := ".chunk"
	if s.compress {
		ext += ".gz"
	}
	if s.encryptionKey != nil {
		ext += ".enc"
	}
	return ext
}

// Has checks if a chunk exists in the store
func (s *ChunkStore) Has(hash string) bool {
	s.mu.RLock()
	if exists, ok := s.existingChunks[hash]; ok {
		s.mu.RUnlock()
		return exists
	}
	s.mu.RUnlock()

	// Check filesystem
	path := s.chunkPath(hash)
	_, err := os.Stat(path)
	exists := err == nil

	s.mu.Lock()
	s.existingChunks[hash] = exists
	s.mu.Unlock()

	return exists
}

// Put stores a chunk, returning true if it was new (not deduplicated)
func (s *ChunkStore) Put(chunk *Chunk) (isNew bool, err error) {
	// Check if already exists (deduplication!)
	if s.Has(chunk.Hash) {
		return false, nil
	}

	path := s.chunkPath(chunk.Hash)

	// Create prefix directory
	if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
		return false, fmt.Errorf("failed to create chunk directory: %w", err)
	}

	// Prepare data
	data := chunk.Data

	// Compress if enabled
	if s.compress {
		data, err = s.compressData(data)
		if err != nil {
			return false, fmt.Errorf("compression failed: %w", err)
		}
	}

	// Encrypt if enabled
	if s.encryptionKey != nil {
		data, err = s.encryptData(data)
		if err != nil {
			return false, fmt.Errorf("encryption failed: %w", err)
		}
	}

	// Write atomically (write to temp, then rename)
	tmpPath := path + ".tmp"
	if err := os.WriteFile(tmpPath, data, 0600); err != nil {
		return false, fmt.Errorf("failed to write chunk: %w", err)
	}

	if err := os.Rename(tmpPath, path); err != nil {
		os.Remove(tmpPath)
		return false, fmt.Errorf("failed to commit chunk: %w", err)
	}

	// Update cache
	s.mu.Lock()
	s.existingChunks[chunk.Hash] = true
	s.mu.Unlock()

	return true, nil
}

// Get retrieves a chunk by hash
func (s *ChunkStore) Get(hash string) (*Chunk, error) {
	path := s.chunkPath(hash)

	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read chunk %s: %w", hash, err)
	}

	// Decrypt if encrypted
	if s.encryptionKey != nil {
		data, err = s.decryptData(data)
		if err != nil {
			return nil, fmt.Errorf("decryption failed: %w", err)
		}
	}

	// Decompress if compressed
	if s.compress {
		data, err = s.decompressData(data)
		if err != nil {
			return nil, fmt.Errorf("decompression failed: %w", err)
		}
	}

	// Verify hash
	h := sha256.Sum256(data)
	actualHash := hex.EncodeToString(h[:])
	if actualHash != hash {
		return nil, fmt.Errorf("chunk hash mismatch: expected %s, got %s", hash, actualHash)
	}

	return &Chunk{
		Hash:   hash,
		Data:   data,
		Length: len(data),
	}, nil
}

// Delete removes a chunk from the store
func (s *ChunkStore) Delete(hash string) error {
	path := s.chunkPath(hash)

	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to delete chunk %s: %w", hash, err)
	}

	s.mu.Lock()
	delete(s.existingChunks, hash)
	s.mu.Unlock()

	return nil
}

// StoreStats holds storage statistics
type StoreStats struct {
	TotalChunks int64
	TotalSize   int64 // Bytes on disk (after compression/encryption)
	UniqueSize  int64 // Bytes of unique data
	Directories int
}

// Stats returns statistics about the chunk store
func (s *ChunkStore) Stats() (*StoreStats, error) {
	stats := &StoreStats{}

	chunksDir := filepath.Join(s.basePath, "chunks")
	err := filepath.Walk(chunksDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			stats.Directories++
			return nil
		}
		stats.TotalChunks++
		stats.TotalSize += info.Size()
		return nil
	})

	return stats, err
}

// LoadIndex loads the existing chunk hashes into memory
func (s *ChunkStore) LoadIndex() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.existingChunks = make(map[string]bool)

	chunksDir := filepath.Join(s.basePath, "chunks")
	return filepath.Walk(chunksDir, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}

		// Extract hash from filename
		base := filepath.Base(path)
		hash := base
		// Remove extensions
		for _, ext := range []string{".enc", ".gz", ".chunk"} {
			if len(hash) > len(ext) && hash[len(hash)-len(ext):] == ext {
				hash = hash[:len(hash)-len(ext)]
			}
		}
		if len(hash) == 64 { // SHA-256 hex length
			s.existingChunks[hash] = true
		}

		return nil
	})
}

// compressData compresses data using gzip
func (s *ChunkStore) compressData(data []byte) ([]byte, error) {
	var buf []byte
	w, err := gzip.NewWriterLevel((*bytesBuffer)(&buf), gzip.BestCompression)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

// bytesBuffer is a simple io.Writer that appends to a byte slice
type bytesBuffer []byte

func (b *bytesBuffer) Write(p []byte) (int, error) {
	*b = append(*b, p...)
	return len(p), nil
}

// decompressData decompresses gzip data
func (s *ChunkStore) decompressData(data []byte) ([]byte, error) {
	r, err := gzip.NewReader(&bytesReader{data: data})
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return io.ReadAll(r)
}

// bytesReader is a simple io.Reader from a byte slice
type bytesReader struct {
	data []byte
	pos  int
}

func (r *bytesReader) Read(p []byte) (int, error) {
	if r.pos >= len(r.data) {
		return 0, io.EOF
	}
	n := copy(p, r.data[r.pos:])
	r.pos += n
	return n, nil
}

// encryptData encrypts data using AES-256-GCM
func (s *ChunkStore) encryptData(plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(s.encryptionKey)
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}

	// Prepend nonce to ciphertext
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

// decryptData decrypts AES-256-GCM encrypted data
func (s *ChunkStore) decryptData(ciphertext []byte) ([]byte, error) {
	block, err := aes.NewCipher(s.encryptionKey)
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	if len(ciphertext) < gcm.NonceSize() {
		return nil, fmt.Errorf("ciphertext too short")
	}

	nonce := ciphertext[:gcm.NonceSize()]
	ciphertext = ciphertext[gcm.NonceSize():]

	return gcm.Open(nil, nonce, ciphertext, nil)
}
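Put and Get together form the dedup round trip: Put is a no-op for a hash the store already holds, and Get re-verifies the SHA-256 after decrypt/decompress. A minimal usage sketch for the ChunkStore above; the import path is assumed, and the Chunk literal relies only on the Hash/Data/Length fields that Get itself constructs.

	package main

	import (
		"crypto/sha256"
		"encoding/hex"
		"fmt"

		"dbbackup/internal/dedup" // assumed import path
	)

	func main() {
		store, err := dedup.NewChunkStore(dedup.StoreConfig{
			BasePath: "/var/lib/dbbackup/dedup",
			Compress: true,
			// EncryptionKey: a hex-encoded 32-byte key would enable AES-256-GCM
		})
		if err != nil {
			panic(err)
		}

		data := []byte("example chunk payload")
		sum := sha256.Sum256(data) // content address = SHA-256 of the raw chunk
		chunk := &dedup.Chunk{Hash: hex.EncodeToString(sum[:]), Data: data, Length: len(data)}

		isNew, err := store.Put(chunk) // a second Put of the same data returns false
		if err != nil {
			panic(err)
		}
		fmt.Println("stored new chunk:", isNew)

		back, err := store.Get(chunk.Hash) // decrypts, decompresses, verifies the hash
		if err != nil {
			panic(err)
		}
		fmt.Println(string(back.Data))
	}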
@@ -223,11 +223,11 @@ func (r *DrillResult) IsSuccess() bool {
 
 // Summary returns a human-readable summary of the drill
 func (r *DrillResult) Summary() string {
-	status := "✅ PASSED"
+	status := "[OK] PASSED"
 	if !r.Success {
-		status = "❌ FAILED"
+		status = "[FAIL] FAILED"
 	} else if r.Status == StatusPartial {
-		status = "⚠️ PARTIAL"
+		status = "[WARN] PARTIAL"
 	}
 
 	return fmt.Sprintf("%s - %s (%.2fs) - %d tables, %d rows",
@@ -41,20 +41,20 @@ func (e *Engine) Run(ctx context.Context, config *DrillConfig) (*DrillResult, er
 		TargetRTO: float64(config.MaxRestoreSeconds),
 	}
 
-	e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
-	e.log.Info(" 🧪 DR Drill: " + result.DrillID)
-	e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	e.log.Info("=====================================================")
+	e.log.Info(" [TEST] DR Drill: " + result.DrillID)
+	e.log.Info("=====================================================")
 	e.log.Info("")
 
 	// Cleanup function for error cases
 	var containerID string
 	cleanup := func() {
 		if containerID != "" && config.CleanupOnExit && (result.Success || !config.KeepOnFailure) {
-			e.log.Info("🗑️ Cleaning up container...")
+			e.log.Info("[DEL] Cleaning up container...")
 			e.docker.RemoveContainer(context.Background(), containerID)
 		} else if containerID != "" {
 			result.ContainerKept = true
-			e.log.Info("📦 Container kept for debugging: " + containerID)
+			e.log.Info("[PKG] Container kept for debugging: " + containerID)
 		}
 	}
 	defer cleanup()
@@ -88,7 +88,7 @@ func (e *Engine) Run(ctx context.Context, config *DrillConfig) (*DrillResult, er
 	}
 	containerID = container.ID
 	result.ContainerID = containerID
-	e.log.Info("📦 Container started: " + containerID[:12])
+	e.log.Info("[PKG] Container started: " + containerID[:12])
 
 	// Wait for container to be healthy
 	if err := e.docker.WaitForHealth(ctx, containerID, config.DatabaseType, config.ContainerTimeout); err != nil {
@@ -118,7 +118,7 @@ func (e *Engine) Run(ctx context.Context, config *DrillConfig) (*DrillResult, er
 	result.RestoreTime = time.Since(restoreStart).Seconds()
 	e.completePhase(&phase, fmt.Sprintf("Restored in %.2fs", result.RestoreTime))
 	result.Phases = append(result.Phases, phase)
-	e.log.Info(fmt.Sprintf("✅ Backup restored in %.2fs", result.RestoreTime))
+	e.log.Info(fmt.Sprintf("[OK] Backup restored in %.2fs", result.RestoreTime))
 
 	// Phase 4: Validate
 	phase = e.startPhase("Validate Database")
@@ -182,24 +182,24 @@ func (e *Engine) preflightChecks(ctx context.Context, config *DrillConfig) error
 	if err := e.docker.CheckDockerAvailable(ctx); err != nil {
 		return fmt.Errorf("docker not available: %w", err)
 	}
-	e.log.Info("✓ Docker is available")
+	e.log.Info("[OK] Docker is available")
 
 	// Check backup file exists
 	if _, err := os.Stat(config.BackupPath); err != nil {
 		return fmt.Errorf("backup file not found: %s", config.BackupPath)
 	}
-	e.log.Info("✓ Backup file exists: " + filepath.Base(config.BackupPath))
+	e.log.Info("[OK] Backup file exists: " + filepath.Base(config.BackupPath))
 
 	// Pull Docker image
 	image := config.ContainerImage
 	if image == "" {
 		image = GetDefaultImage(config.DatabaseType, "")
 	}
-	e.log.Info("⬇️ Pulling image: " + image)
+	e.log.Info("[DOWN] Pulling image: " + image)
 	if err := e.docker.PullImage(ctx, image); err != nil {
 		return fmt.Errorf("failed to pull image: %w", err)
 	}
-	e.log.Info("✓ Image ready: " + image)
+	e.log.Info("[OK] Image ready: " + image)
 
 	return nil
 }
@@ -243,7 +243,7 @@ func (e *Engine) restoreBackup(ctx context.Context, config *DrillConfig, contain
 	backupName := filepath.Base(config.BackupPath)
 	containerBackupPath := "/tmp/" + backupName
 
-	e.log.Info("📁 Copying backup to container...")
+	e.log.Info("[DIR] Copying backup to container...")
 	if err := e.docker.CopyToContainer(ctx, containerID, config.BackupPath, containerBackupPath); err != nil {
 		return fmt.Errorf("failed to copy backup: %w", err)
 	}
@@ -256,7 +256,7 @@ func (e *Engine) restoreBackup(ctx context.Context, config *DrillConfig, contain
 	}
 
 	// Restore based on database type and format
-	e.log.Info("🔄 Restoring backup...")
+	e.log.Info("[EXEC] Restoring backup...")
 	return e.executeRestore(ctx, config, containerID, containerBackupPath, containerConfig)
 }
 
@@ -366,13 +366,13 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
 	tables, err := validator.GetTableList(ctx)
 	if err == nil {
 		result.TableCount = len(tables)
-		e.log.Info(fmt.Sprintf("📊 Tables found: %d", result.TableCount))
+		e.log.Info(fmt.Sprintf("[STATS] Tables found: %d", result.TableCount))
 	}
 
 	totalRows, err := validator.GetTotalRowCount(ctx)
 	if err == nil {
 		result.TotalRows = totalRows
-		e.log.Info(fmt.Sprintf("📊 Total rows: %d", result.TotalRows))
+		e.log.Info(fmt.Sprintf("[STATS] Total rows: %d", result.TotalRows))
 	}
 
 	dbSize, err := validator.GetDatabaseSize(ctx, config.DatabaseName)
@@ -387,9 +387,9 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
 			result.CheckResults = append(result.CheckResults, tr)
 			if !tr.Success {
 				errorCount++
-				e.log.Warn("❌ " + tr.Message)
+				e.log.Warn("[FAIL] " + tr.Message)
 			} else {
-				e.log.Info("✓ " + tr.Message)
+				e.log.Info("[OK] " + tr.Message)
 			}
 		}
 	}
@@ -404,9 +404,9 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
 		totalQueryTime += qr.Duration
 		if !qr.Success {
 			errorCount++
-			e.log.Warn(fmt.Sprintf("❌ %s: %s", qr.Name, qr.Error))
+			e.log.Warn(fmt.Sprintf("[FAIL] %s: %s", qr.Name, qr.Error))
 		} else {
-			e.log.Info(fmt.Sprintf("✓ %s: %s (%.0fms)", qr.Name, qr.Result, qr.Duration))
+			e.log.Info(fmt.Sprintf("[OK] %s: %s (%.0fms)", qr.Name, qr.Result, qr.Duration))
 		}
 	}
 	if len(queryResults) > 0 {
@@ -421,9 +421,9 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
 			result.CheckResults = append(result.CheckResults, cr)
 			if !cr.Success {
 				errorCount++
-				e.log.Warn("❌ " + cr.Message)
+				e.log.Warn("[FAIL] " + cr.Message)
 			} else {
-				e.log.Info("✓ " + cr.Message)
+				e.log.Info("[OK] " + cr.Message)
 			}
 		}
 	}
@@ -433,7 +433,7 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
 		errorCount++
 		msg := fmt.Sprintf("Total rows (%d) below minimum (%d)", result.TotalRows, config.MinRowCount)
 		result.Warnings = append(result.Warnings, msg)
-		e.log.Warn("⚠️ " + msg)
+		e.log.Warn("[WARN] " + msg)
 	}
 
 	return errorCount
@@ -441,7 +441,7 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
 
 // startPhase starts a new drill phase
 func (e *Engine) startPhase(name string) DrillPhase {
-	e.log.Info("▶️ " + name)
+	e.log.Info("[RUN] " + name)
 	return DrillPhase{
 		Name:   name,
 		Status: "running",
@@ -463,7 +463,7 @@ func (e *Engine) failPhase(phase *DrillPhase, message string) {
 	phase.Duration = phase.EndTime.Sub(phase.StartTime).Seconds()
 	phase.Status = "failed"
 	phase.Message = message
-	e.log.Error("❌ Phase failed: " + message)
+	e.log.Error("[FAIL] Phase failed: " + message)
 }
 
 // finalize completes the drill result
@@ -472,9 +472,9 @@ func (e *Engine) finalize(result *DrillResult) {
 	result.Duration = result.EndTime.Sub(result.StartTime).Seconds()
 
 	e.log.Info("")
-	e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	e.log.Info("=====================================================")
 	e.log.Info(" " + result.Summary())
-	e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	e.log.Info("=====================================================")
 
 	if result.Success {
 		e.log.Info(fmt.Sprintf(" RTO: %.2fs (target: %.0fs) %s",
@@ -484,9 +484,9 @@ func (e *Engine) finalize(result *DrillResult) {
 
 func boolIcon(b bool) string {
 	if b {
-		return "✅"
+		return "[OK]"
 	}
-	return "❌"
+	return "[FAIL]"
 }
 
 // Cleanup removes drill resources
@@ -498,7 +498,7 @@ func (e *Engine) Cleanup(ctx context.Context, drillID string) error {
 
 	for _, c := range containers {
 		if strings.Contains(c.Name, drillID) || (drillID == "" && strings.HasPrefix(c.Name, "drill_")) {
-			e.log.Info("🗑️ Removing container: " + c.Name)
+			e.log.Info("[DEL] Removing container: " + c.Name)
 			if err := e.docker.RemoveContainer(ctx, c.ID); err != nil {
 				e.log.Warn("Failed to remove container", "id", c.ID, "error", err)
 			}
@@ -8,7 +8,7 @@ import (
 
 func TestEncryptDecrypt(t *testing.T) {
 	// Test data
-	original := []byte("This is a secret database backup that needs encryption! 🔒")
+	original := []byte("This is a secret database backup that needs encryption! [LOCK]")
 
 	// Test with passphrase
 	t.Run("Passphrase", func(t *testing.T) {
@@ -57,7 +57,7 @@ func TestEncryptDecrypt(t *testing.T) {
 				string(original), string(decrypted))
 		}
 
-		t.Log("✅ Encryption/decryption successful")
+		t.Log("[OK] Encryption/decryption successful")
 	})
 
 	// Test with direct key
@@ -102,7 +102,7 @@ func TestEncryptDecrypt(t *testing.T) {
 			t.Errorf("Decrypted data doesn't match original")
 		}
 
-		t.Log("✅ Direct key encryption/decryption successful")
+		t.Log("[OK] Direct key encryption/decryption successful")
 	})
 
 	// Test wrong password
@@ -133,7 +133,7 @@ func TestEncryptDecrypt(t *testing.T) {
 			t.Error("Expected decryption to fail with wrong password, but it succeeded")
 		}
 
-		t.Logf("✅ Wrong password correctly rejected: %v", err)
+		t.Logf("[OK] Wrong password correctly rejected: %v", err)
 	})
 }
 
@@ -183,7 +183,7 @@ func TestLargeData(t *testing.T) {
 		t.Errorf("Large data decryption failed")
 	}
 
-	t.Log("✅ Large data encryption/decryption successful")
+	t.Log("[OK] Large data encryption/decryption successful")
 }
 
 func TestKeyGeneration(t *testing.T) {
@@ -207,7 +207,7 @@ func TestKeyGeneration(t *testing.T) {
 		t.Error("Generated keys are identical - randomness broken!")
 	}
 
-	t.Log("✅ Key generation successful")
+	t.Log("[OK] Key generation successful")
 }
 
 func TestKeyDerivation(t *testing.T) {
@@ -230,5 +230,5 @@ func TestKeyDerivation(t *testing.T) {
 		t.Error("Different salts produced same key")
 	}
 
-	t.Log("✅ Key derivation successful")
+	t.Log("[OK] Key derivation successful")
 }
@@ -339,7 +339,7 @@ func (e *CloneEngine) Backup(ctx context.Context, opts *BackupOptions) (*BackupR
|
|||||||
|
|
||||||
// Save metadata
|
// Save metadata
|
||||||
meta := &metadata.BackupMetadata{
|
meta := &metadata.BackupMetadata{
|
||||||
Version: "3.40.0",
|
Version: "3.42.1",
|
||||||
Timestamp: startTime,
|
Timestamp: startTime,
|
||||||
Database: opts.Database,
|
Database: opts.Database,
|
||||||
DatabaseType: "mysql",
|
DatabaseType: "mysql",
|
||||||
|
|||||||
@@ -234,10 +234,26 @@ func (e *MySQLDumpEngine) Backup(ctx context.Context, opts *BackupOptions) (*Bac
|
|||||||
gzWriter.Close()
|
gzWriter.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for command
|
// Wait for command with proper context handling
|
||||||
if err := cmd.Wait(); err != nil {
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("MySQL backup cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
stderr := stderrBuf.String()
|
stderr := stderrBuf.String()
|
||||||
return nil, fmt.Errorf("mysqldump failed: %w\n%s", err, stderr)
|
return nil, fmt.Errorf("mysqldump failed: %w\n%s", cmdErr, stderr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get file info
|
// Get file info
|
||||||
@@ -254,7 +270,7 @@ func (e *MySQLDumpEngine) Backup(ctx context.Context, opts *BackupOptions) (*Bac
|
|||||||
|
|
||||||
// Save metadata
|
// Save metadata
|
||||||
meta := &metadata.BackupMetadata{
|
meta := &metadata.BackupMetadata{
|
||||||
Version: "3.40.0",
|
Version: "3.42.1",
|
||||||
Timestamp: startTime,
|
Timestamp: startTime,
|
||||||
Database: opts.Database,
|
Database: opts.Database,
|
||||||
DatabaseType: "mysql",
|
DatabaseType: "mysql",
|
||||||
@@ -442,8 +458,25 @@ func (e *MySQLDumpEngine) BackupToWriter(ctx context.Context, w io.Writer, opts
|
|||||||
gzWriter.Close()
|
gzWriter.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
// Wait for command with proper context handling
|
||||||
return nil, fmt.Errorf("mysqldump failed: %w\n%s", err, stderrBuf.String())
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("MySQL streaming backup cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
|
return nil, fmt.Errorf("mysqldump failed: %w\n%s", cmdErr, stderrBuf.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
return &BackupResult{
|
return &BackupResult{
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ func (b *BtrfsBackend) Detect(dataDir string) (bool, error) {
 // CreateSnapshot creates a Btrfs snapshot
 func (b *BtrfsBackend) CreateSnapshot(ctx context.Context, opts SnapshotOptions) (*Snapshot, error) {
 	if b.config == nil || b.config.Subvolume == "" {
-		return nil, fmt.Errorf("Btrfs subvolume not configured")
+		return nil, fmt.Errorf("btrfs subvolume not configured")
 	}
 
 	// Generate snapshot name
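Review note: the lowercasing is not cosmetic. Go convention (enforced by the staticcheck ST1005 lint) wants error strings uncapitalized and unpunctuated because they are routinely wrapped into longer messages. A minimal illustration:

    package main

    import "fmt"

    func main() {
    	// Wrapped errors read as one sentence, hence the lowercase inner string.
    	err := fmt.Errorf("btrfs subvolume not configured")
    	fmt.Println(fmt.Errorf("snapshot backup failed: %w", err))
    	// Output: snapshot backup failed: btrfs subvolume not configured
    }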
@@ -188,6 +188,8 @@ func (e *SnapshotEngine) Backup(ctx context.Context, opts *BackupOptions) (*Back
 	// Step 4: Mount snapshot
 	mountPoint := e.config.MountPoint
 	if mountPoint == "" {
+		// Note: snapshot engine uses snapshot.Config, which doesn't have GetEffectiveWorkDir()
+		// TODO: Refactor to use main config.Config for WorkDir support
 		mountPoint = filepath.Join(os.TempDir(), fmt.Sprintf("dbbackup_snap_%s", timestamp))
 	}
 
@@ -223,7 +225,7 @@ func (e *SnapshotEngine) Backup(ctx context.Context, opts *BackupOptions) (*Back
 
 	// Save metadata
 	meta := &metadata.BackupMetadata{
-		Version: "3.40.0",
+		Version: "3.42.1",
 		Timestamp: startTime,
 		Database: opts.Database,
 		DatabaseType: "mysql",
internal/fs/fs.go (new file, 223 lines)
@@ -0,0 +1,223 @@
+// Package fs provides filesystem abstraction using spf13/afero for testability.
+// It allows swapping the real filesystem with an in-memory mock for unit tests.
+package fs
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/spf13/afero"
+)
+
+// FS is the global filesystem interface used throughout the application.
+// By default, it uses the real OS filesystem.
+// For testing, use SetFS(afero.NewMemMapFs()) to use an in-memory filesystem.
+var FS afero.Fs = afero.NewOsFs()
+
+// SetFS sets the global filesystem (useful for testing)
+func SetFS(fs afero.Fs) {
+	FS = fs
+}
+
+// ResetFS resets to the real OS filesystem
+func ResetFS() {
+	FS = afero.NewOsFs()
+}
+
+// NewMemMapFs creates a new in-memory filesystem for testing
+func NewMemMapFs() afero.Fs {
+	return afero.NewMemMapFs()
+}
+
+// NewReadOnlyFs wraps a filesystem to make it read-only
+func NewReadOnlyFs(base afero.Fs) afero.Fs {
+	return afero.NewReadOnlyFs(base)
+}
+
+// NewBasePathFs creates a filesystem rooted at a specific path
+func NewBasePathFs(base afero.Fs, path string) afero.Fs {
+	return afero.NewBasePathFs(base, path)
+}
+
+// --- File Operations (use global FS) ---
+
+// Create creates a file
+func Create(name string) (afero.File, error) {
+	return FS.Create(name)
+}
+
+// Open opens a file for reading
+func Open(name string) (afero.File, error) {
+	return FS.Open(name)
+}
+
+// OpenFile opens a file with specified flags and permissions
+func OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
+	return FS.OpenFile(name, flag, perm)
+}
+
+// Remove removes a file or empty directory
+func Remove(name string) error {
+	return FS.Remove(name)
+}
+
+// RemoveAll removes a path and any children it contains
+func RemoveAll(path string) error {
+	return FS.RemoveAll(path)
+}
+
+// Rename renames (moves) a file
+func Rename(oldname, newname string) error {
+	return FS.Rename(oldname, newname)
+}
+
+// Stat returns file info
+func Stat(name string) (os.FileInfo, error) {
+	return FS.Stat(name)
+}
+
+// Chmod changes file mode
+func Chmod(name string, mode os.FileMode) error {
+	return FS.Chmod(name, mode)
+}
+
+// Chown changes file ownership (may not work on all filesystems)
+func Chown(name string, uid, gid int) error {
+	return FS.Chown(name, uid, gid)
+}
+
+// Chtimes changes file access and modification times
+func Chtimes(name string, atime, mtime time.Time) error {
+	return FS.Chtimes(name, atime, mtime)
+}
+
+// --- Directory Operations ---
+
+// Mkdir creates a directory
+func Mkdir(name string, perm os.FileMode) error {
+	return FS.Mkdir(name, perm)
+}
+
+// MkdirAll creates a directory and all parents
+func MkdirAll(path string, perm os.FileMode) error {
+	return FS.MkdirAll(path, perm)
+}
+
+// ReadDir reads a directory
+func ReadDir(dirname string) ([]os.FileInfo, error) {
+	return afero.ReadDir(FS, dirname)
+}
+
+// --- File Content Operations ---
+
+// ReadFile reads an entire file
+func ReadFile(filename string) ([]byte, error) {
+	return afero.ReadFile(FS, filename)
+}
+
+// WriteFile writes data to a file
+func WriteFile(filename string, data []byte, perm os.FileMode) error {
+	return afero.WriteFile(FS, filename, data, perm)
+}
+
+// --- Existence Checks ---
+
+// Exists checks if a file or directory exists
+func Exists(path string) (bool, error) {
+	return afero.Exists(FS, path)
+}
+
+// DirExists checks if a directory exists
+func DirExists(path string) (bool, error) {
+	return afero.DirExists(FS, path)
+}
+
+// IsDir checks if path is a directory
+func IsDir(path string) (bool, error) {
+	return afero.IsDir(FS, path)
+}
+
+// IsEmpty checks if a directory is empty
+func IsEmpty(path string) (bool, error) {
+	return afero.IsEmpty(FS, path)
+}
+
+// --- Utility Functions ---
+
+// Walk walks a directory tree
+func Walk(root string, walkFn filepath.WalkFunc) error {
+	return afero.Walk(FS, root, walkFn)
+}
+
+// Glob returns the names of all files matching pattern
+func Glob(pattern string) ([]string, error) {
+	return afero.Glob(FS, pattern)
+}
+
+// TempDir creates a temporary directory
+func TempDir(dir, prefix string) (string, error) {
+	return afero.TempDir(FS, dir, prefix)
+}
+
+// TempFile creates a temporary file
+func TempFile(dir, pattern string) (afero.File, error) {
+	return afero.TempFile(FS, dir, pattern)
+}
+
+// CopyFile copies a file from src to dst
+func CopyFile(src, dst string) error {
+	srcFile, err := FS.Open(src)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	srcInfo, err := srcFile.Stat()
+	if err != nil {
+		return err
+	}
+
+	dstFile, err := FS.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, srcInfo.Mode())
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	_, err = io.Copy(dstFile, srcFile)
+	return err
+}
+
+// FileSize returns the size of a file
+func FileSize(path string) (int64, error) {
+	info, err := FS.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	return info.Size(), nil
+}
+
+// --- Testing Helpers ---
+
+// WithMemFs executes a function with an in-memory filesystem, then restores the original
+func WithMemFs(fn func(fs afero.Fs)) {
+	original := FS
+	memFs := afero.NewMemMapFs()
+	FS = memFs
+	defer func() { FS = original }()
+	fn(memFs)
+}
+
+// SetupTestDir creates a test directory structure in-memory
+func SetupTestDir(files map[string]string) afero.Fs {
+	memFs := afero.NewMemMapFs()
+	for path, content := range files {
+		dir := filepath.Dir(path)
+		if dir != "." && dir != "/" {
+			_ = memFs.MkdirAll(dir, 0755)
+		}
+		_ = afero.WriteFile(memFs, path, []byte(content), 0644)
+	}
+	return memFs
+}
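Design note on the new package: it exposes a mutable package-level FS plus thin wrappers, so production code keeps calling fs.ReadFile-style helpers while tests swap the backend in one place; the trade-off is that a global is not safe for t.Parallel() tests that both swap it. A usage sketch under assumptions (the import path is invented, and internal packages are only importable from within the module):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/spf13/afero"

    	fs "dbbackup/internal/fs" // assumed module-local import path
    )

    func main() {
    	fs.SetFS(afero.NewMemMapFs()) // swap in an in-memory backend
    	defer fs.ResetFS()            // restore the real OS filesystem

    	if err := fs.WriteFile("/demo/file.txt", []byte("data"), 0644); err != nil {
    		log.Fatal(err)
    	}
    	data, _ := fs.ReadFile("/demo/file.txt")
    	fmt.Println(string(data)) // "data", without ever touching disk
    }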
internal/fs/fs_test.go (new file, 191 lines)
@@ -0,0 +1,191 @@
+package fs
+
+import (
+	"os"
+	"testing"
+
+	"github.com/spf13/afero"
+)
+
+func TestMemMapFs(t *testing.T) {
+	// Use in-memory filesystem for testing
+	WithMemFs(func(memFs afero.Fs) {
+		// Create a file
+		err := WriteFile("/test/file.txt", []byte("hello world"), 0644)
+		if err != nil {
+			t.Fatalf("WriteFile failed: %v", err)
+		}
+
+		// Read it back
+		content, err := ReadFile("/test/file.txt")
+		if err != nil {
+			t.Fatalf("ReadFile failed: %v", err)
+		}
+
+		if string(content) != "hello world" {
+			t.Errorf("expected 'hello world', got '%s'", string(content))
+		}
+
+		// Check existence
+		exists, err := Exists("/test/file.txt")
+		if err != nil {
+			t.Fatalf("Exists failed: %v", err)
+		}
+		if !exists {
+			t.Error("file should exist")
+		}
+
+		// Check non-existent file
+		exists, err = Exists("/nonexistent.txt")
+		if err != nil {
+			t.Fatalf("Exists failed: %v", err)
+		}
+		if exists {
+			t.Error("file should not exist")
+		}
+	})
+}
+
+func TestSetupTestDir(t *testing.T) {
+	// Create test directory structure
+	testFs := SetupTestDir(map[string]string{
+		"/backups/db1.dump":     "database 1 content",
+		"/backups/db2.dump":     "database 2 content",
+		"/config/settings.json": `{"key": "value"}`,
+	})
+
+	// Verify files exist
+	content, err := afero.ReadFile(testFs, "/backups/db1.dump")
+	if err != nil {
+		t.Fatalf("ReadFile failed: %v", err)
+	}
+	if string(content) != "database 1 content" {
+		t.Errorf("unexpected content: %s", string(content))
+	}
+
+	// Verify directory structure
+	files, err := afero.ReadDir(testFs, "/backups")
+	if err != nil {
+		t.Fatalf("ReadDir failed: %v", err)
+	}
+	if len(files) != 2 {
+		t.Errorf("expected 2 files, got %d", len(files))
+	}
+}
+
+func TestCopyFile(t *testing.T) {
+	WithMemFs(func(memFs afero.Fs) {
+		// Create source file
+		err := WriteFile("/source.txt", []byte("copy me"), 0644)
+		if err != nil {
+			t.Fatalf("WriteFile failed: %v", err)
+		}
+
+		// Copy file
+		err = CopyFile("/source.txt", "/dest.txt")
+		if err != nil {
+			t.Fatalf("CopyFile failed: %v", err)
+		}
+
+		// Verify copy
+		content, err := ReadFile("/dest.txt")
+		if err != nil {
+			t.Fatalf("ReadFile failed: %v", err)
+		}
+		if string(content) != "copy me" {
+			t.Errorf("unexpected content: %s", string(content))
+		}
+	})
+}
+
+func TestFileSize(t *testing.T) {
+	WithMemFs(func(memFs afero.Fs) {
+		data := []byte("12345678901234567890") // 20 bytes
+		err := WriteFile("/sized.txt", data, 0644)
+		if err != nil {
+			t.Fatalf("WriteFile failed: %v", err)
+		}
+
+		size, err := FileSize("/sized.txt")
+		if err != nil {
+			t.Fatalf("FileSize failed: %v", err)
+		}
+		if size != 20 {
+			t.Errorf("expected size 20, got %d", size)
+		}
+	})
+}
+
+func TestTempDir(t *testing.T) {
+	WithMemFs(func(memFs afero.Fs) {
+		// Create temp dir
+		dir, err := TempDir("", "test-")
+		if err != nil {
+			t.Fatalf("TempDir failed: %v", err)
+		}
+
+		// Verify it exists
+		isDir, err := IsDir(dir)
+		if err != nil {
+			t.Fatalf("IsDir failed: %v", err)
+		}
+		if !isDir {
+			t.Error("temp dir should be a directory")
+		}
+
+		// Verify it's empty
+		isEmpty, err := IsEmpty(dir)
+		if err != nil {
+			t.Fatalf("IsEmpty failed: %v", err)
+		}
+		if !isEmpty {
+			t.Error("temp dir should be empty")
+		}
+	})
+}
+
+func TestWalk(t *testing.T) {
+	WithMemFs(func(memFs afero.Fs) {
+		// Create directory structure
+		_ = MkdirAll("/root/a/b", 0755)
+		_ = WriteFile("/root/file1.txt", []byte("1"), 0644)
+		_ = WriteFile("/root/a/file2.txt", []byte("2"), 0644)
+		_ = WriteFile("/root/a/b/file3.txt", []byte("3"), 0644)
+
+		var files []string
+		err := Walk("/root", func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if !info.IsDir() {
+				files = append(files, path)
+			}
+			return nil
+		})
+
+		if err != nil {
+			t.Fatalf("Walk failed: %v", err)
+		}
+
+		if len(files) != 3 {
+			t.Errorf("expected 3 files, got %d: %v", len(files), files)
+		}
+	})
+}
+
+func TestGlob(t *testing.T) {
+	WithMemFs(func(memFs afero.Fs) {
+		_ = WriteFile("/data/backup1.dump", []byte("1"), 0644)
+		_ = WriteFile("/data/backup2.dump", []byte("2"), 0644)
+		_ = WriteFile("/data/config.json", []byte("{}"), 0644)
+
+		matches, err := Glob("/data/*.dump")
+		if err != nil {
+			t.Fatalf("Glob failed: %v", err)
+		}
+
+		if len(matches) != 2 {
+			t.Errorf("expected 2 matches, got %d: %v", len(matches), matches)
+		}
+	})
+}
@@ -53,16 +53,16 @@ type InstallOptions struct {
 
 // ServiceStatus contains information about installed services
 type ServiceStatus struct {
 	Installed bool
 	Enabled bool
 	Active bool
 	TimerEnabled bool
 	TimerActive bool
 	LastRun string
 	NextRun string
 	ServicePath string
 	TimerPath string
 	ExporterPath string
 }
 
 // NewInstaller creates a new Installer
@@ -188,7 +188,7 @@ func (i *Installer) Uninstall(ctx context.Context, instance string, purge bool)
 	if instance != "cluster" && instance != "" {
 		templateService := filepath.Join(i.unitDir, "dbbackup@.service")
 		templateTimer := filepath.Join(i.unitDir, "dbbackup@.timer")
 
 		// Only remove templates if no other instances are using them
 		if i.canRemoveTemplates() {
 			if !i.dryRun {
@@ -644,11 +644,11 @@ func (i *Installer) canRemoveTemplates() bool {
 	// Check if any dbbackup@*.service instances exist
 	pattern := filepath.Join(i.unitDir, "dbbackup@*.service")
 	matches, _ := filepath.Glob(pattern)
 
 	// Also check for running instances
 	cmd := exec.Command("systemctl", "list-units", "--type=service", "--all", "dbbackup@*")
 	output, _ := cmd.Output()
 
 	return len(matches) == 0 && !strings.Contains(string(output), "dbbackup@")
 }
 
@@ -658,9 +658,9 @@ func (i *Installer) printNextSteps(opts InstallOptions) {
 	serviceName := strings.Replace(timerName, ".timer", ".service", 1)
 
 	fmt.Println()
-	fmt.Println("✅ Installation successful!")
+	fmt.Println("[OK] Installation successful!")
 	fmt.Println()
-	fmt.Println("📋 Next steps:")
+	fmt.Println("[NEXT] Next steps:")
 	fmt.Println()
 	fmt.Printf(" 1. Edit configuration: sudo nano %s\n", opts.ConfigPath)
 	fmt.Printf(" 2. Set credentials: sudo nano /etc/dbbackup/env.d/%s.conf\n", opts.Instance)
@@ -668,12 +668,12 @@ func (i *Installer) printNextSteps(opts InstallOptions) {
 	fmt.Printf(" 4. Verify timer status: sudo systemctl status %s\n", timerName)
 	fmt.Printf(" 5. Run backup manually: sudo systemctl start %s\n", serviceName)
 	fmt.Println()
-	fmt.Println("📊 View backup logs:")
+	fmt.Println("[LOGS] View backup logs:")
 	fmt.Printf("   journalctl -u %s -f\n", serviceName)
 	fmt.Println()
 
 	if opts.WithMetrics {
-		fmt.Println("📈 Prometheus metrics:")
+		fmt.Println("[METRICS] Prometheus metrics:")
 		fmt.Printf("   curl http://localhost:%d/metrics\n", opts.MetricsPort)
 		fmt.Println()
 	}
@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
 # Environment
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution - cluster backup (all databases)
-ExecStart={{.BinaryPath}} backup cluster --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup cluster --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export
@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
 # Environment
 EnvironmentFile=-/etc/dbbackup/env.d/%i.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution
-ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export
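Review note on both unit templates: %i in dbbackup@.service expands to the systemd instance name, so `sudo systemctl enable --now dbbackup@prod.timer` (the instance name "prod" is an example, not from this repo) schedules backups for one database and pulls credentials from /etc/dbbackup/env.d/prod.conf. Dropping `--config {{.ConfigPath}}` in favor of `--backup-dir {{.BackupDir}}` means configuration is now resolved through the new WorkingDirectory=/var/lib/dbbackup (the .dbbackup.conf mentioned in the comment), so a ConfigPath that moves after installation can no longer break already-rendered units.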
internal/logger/colors.go (new file, 118 lines)
@@ -0,0 +1,118 @@
+package logger
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/fatih/color"
+)
+
+// CLI output helpers using fatih/color for cross-platform support
+
+// Success prints a success message with green checkmark
+func Success(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	SuccessColor.Fprint(os.Stdout, "✓ ")
+	fmt.Println(msg)
+}
+
+// Error prints an error message with red X
+func Error(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	ErrorColor.Fprint(os.Stderr, "✗ ")
+	fmt.Fprintln(os.Stderr, msg)
+}
+
+// Warning prints a warning message with yellow exclamation
+func Warning(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	WarnColor.Fprint(os.Stdout, "⚠ ")
+	fmt.Println(msg)
+}
+
+// Info prints an info message with blue arrow
+func Info(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	InfoColor.Fprint(os.Stdout, "→ ")
+	fmt.Println(msg)
+}
+
+// Header prints a bold header
+func Header(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	HighlightColor.Println(msg)
+}
+
+// Dim prints dimmed/secondary text
+func Dim(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	DimColor.Println(msg)
+}
+
+// Bold returns bold text
+func Bold(text string) string {
+	return color.New(color.Bold).Sprint(text)
+}
+
+// Green returns green text
+func Green(text string) string {
+	return SuccessColor.Sprint(text)
+}
+
+// Red returns red text
+func Red(text string) string {
+	return ErrorColor.Sprint(text)
+}
+
+// Yellow returns yellow text
+func Yellow(text string) string {
+	return WarnColor.Sprint(text)
+}
+
+// Cyan returns cyan text
+func Cyan(text string) string {
+	return InfoColor.Sprint(text)
+}
+
+// StatusLine prints a key-value status line
+func StatusLine(key, value string) {
+	DimColor.Printf(" %s: ", key)
+	fmt.Println(value)
+}
+
+// ProgressStatus prints operation status with timing
+func ProgressStatus(operation string, status string, isSuccess bool) {
+	if isSuccess {
+		SuccessColor.Print("[OK] ")
+	} else {
+		ErrorColor.Print("[FAIL] ")
+	}
+	fmt.Printf("%s: %s\n", operation, status)
+}
+
+// TableRow prints a simple formatted table row
+func TableRow(cols ...string) {
+	for i, col := range cols {
+		if i == 0 {
+			InfoColor.Printf("%-20s", col)
+		} else {
+			fmt.Printf("%-15s", col)
+		}
+	}
+	fmt.Println()
+}
+
+// DisableColors disables all color output (for non-TTY or --no-color flag)
+func DisableColors() {
+	color.NoColor = true
+}
+
+// EnableColors enables color output
+func EnableColors() {
+	color.NoColor = false
+}
+
+// IsColorEnabled returns whether colors are enabled
+func IsColorEnabled() bool {
+	return !color.NoColor
+}
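A hedged sketch of how these helpers compose in command output (the import path is assumed; behavior described in comments):

    package main

    import logger "dbbackup/internal/logger" // assumed module-local import path

    func main() {
    	logger.Header("Backup summary")
    	logger.Success("dump completed in %s", "42s") // green "✓ dump completed in 42s"
    	logger.Warning("retention keeps only %d backups", 7)
    	logger.StatusLine("Target", "/var/backups/db") // dimmed key, plain value

    	logger.DisableColors() // e.g. piped output or a --no-color flag
    	logger.Error("still prints, just without ANSI escape codes")
    }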
@@ -7,9 +7,29 @@ import (
 	"strings"
 	"time"
 
+	"github.com/fatih/color"
 	"github.com/sirupsen/logrus"
 )
 
+// Color printers for consistent output across the application
+var (
+	// Status colors
+	SuccessColor = color.New(color.FgGreen, color.Bold)
+	ErrorColor   = color.New(color.FgRed, color.Bold)
+	WarnColor    = color.New(color.FgYellow, color.Bold)
+	InfoColor    = color.New(color.FgCyan)
+	DebugColor   = color.New(color.FgWhite)
+
+	// Highlight colors
+	HighlightColor = color.New(color.FgMagenta, color.Bold)
+	DimColor       = color.New(color.FgHiBlack)
+
+	// Data colors
+	NumberColor = color.New(color.FgYellow)
+	PathColor   = color.New(color.FgBlue, color.Underline)
+	TimeColor   = color.New(color.FgCyan)
+)
+
 // Logger defines the interface for logging
 type Logger interface {
 	Debug(msg string, args ...any)
@@ -226,34 +246,32 @@ type CleanFormatter struct{}
 func (f *CleanFormatter) Format(entry *logrus.Entry) ([]byte, error) {
 	timestamp := entry.Time.Format("2006-01-02T15:04:05")
 
-	// Color codes for different log levels
-	var levelColor, levelText string
+	// Get level color and text using fatih/color
+	var levelPrinter *color.Color
+	var levelText string
 	switch entry.Level {
 	case logrus.DebugLevel:
-		levelColor = "\033[36m" // Cyan
+		levelPrinter = DebugColor
 		levelText = "DEBUG"
 	case logrus.InfoLevel:
-		levelColor = "\033[32m" // Green
+		levelPrinter = SuccessColor
 		levelText = "INFO "
 	case logrus.WarnLevel:
-		levelColor = "\033[33m" // Yellow
+		levelPrinter = WarnColor
 		levelText = "WARN "
 	case logrus.ErrorLevel:
-		levelColor = "\033[31m" // Red
+		levelPrinter = ErrorColor
 		levelText = "ERROR"
 	default:
-		levelColor = "\033[0m" // Reset
+		levelPrinter = InfoColor
 		levelText = "INFO "
 	}
-	resetColor := "\033[0m"
 
 	// Build the message with perfectly aligned columns
 	var output strings.Builder
 
 	// Column 1: Level (with color, fixed width 5 chars)
-	output.WriteString(levelColor)
-	output.WriteString(levelText)
-	output.WriteString(resetColor)
+	output.WriteString(levelPrinter.Sprint(levelText))
 	output.WriteString(" ")
 
 	// Column 2: Timestamp (fixed format)
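Beyond deleting hand-rolled escape codes, switching the formatter to fatih/color changes behavior in a useful way: the library consults the global color.NoColor switch, which it initializes from TTY detection (and, in recent versions, the NO_COLOR convention), and it supports Windows consoles, none of which raw \033[...m literals do. A minimal demonstration of the switch the formatter now inherits:

    package main

    import (
    	"fmt"

    	"github.com/fatih/color"
    )

    func main() {
    	c := color.New(color.FgGreen, color.Bold)
    	fmt.Println(c.Sprint("INFO")) // ANSI-wrapped when stdout is a TTY
    	color.NoColor = true          // the switch DisableColors() flips
    	fmt.Println(c.Sprint("INFO")) // plain text once colors are off
    }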
@@ -117,7 +117,7 @@ func NewEngine(sourceCfg, targetCfg *config.Config, log logger.Logger) (*Engine,
 		targetDB:   targetDB,
 		log:        log,
 		progress:   progress.NewSpinner(),
-		workDir:    os.TempDir(),
+		workDir:    sourceCfg.GetEffectiveWorkDir(),
 		keepBackup: false,
 		jobs:       4,
 		dryRun:     false,
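Moving the migration engine off os.TempDir() matters operationally: intermediate dumps can be far larger than /tmp, which is often a small tmpfs. This diff does not show GetEffectiveWorkDir(); a plausible shape, offered purely as an assumption about the config package, is a configured WorkDir with a temp-dir fallback:

    package config

    import "os"

    // Config is trimmed to the single field this sketch needs.
    type Config struct {
    	WorkDir string
    }

    // GetEffectiveWorkDir is a hypothetical reconstruction; the real method
    // in this project may differ.
    func (c *Config) GetEffectiveWorkDir() string {
    	if c.WorkDir != "" {
    		return c.WorkDir // operator-configured scratch space
    	}
    	return os.TempDir() // fall back to the old behavior
    }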
@@ -202,9 +202,9 @@ func (b *Batcher) formatSummaryDigest(events []*Event, success, failure, dbCount
 
 func (b *Batcher) formatCompactDigest(events []*Event, success, failure int) string {
 	if failure > 0 {
-		return fmt.Sprintf("⚠️ %d/%d operations failed", failure, len(events))
+		return fmt.Sprintf("[WARN] %d/%d operations failed", failure, len(events))
 	}
-	return fmt.Sprintf("✅ All %d operations successful", success)
+	return fmt.Sprintf("[OK] All %d operations successful", success)
 }
 
 func (b *Batcher) formatDetailedDigest(events []*Event) string {
@@ -215,9 +215,9 @@ func (b *Batcher) formatDetailedDigest(events []*Event) string {
 		icon := "•"
 		switch e.Severity {
 		case SeverityError, SeverityCritical:
-			icon = "❌"
+			icon = "[FAIL]"
 		case SeverityWarning:
-			icon = "⚠️"
+			icon = "[WARN]"
 		}
 
 		msg += fmt.Sprintf("%s [%s] %s: %s\n",
@@ -183,43 +183,43 @@ func DefaultConfig() Config {
 
 // FormatEventSubject generates a subject line for notifications
 func FormatEventSubject(event *Event) string {
-	icon := "ℹ️"
+	icon := "[INFO]"
 	switch event.Severity {
 	case SeverityWarning:
-		icon = "⚠️"
+		icon = "[WARN]"
 	case SeverityError, SeverityCritical:
-		icon = "❌"
+		icon = "[FAIL]"
 	}
 
 	verb := "Event"
 	switch event.Type {
 	case EventBackupStarted:
 		verb = "Backup Started"
-		icon = "🔄"
+		icon = "[EXEC]"
 	case EventBackupCompleted:
 		verb = "Backup Completed"
-		icon = "✅"
+		icon = "[OK]"
 	case EventBackupFailed:
 		verb = "Backup Failed"
-		icon = "❌"
+		icon = "[FAIL]"
 	case EventRestoreStarted:
 		verb = "Restore Started"
-		icon = "🔄"
+		icon = "[EXEC]"
 	case EventRestoreCompleted:
 		verb = "Restore Completed"
-		icon = "✅"
+		icon = "[OK]"
 	case EventRestoreFailed:
 		verb = "Restore Failed"
-		icon = "❌"
+		icon = "[FAIL]"
 	case EventCleanupCompleted:
 		verb = "Cleanup Completed"
-		icon = "🗑️"
+		icon = "[DEL]"
 	case EventVerifyCompleted:
 		verb = "Verification Passed"
-		icon = "✅"
+		icon = "[OK]"
 	case EventVerifyFailed:
 		verb = "Verification Failed"
-		icon = "❌"
+		icon = "[FAIL]"
 	case EventPITRRecovery:
 		verb = "PITR Recovery"
 		icon = "⏪"
@@ -30,52 +30,52 @@ type Templates struct {
 func DefaultTemplates() map[EventType]Templates {
 	return map[EventType]Templates{
 		EventBackupStarted: {
-			Subject:  "🔄 Backup Started: {{.Database}} on {{.Hostname}}",
+			Subject:  "[EXEC] Backup Started: {{.Database}} on {{.Hostname}}",
 			TextBody: backupStartedText,
 			HTMLBody: backupStartedHTML,
 		},
 		EventBackupCompleted: {
-			Subject:  "✅ Backup Completed: {{.Database}} on {{.Hostname}}",
+			Subject:  "[OK] Backup Completed: {{.Database}} on {{.Hostname}}",
 			TextBody: backupCompletedText,
 			HTMLBody: backupCompletedHTML,
 		},
 		EventBackupFailed: {
-			Subject:  "❌ Backup FAILED: {{.Database}} on {{.Hostname}}",
+			Subject:  "[FAIL] Backup FAILED: {{.Database}} on {{.Hostname}}",
 			TextBody: backupFailedText,
 			HTMLBody: backupFailedHTML,
 		},
 		EventRestoreStarted: {
-			Subject:  "🔄 Restore Started: {{.Database}} on {{.Hostname}}",
+			Subject:  "[EXEC] Restore Started: {{.Database}} on {{.Hostname}}",
 			TextBody: restoreStartedText,
 			HTMLBody: restoreStartedHTML,
 		},
 		EventRestoreCompleted: {
-			Subject:  "✅ Restore Completed: {{.Database}} on {{.Hostname}}",
+			Subject:  "[OK] Restore Completed: {{.Database}} on {{.Hostname}}",
 			TextBody: restoreCompletedText,
 			HTMLBody: restoreCompletedHTML,
 		},
 		EventRestoreFailed: {
-			Subject:  "❌ Restore FAILED: {{.Database}} on {{.Hostname}}",
+			Subject:  "[FAIL] Restore FAILED: {{.Database}} on {{.Hostname}}",
 			TextBody: restoreFailedText,
 			HTMLBody: restoreFailedHTML,
 		},
 		EventVerificationPassed: {
-			Subject:  "✅ Verification Passed: {{.Database}}",
+			Subject:  "[OK] Verification Passed: {{.Database}}",
 			TextBody: verificationPassedText,
 			HTMLBody: verificationPassedHTML,
 		},
 		EventVerificationFailed: {
-			Subject:  "❌ Verification FAILED: {{.Database}}",
+			Subject:  "[FAIL] Verification FAILED: {{.Database}}",
 			TextBody: verificationFailedText,
 			HTMLBody: verificationFailedHTML,
 		},
 		EventDRDrillPassed: {
-			Subject:  "✅ DR Drill Passed: {{.Database}}",
+			Subject:  "[OK] DR Drill Passed: {{.Database}}",
 			TextBody: drDrillPassedText,
 			HTMLBody: drDrillPassedHTML,
 		},
 		EventDRDrillFailed: {
-			Subject:  "❌ DR Drill FAILED: {{.Database}}",
+			Subject:  "[FAIL] DR Drill FAILED: {{.Database}}",
 			TextBody: drDrillFailedText,
 			HTMLBody: drDrillFailedHTML,
 		},
@@ -95,7 +95,7 @@ Started At: {{formatTime .Timestamp}}
 
 const backupStartedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #3498db;">🔄 Backup Started</h2>
+  <h2 style="color: #3498db;">[EXEC] Backup Started</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -121,7 +121,7 @@ Completed: {{formatTime .Timestamp}}
 
 const backupCompletedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #27ae60;">✅ Backup Completed</h2>
+  <h2 style="color: #27ae60;">[OK] Backup Completed</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -137,7 +137,7 @@ const backupCompletedHTML = `
 `
 
 const backupFailedText = `
-⚠️ BACKUP FAILED ⚠️
+[WARN] BACKUP FAILED [WARN]
 
 Database: {{.Database}}
 Hostname: {{.Hostname}}
@@ -152,7 +152,7 @@ Please investigate immediately.
 
 const backupFailedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #e74c3c;">❌ Backup FAILED</h2>
+  <h2 style="color: #e74c3c;">[FAIL] Backup FAILED</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -176,7 +176,7 @@ Started At: {{formatTime .Timestamp}}
 
 const restoreStartedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #3498db;">🔄 Restore Started</h2>
+  <h2 style="color: #3498db;">[EXEC] Restore Started</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -200,7 +200,7 @@ Completed: {{formatTime .Timestamp}}
 
 const restoreCompletedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #27ae60;">✅ Restore Completed</h2>
+  <h2 style="color: #27ae60;">[OK] Restore Completed</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -214,7 +214,7 @@ const restoreCompletedHTML = `
 `
 
 const restoreFailedText = `
-⚠️ RESTORE FAILED ⚠️
+[WARN] RESTORE FAILED [WARN]
 
 Database: {{.Database}}
 Hostname: {{.Hostname}}
@@ -229,7 +229,7 @@ Please investigate immediately.
 
 const restoreFailedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #e74c3c;">❌ Restore FAILED</h2>
+  <h2 style="color: #e74c3c;">[FAIL] Restore FAILED</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -255,7 +255,7 @@ Verified: {{formatTime .Timestamp}}
 
 const verificationPassedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #27ae60;">✅ Verification Passed</h2>
+  <h2 style="color: #27ae60;">[OK] Verification Passed</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -269,7 +269,7 @@ const verificationPassedHTML = `
 `
 
 const verificationFailedText = `
-⚠️ VERIFICATION FAILED ⚠️
+[WARN] VERIFICATION FAILED [WARN]
 
 Database: {{.Database}}
 Hostname: {{.Hostname}}
@@ -284,7 +284,7 @@ Backup integrity may be compromised. Please investigate.
 
 const verificationFailedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #e74c3c;">❌ Verification FAILED</h2>
+  <h2 style="color: #e74c3c;">[FAIL] Verification FAILED</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -314,7 +314,7 @@ Backup restore capability verified.
 
 const drDrillPassedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #27ae60;">✅ DR Drill Passed</h2>
+  <h2 style="color: #27ae60;">[OK] DR Drill Passed</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -326,12 +326,12 @@ const drDrillPassedHTML = `
     {{end}}
   </table>
   {{if .Message}}<p style="margin-top: 20px; color: #27ae60;">{{.Message}}</p>{{end}}
-  <p style="margin-top: 20px; color: #27ae60;">✓ Backup restore capability verified</p>
+  <p style="margin-top: 20px; color: #27ae60;">[OK] Backup restore capability verified</p>
 </div>
 `
 
 const drDrillFailedText = `
-⚠️ DR DRILL FAILED ⚠️
+[WARN] DR DRILL FAILED [WARN]
 
 Database: {{.Database}}
 Hostname: {{.Hostname}}
@@ -346,7 +346,7 @@ Backup may not be restorable. Please investigate immediately.
 
 const drDrillFailedHTML = `
 <div style="font-family: Arial, sans-serif; padding: 20px;">
-  <h2 style="color: #e74c3c;">❌ DR Drill FAILED</h2>
+  <h2 style="color: #e74c3c;">[FAIL] DR Drill FAILED</h2>
   <table style="border-collapse: collapse; width: 100%; max-width: 600px;">
     <tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
     <tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
@@ -212,7 +212,11 @@ func (m *BinlogManager) detectTools() error {
 
 // detectServerType determines if we're working with MySQL or MariaDB
 func (m *BinlogManager) detectServerType() DatabaseType {
-	cmd := exec.Command(m.mysqlbinlogPath, "--version")
+	// Use timeout to prevent blocking if command hangs
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, m.mysqlbinlogPath, "--version")
 	output, err := cmd.Output()
 	if err != nil {
 		return DatabaseMySQL // Default to MySQL
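Review note: exec.CommandContext arranges for the child process to be killed once the context is done, and Output then returns an error, which this function maps to the DatabaseMySQL default instead of hanging version detection forever. A self-contained sketch of that timeout behavior (sleep stands in for a wedged mysqlbinlog):

    package main

    import (
    	"context"
    	"fmt"
    	"os/exec"
    	"time"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    	defer cancel()

    	// Stand-in for a hanging mysqlbinlog --version invocation.
    	out, err := exec.CommandContext(ctx, "sleep", "10").Output()
    	fmt.Println(len(out), err) // 0 signal: killed
    	fmt.Println(ctx.Err())     // context deadline exceeded
    }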
@@ -43,9 +43,9 @@ type RestoreOptions struct {
 
 // RestorePointInTime performs a Point-in-Time Recovery
 func (ro *RestoreOrchestrator) RestorePointInTime(ctx context.Context, opts *RestoreOptions) error {
-	ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	ro.log.Info("=====================================================")
 	ro.log.Info("  Point-in-Time Recovery (PITR)")
-	ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	ro.log.Info("=====================================================")
 	ro.log.Info("")
 	ro.log.Info("Target:", "summary", opts.Target.Summary())
 	ro.log.Info("Base Backup:", "path", opts.BaseBackupPath)
@@ -91,11 +91,11 @@ func (ro *RestoreOrchestrator) RestorePointInTime(ctx context.Context, opts *Res
 		return fmt.Errorf("failed to generate recovery configuration: %w", err)
 	}
 
-	ro.log.Info("✅ Recovery configuration generated successfully")
+	ro.log.Info("[OK] Recovery configuration generated successfully")
 	ro.log.Info("")
-	ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	ro.log.Info("=====================================================")
 	ro.log.Info("  Next Steps:")
-	ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
+	ro.log.Info("=====================================================")
 	ro.log.Info("")
 	ro.log.Info("1. Start PostgreSQL to begin recovery:")
 	ro.log.Info(fmt.Sprintf("   pg_ctl -D %s start", opts.TargetDataDir))
@@ -192,7 +192,7 @@ func (ro *RestoreOrchestrator) validateInputs(opts *RestoreOptions) error {
 		}
 	}
 
-	ro.log.Info("✅ Validation passed")
+	ro.log.Info("[OK] Validation passed")
 	return nil
 }
 
@@ -238,7 +238,7 @@ func (ro *RestoreOrchestrator) extractTarGzBackup(ctx context.Context, source, d
 		return fmt.Errorf("tar extraction failed: %w", err)
 	}
 
-	ro.log.Info("✅ Base backup extracted successfully")
+	ro.log.Info("[OK] Base backup extracted successfully")
 	return nil
 }
 
@@ -254,7 +254,7 @@ func (ro *RestoreOrchestrator) extractTarBackup(ctx context.Context, source, des
 		return fmt.Errorf("tar extraction failed: %w", err)
 	}
 
-	ro.log.Info("✅ Base backup extracted successfully")
+	ro.log.Info("[OK] Base backup extracted successfully")
 	return nil
 }
 
@@ -270,7 +270,7 @@ func (ro *RestoreOrchestrator) copyDirectoryBackup(ctx context.Context, source,
 		return fmt.Errorf("directory copy failed: %w", err)
 	}
 
-	ro.log.Info("✅ Base backup copied successfully")
+	ro.log.Info("[OK] Base backup copied successfully")
 	return nil
 }
 
@@ -291,7 +291,7 @@ func (ro *RestoreOrchestrator) startPostgreSQL(ctx context.Context, opts *Restor
 		return fmt.Errorf("pg_ctl start failed: %w", err)
 	}
 
-	ro.log.Info("✅ PostgreSQL started successfully")
+	ro.log.Info("[OK] PostgreSQL started successfully")
 	ro.log.Info("PostgreSQL is now performing recovery...")
 	return nil
 }
@@ -320,7 +320,7 @@ func (ro *RestoreOrchestrator) monitorRecovery(ctx context.Context, opts *Restor
 		// Check if recovery is complete by looking for postmaster.pid
 		pidFile := filepath.Join(opts.TargetDataDir, "postmaster.pid")
 		if _, err := os.Stat(pidFile); err == nil {
-			ro.log.Info("✅ PostgreSQL is running")
+			ro.log.Info("[OK] PostgreSQL is running")
 
 			// Check if recovery files still exist
 			recoverySignal := filepath.Join(opts.TargetDataDir, "recovery.signal")
@@ -328,7 +328,7 @@ func (ro *RestoreOrchestrator) monitorRecovery(ctx context.Context, opts *Restor
 
 			if _, err := os.Stat(recoverySignal); os.IsNotExist(err) {
 				if _, err := os.Stat(recoveryConf); os.IsNotExist(err) {
-					ro.log.Info("✅ Recovery completed - PostgreSQL promoted to primary")
+					ro.log.Info("[OK] Recovery completed - PostgreSQL promoted to primary")
 					return nil
 				}
 			}
@@ -256,7 +256,7 @@ func (ot *OperationTracker) Complete(message string) {
 
 	// Complete visual indicator
 	if ot.reporter.indicator != nil {
-		ot.reporter.indicator.Complete(fmt.Sprintf("✅ %s", message))
+		ot.reporter.indicator.Complete(fmt.Sprintf("[OK] %s", message))
 	}
 
 	// Log completion with duration
@@ -286,7 +286,7 @@ func (ot *OperationTracker) Fail(err error) {
 
 	// Fail visual indicator
 	if ot.reporter.indicator != nil {
-		ot.reporter.indicator.Fail(fmt.Sprintf("❌ %s", err.Error()))
+		ot.reporter.indicator.Fail(fmt.Sprintf("[FAIL] %s", err.Error()))
 	}
 
 	// Log failure
@@ -427,7 +427,7 @@ type OperationSummary struct {
 // FormatSummary returns a formatted string representation of the summary
 func (os *OperationSummary) FormatSummary() string {
 	return fmt.Sprintf(
-		"📊 Operations Summary:\n"+
+		"[STATS] Operations Summary:\n"+
 			"  Total: %d | Completed: %d | Failed: %d | Running: %d\n"+
 			"  Total Duration: %s",
 		os.TotalOperations,
@@ -6,6 +6,16 @@ import (
     "os"
     "strings"
     "time"

+    "github.com/fatih/color"
+    "github.com/schollz/progressbar/v3"
+)
+
+// Color printers for progress indicators
+var (
+    okColor   = color.New(color.FgGreen, color.Bold)
+    failColor = color.New(color.FgRed, color.Bold)
+    warnColor = color.New(color.FgYellow, color.Bold)
 )

 // Indicator represents a progress indicator interface
@@ -92,13 +102,15 @@ func (s *Spinner) Update(message string) {
 // Complete stops the spinner with a success message
 func (s *Spinner) Complete(message string) {
     s.Stop()
-    fmt.Fprintf(s.writer, "\n✅ %s\n", message)
+    okColor.Fprint(s.writer, "[OK] ")
+    fmt.Fprintln(s.writer, message)
 }

 // Fail stops the spinner with a failure message
 func (s *Spinner) Fail(message string) {
     s.Stop()
-    fmt.Fprintf(s.writer, "\n❌ %s\n", message)
+    failColor.Fprint(s.writer, "[FAIL] ")
+    fmt.Fprintln(s.writer, message)
 }

 // Stop stops the spinner
@@ -167,13 +179,15 @@ func (d *Dots) Update(message string) {
 // Complete stops the dots with a success message
 func (d *Dots) Complete(message string) {
     d.Stop()
-    fmt.Fprintf(d.writer, " ✅ %s\n", message)
+    okColor.Fprint(d.writer, " [OK] ")
+    fmt.Fprintln(d.writer, message)
 }

 // Fail stops the dots with a failure message
 func (d *Dots) Fail(message string) {
     d.Stop()
-    fmt.Fprintf(d.writer, " ❌ %s\n", message)
+    failColor.Fprint(d.writer, " [FAIL] ")
+    fmt.Fprintln(d.writer, message)
 }

 // Stop stops the dots indicator
@@ -239,14 +253,16 @@ func (p *ProgressBar) Complete(message string) {
     p.current = p.total
     p.message = message
     p.render()
-    fmt.Fprintf(p.writer, " ✅ %s\n", message)
+    okColor.Fprint(p.writer, " [OK] ")
+    fmt.Fprintln(p.writer, message)
     p.Stop()
 }

 // Fail stops the progress bar with failure
 func (p *ProgressBar) Fail(message string) {
     p.render()
-    fmt.Fprintf(p.writer, " ❌ %s\n", message)
+    failColor.Fprint(p.writer, " [FAIL] ")
+    fmt.Fprintln(p.writer, message)
     p.Stop()
 }

@@ -298,12 +314,14 @@ func (s *Static) Update(message string) {

 // Complete shows completion message
 func (s *Static) Complete(message string) {
-    fmt.Fprintf(s.writer, " ✅ %s\n", message)
+    okColor.Fprint(s.writer, " [OK] ")
+    fmt.Fprintln(s.writer, message)
 }

 // Fail shows failure message
 func (s *Static) Fail(message string) {
-    fmt.Fprintf(s.writer, " ❌ %s\n", message)
+    failColor.Fprint(s.writer, " [FAIL] ")
+    fmt.Fprintln(s.writer, message)
 }

 // Stop does nothing for static indicator
@@ -359,7 +377,7 @@ func (l *LineByLine) Start(message string) {
     if l.estimator != nil {
         displayMsg = l.estimator.GetFullStatus(message)
     }
-    fmt.Fprintf(l.writer, "\n🔄 %s\n", displayMsg)
+    fmt.Fprintf(l.writer, "\n[SYNC] %s\n", displayMsg)
 }

 // Update shows an update message
@@ -380,12 +398,14 @@ func (l *LineByLine) SetEstimator(estimator *ETAEstimator) {

 // Complete shows completion message
 func (l *LineByLine) Complete(message string) {
-    fmt.Fprintf(l.writer, "✅ %s\n\n", message)
+    okColor.Fprint(l.writer, "[OK] ")
+    fmt.Fprintf(l.writer, "%s\n\n", message)
 }

 // Fail shows failure message
 func (l *LineByLine) Fail(message string) {
-    fmt.Fprintf(l.writer, "❌ %s\n\n", message)
+    failColor.Fprint(l.writer, "[FAIL] ")
+    fmt.Fprintf(l.writer, "%s\n\n", message)
 }

 // Stop does nothing for line-by-line (no cleanup needed)
@@ -396,7 +416,7 @@ func (l *LineByLine) Stop() {
 // Light indicator methods - minimal output
 func (l *Light) Start(message string) {
     if !l.silent {
-        fmt.Fprintf(l.writer, "▶ %s\n", message)
+        fmt.Fprintf(l.writer, "> %s\n", message)
     }
 }

@@ -408,13 +428,15 @@ func (l *Light) Update(message string) {

 func (l *Light) Complete(message string) {
     if !l.silent {
-        fmt.Fprintf(l.writer, "✓ %s\n", message)
+        okColor.Fprint(l.writer, "[OK] ")
+        fmt.Fprintln(l.writer, message)
     }
 }

 func (l *Light) Fail(message string) {
     if !l.silent {
-        fmt.Fprintf(l.writer, "✗ %s\n", message)
+        failColor.Fprint(l.writer, "[FAIL] ")
+        fmt.Fprintln(l.writer, message)
     }
 }

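The Complete/Fail rewrites above all follow the same two-step pattern: print the colored status tag with a fatih/color printer, then print the uncolored message. That keeps ANSI escape codes out of the message text itself, which matters when output is redirected to a log file. A minimal self-contained sketch of the idiom, assuming only the fatih/color calls already used in this diff:

package main

import (
    "os"

    "github.com/fatih/color"
)

func main() {
    ok := color.New(color.FgGreen, color.Bold)
    // Only the "[OK] " tag carries color; the message stays plain text.
    ok.Fprint(os.Stdout, "[OK] ")
    os.Stdout.WriteString("backup verified\n")
}
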
@@ -440,6 +462,8 @@ func NewIndicator(interactive bool, indicatorType string) Indicator {
         return NewDots()
     case "bar":
         return NewProgressBar(100) // Default to 100 steps
+    case "schollz":
+        return NewSchollzBarItems(100, "Progress")
     case "line":
         return NewLineByLine()
     case "light":
@@ -463,3 +487,161 @@ func (n *NullIndicator) Complete(message string) {}
 func (n *NullIndicator) Fail(message string) {}
 func (n *NullIndicator) Stop() {}
 func (n *NullIndicator) SetEstimator(estimator *ETAEstimator) {}
+
+// SchollzBar wraps schollz/progressbar for enhanced progress display
+// Ideal for byte-based operations like archive extraction and file transfers
+type SchollzBar struct {
+    bar       *progressbar.ProgressBar
+    message   string
+    total     int64
+    estimator *ETAEstimator
+}
+
+// NewSchollzBar creates a new schollz progressbar with byte-based progress
+func NewSchollzBar(total int64, description string) *SchollzBar {
+    bar := progressbar.NewOptions64(
+        total,
+        progressbar.OptionEnableColorCodes(true),
+        progressbar.OptionShowBytes(true),
+        progressbar.OptionSetWidth(40),
+        progressbar.OptionSetDescription(description),
+        progressbar.OptionSetTheme(progressbar.Theme{
+            Saucer:        "[green]█[reset]",
+            SaucerHead:    "[green]▌[reset]",
+            SaucerPadding: "░",
+            BarStart:      "[",
+            BarEnd:        "]",
+        }),
+        progressbar.OptionShowCount(),
+        progressbar.OptionSetPredictTime(true),
+        progressbar.OptionFullWidth(),
+        progressbar.OptionClearOnFinish(),
+    )
+    return &SchollzBar{
+        bar:     bar,
+        message: description,
+        total:   total,
+    }
+}
+
+// NewSchollzBarItems creates a progressbar for item counts (not bytes)
+func NewSchollzBarItems(total int, description string) *SchollzBar {
+    bar := progressbar.NewOptions(
+        total,
+        progressbar.OptionEnableColorCodes(true),
+        progressbar.OptionShowCount(),
+        progressbar.OptionSetWidth(40),
+        progressbar.OptionSetDescription(description),
+        progressbar.OptionSetTheme(progressbar.Theme{
+            Saucer:        "[cyan]█[reset]",
+            SaucerHead:    "[cyan]▌[reset]",
+            SaucerPadding: "░",
+            BarStart:      "[",
+            BarEnd:        "]",
+        }),
+        progressbar.OptionSetPredictTime(true),
+        progressbar.OptionFullWidth(),
+        progressbar.OptionClearOnFinish(),
+    )
+    return &SchollzBar{
+        bar:     bar,
+        message: description,
+        total:   int64(total),
+    }
+}
+
+// NewSchollzSpinner creates an indeterminate spinner for unknown-length operations
+func NewSchollzSpinner(description string) *SchollzBar {
+    bar := progressbar.NewOptions(
+        -1, // Indeterminate
+        progressbar.OptionEnableColorCodes(true),
+        progressbar.OptionSetWidth(40),
+        progressbar.OptionSetDescription(description),
+        progressbar.OptionSpinnerType(14), // Braille spinner
+        progressbar.OptionFullWidth(),
+    )
+    return &SchollzBar{
+        bar:     bar,
+        message: description,
+        total:   -1,
+    }
+}
+
+// Start initializes the progress bar (Indicator interface)
+func (s *SchollzBar) Start(message string) {
+    s.message = message
+    s.bar.Describe(message)
+}
+
+// Update updates the description (Indicator interface)
+func (s *SchollzBar) Update(message string) {
+    s.message = message
+    s.bar.Describe(message)
+}
+
+// Add adds bytes/items to the progress
+func (s *SchollzBar) Add(n int) error {
+    return s.bar.Add(n)
+}
+
+// Add64 adds bytes to the progress (for large files)
+func (s *SchollzBar) Add64(n int64) error {
+    return s.bar.Add64(n)
+}
+
+// Set sets the current progress value
+func (s *SchollzBar) Set(n int) error {
+    return s.bar.Set(n)
+}
+
+// Set64 sets the current progress value (for large files)
+func (s *SchollzBar) Set64(n int64) error {
+    return s.bar.Set64(n)
+}
+
+// ChangeMax updates the maximum value
+func (s *SchollzBar) ChangeMax(max int) {
+    s.bar.ChangeMax(max)
+    s.total = int64(max)
+}
+
+// ChangeMax64 updates the maximum value (for large files)
+func (s *SchollzBar) ChangeMax64(max int64) {
+    s.bar.ChangeMax64(max)
+    s.total = max
+}
+
+// Complete finishes with success (Indicator interface)
+func (s *SchollzBar) Complete(message string) {
+    _ = s.bar.Finish()
+    okColor.Print("[OK] ")
+    fmt.Println(message)
+}
+
+// Fail finishes with failure (Indicator interface)
+func (s *SchollzBar) Fail(message string) {
+    _ = s.bar.Clear()
+    failColor.Print("[FAIL] ")
+    fmt.Println(message)
+}
+
+// Stop stops the progress bar (Indicator interface)
+func (s *SchollzBar) Stop() {
+    _ = s.bar.Clear()
+}
+
+// SetEstimator is a no-op (schollz has built-in ETA)
+func (s *SchollzBar) SetEstimator(estimator *ETAEstimator) {
+    s.estimator = estimator
+}
+
+// Writer returns an io.Writer that updates progress as data is written
+// Useful for wrapping readers/writers in copy operations
+func (s *SchollzBar) Writer() io.Writer {
+    return s.bar
+}
+
+// Finish marks the progress as complete
+func (s *SchollzBar) Finish() error {
+    return s.bar.Finish()
+}
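Because SchollzBar exposes its underlying bar as an io.Writer, byte-accurate progress can be attached to any stream copy without manual Add64 calls. A sketch of that wiring, assuming the SchollzBar API added above (the helper function itself is illustrative, not part of the diff):

// copyWithProgress copies src to dst while the bar tracks bytes written.
func copyWithProgress(dst io.Writer, src io.Reader, size int64) error {
    bar := NewSchollzBar(size, "Copying")
    // io.MultiWriter duplicates every write into the progress bar.
    if _, err := io.Copy(io.MultiWriter(dst, bar.Writer()), src); err != nil {
        bar.Fail("Copy failed")
        return err
    }
    return bar.Finish()
}
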
@@ -296,11 +296,11 @@ func generateID() string {
 func StatusIcon(s ComplianceStatus) string {
     switch s {
     case StatusCompliant:
-        return "✅"
+        return "[OK]"
     case StatusNonCompliant:
-        return "❌"
+        return "[FAIL]"
     case StatusPartial:
-        return "⚠️"
+        return "[WARN]"
     case StatusNotApplicable:
         return "➖"
     default:
@@ -12,6 +12,7 @@ import (
     "dbbackup/internal/cloud"
     "dbbackup/internal/logger"
     "dbbackup/internal/metadata"
+    "dbbackup/internal/progress"
 )

 // CloudDownloader handles downloading backups from cloud storage
@@ -47,9 +48,10 @@ type DownloadResult struct {

 // Download downloads a backup from cloud storage
 func (d *CloudDownloader) Download(ctx context.Context, remotePath string, opts DownloadOptions) (*DownloadResult, error) {
-    // Determine temp directory
+    // Determine temp directory (use from opts, or from config's WorkDir, or fallback to system temp)
     tempDir := opts.TempDir
     if tempDir == "" {
+        // Try to get from config if available (passed via opts.TempDir)
         tempDir = os.TempDir()
     }

@@ -72,25 +74,43 @@ func (d *CloudDownloader) Download(ctx context.Context, remotePath string, opts
         size = 0 // Continue anyway
     }

-    // Progress callback
-    var lastPercent int
+    // Create schollz progressbar for visual download progress
+    var bar *progress.SchollzBar
+    if size > 0 {
+        bar = progress.NewSchollzBar(size, fmt.Sprintf("Downloading %s", filename))
+    } else {
+        bar = progress.NewSchollzSpinner(fmt.Sprintf("Downloading %s", filename))
+    }
+
+    // Progress callback with schollz progressbar
+    var lastBytes int64
     progressCallback := func(transferred, total int64) {
-        if total > 0 {
-            percent := int(float64(transferred) / float64(total) * 100)
-            if percent != lastPercent && percent%10 == 0 {
-                d.log.Info("Download progress", "percent", percent, "transferred", cloud.FormatSize(transferred), "total", cloud.FormatSize(total))
-                lastPercent = percent
-            }
+        if bar != nil {
+            // Update progress bar with delta
+            delta := transferred - lastBytes
+            if delta > 0 {
+                _ = bar.Add64(delta)
+            }
+            lastBytes = transferred
         }
     }

     // Download file
     if err := d.backend.Download(ctx, remotePath, localPath, progressCallback); err != nil {
+        if bar != nil {
+            bar.Fail("Download failed")
+        }
         // Cleanup on failure
         os.RemoveAll(tempSubDir)
         return nil, fmt.Errorf("download failed: %w", err)
     }

+    if bar != nil {
+        _ = bar.Finish()
+    }
+
+    d.log.Info("Download completed", "size", cloud.FormatSize(size))
+
     result := &DownloadResult{
         LocalPath:  localPath,
         RemotePath: remotePath,
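The callback converts the backend's cumulative transferred count into increments, because progressbar's Add64 accumulates while cloud backends report running totals. The conversion in isolation (the callback signature mirrors the one in the diff; the constructor is illustrative):

// newDeltaCallback adapts a cumulative byte counter to an increment-based bar.
func newDeltaCallback(add func(int64) error) func(transferred, total int64) {
    var lastBytes int64
    return func(transferred, total int64) {
        if delta := transferred - lastBytes; delta > 0 {
            _ = add(delta) // e.g. bar.Add64
        }
        lastBytes = transferred
    }
}
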
@@ -114,7 +134,7 @@ func (d *CloudDownloader) Download(ctx context.Context, remotePath string, opts
     // Verify checksum if requested
     if opts.VerifyChecksum {
         d.log.Info("Verifying checksum...")
-        checksum, err := calculateSHA256(localPath)
+        checksum, err := calculateSHA256WithProgress(localPath)
         if err != nil {
             // Cleanup on verification failure
             os.RemoveAll(tempSubDir)
@@ -185,6 +205,35 @@ func calculateSHA256(filePath string) (string, error) {
     return hex.EncodeToString(hash.Sum(nil)), nil
 }

+// calculateSHA256WithProgress calculates SHA-256 with visual progress bar
+func calculateSHA256WithProgress(filePath string) (string, error) {
+    file, err := os.Open(filePath)
+    if err != nil {
+        return "", err
+    }
+    defer file.Close()
+
+    // Get file size for progress bar
+    stat, err := file.Stat()
+    if err != nil {
+        return "", err
+    }
+
+    bar := progress.NewSchollzBar(stat.Size(), "Verifying checksum")
+    hash := sha256.New()
+
+    // Create a multi-writer to update both hash and progress
+    writer := io.MultiWriter(hash, bar.Writer())
+
+    if _, err := io.Copy(writer, file); err != nil {
+        bar.Fail("Verification failed")
+        return "", err
+    }
+
+    _ = bar.Finish()
+    return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
 // DownloadFromCloudURI is a convenience function to download from a cloud URI
 func DownloadFromCloudURI(ctx context.Context, uri string, opts DownloadOptions) (*DownloadResult, error) {
     // Parse URI
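calculateSHA256WithProgress relies on io.MultiWriter so the file is read exactly once while both the hash and the progress bar observe the stream. The same single-pass idea with only the standard library (the file name is illustrative):

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "os"
)

// hashFile streams a file through SHA-256 once, reporting bytes to a sink.
func hashFile(path string, sink io.Writer) (string, error) {
    f, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer f.Close()

    h := sha256.New()
    // One read pass feeds both the hash and the observer (progress, logger, ...).
    if _, err := io.Copy(io.MultiWriter(h, sink), f); err != nil {
        return "", err
    }
    return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
    sum, err := hashFile("backup.dump", io.Discard) // io.Discard: no observer
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    fmt.Println(sum)
}
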
@@ -4,6 +4,7 @@ import (
     "bufio"
     "bytes"
     "compress/gzip"
+    "context"
     "encoding/json"
     "fmt"
     "io"
@@ -12,6 +13,7 @@ import (
     "path/filepath"
     "regexp"
     "strings"
+    "time"

     "dbbackup/internal/logger"
 )
@@ -60,9 +62,9 @@ type DiagnoseDetails struct {
     TableList []string `json:"table_list,omitempty"`

     // Compression analysis
     GzipValid        bool    `json:"gzip_valid,omitempty"`
     GzipError        string  `json:"gzip_error,omitempty"`
     ExpandedSize     int64   `json:"expanded_size,omitempty"`
     CompressionRatio float64 `json:"compression_ratio,omitempty"`
 }

@@ -157,7 +159,7 @@ func (d *Diagnoser) diagnosePgDump(filePath string, result *DiagnoseResult) {
         result.IsCorrupted = true
         result.Details.HasPGDMPSignature = false
         result.Details.FirstBytes = fmt.Sprintf("%q", header[:minInt(n, 20)])
         result.Errors = append(result.Errors,
             "Missing PGDMP signature - file is NOT PostgreSQL custom format",
             "This file may be SQL format incorrectly named as .dump",
             "Try: file "+filePath+" to check actual file type")
@@ -185,7 +187,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
         result.IsCorrupted = true
         result.Details.GzipValid = false
         result.Details.GzipError = err.Error()
         result.Errors = append(result.Errors,
             fmt.Sprintf("Invalid gzip format: %v", err),
             "The file may be truncated or corrupted during transfer")
         return
@@ -210,7 +212,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
     } else {
         result.Details.HasPGDMPSignature = false
         result.Details.FirstBytes = fmt.Sprintf("%q", header[:minInt(n, 20)])

         // Check if it's actually SQL content
         content := string(header[:n])
         if strings.Contains(content, "PostgreSQL") || strings.Contains(content, "pg_dump") ||
@@ -233,7 +235,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
     // Verify full gzip stream integrity by reading to end
     file.Seek(0, 0)
     gz, _ = gzip.NewReader(file)

     var totalRead int64
     buf := make([]byte, 32*1024)
     for {
@@ -255,7 +257,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
         }
     }
     gz.Close()

     result.Details.ExpandedSize = totalRead
     if result.FileSize > 0 {
         result.Details.CompressionRatio = float64(totalRead) / float64(result.FileSize)
@@ -366,7 +368,7 @@ func (d *Diagnoser) diagnoseSQLScript(filePath string, compressed bool, result *
     }

     // Store last line for termination check
-    if lineNumber > 0 && (lineNumber%100000 == 0) && d.verbose {
+    if lineNumber > 0 && (lineNumber%100000 == 0) && d.verbose && d.log != nil {
         d.log.Debug("Scanning SQL file", "lines_processed", lineNumber)
     }
 }
@@ -392,7 +394,7 @@ func (d *Diagnoser) diagnoseSQLScript(filePath string, compressed bool, result *
         lastCopyTable, copyStartLine),
         "The backup was truncated during data export",
         "This explains the 'syntax error' during restore - COPY data is being interpreted as SQL")

     if len(copyDataSamples) > 0 {
         result.Errors = append(result.Errors,
             fmt.Sprintf("Sample orphaned data: %s", copyDataSamples[0]))
@@ -412,20 +414,123 @@ func (d *Diagnoser) diagnoseSQLScript(filePath string, compressed bool, result *

 // diagnoseClusterArchive analyzes a cluster tar.gz archive
 func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResult) {
-    // First verify tar.gz integrity
-    cmd := exec.Command("tar", "-tzf", filePath)
-    output, err := cmd.Output()
-    if err != nil {
-        result.IsValid = false
-        result.IsCorrupted = true
-        result.Errors = append(result.Errors,
-            fmt.Sprintf("Tar archive is invalid or corrupted: %v", err),
-            "Run: tar -tzf "+filePath+" 2>&1 | tail -20")
+    // Calculate dynamic timeout based on file size
+    // Large archives (100GB+) can take significant time to list
+    // Minimum 5 minutes, scales with file size, max 180 minutes for very large archives
+    timeoutMinutes := 5
+    if result.FileSize > 0 {
+        // 1 minute per 2 GB, minimum 5 minutes, max 180 minutes
+        sizeGB := result.FileSize / (1024 * 1024 * 1024)
+        estimatedMinutes := int(sizeGB/2) + 5
+        if estimatedMinutes > timeoutMinutes {
+            timeoutMinutes = estimatedMinutes
+        }
+        if timeoutMinutes > 180 {
+            timeoutMinutes = 180
+        }
+    }
+
+    if d.log != nil {
+        d.log.Info("Verifying cluster archive integrity",
+            "size", fmt.Sprintf("%.1f GB", float64(result.FileSize)/(1024*1024*1024)),
+            "timeout", fmt.Sprintf("%d min", timeoutMinutes))
+    }
+
+    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute)
+    defer cancel()
+
+    // Use streaming approach with pipes to avoid memory issues with large archives
+    cmd := exec.CommandContext(ctx, "tar", "-tzf", filePath)
+    stdout, pipeErr := cmd.StdoutPipe()
+    if pipeErr != nil {
+        // Pipe creation failed - not a corruption issue
+        result.Warnings = append(result.Warnings,
+            fmt.Sprintf("Cannot create pipe for verification: %v", pipeErr),
+            "Archive integrity cannot be verified but may still be valid")
         return
     }

-    // Parse tar listing
-    files := strings.Split(strings.TrimSpace(string(output)), "\n")
+    var stderrBuf bytes.Buffer
+    cmd.Stderr = &stderrBuf
+
+    if startErr := cmd.Start(); startErr != nil {
+        result.Warnings = append(result.Warnings,
+            fmt.Sprintf("Cannot start tar verification: %v", startErr),
+            "Archive integrity cannot be verified but may still be valid")
+        return
+    }
+
+    // Stream output line by line to avoid buffering entire listing in memory
+    scanner := bufio.NewScanner(stdout)
+    scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) // Allow long paths
+
+    var files []string
+    fileCount := 0
+    for scanner.Scan() {
+        fileCount++
+        line := scanner.Text()
+        // Only store dump/metadata files, not every file
+        if strings.HasSuffix(line, ".dump") || strings.HasSuffix(line, ".sql.gz") ||
+            strings.HasSuffix(line, ".sql") || strings.HasSuffix(line, ".json") ||
+            strings.Contains(line, "globals") || strings.Contains(line, "manifest") ||
+            strings.Contains(line, "metadata") {
+            files = append(files, line)
+        }
+    }
+
+    scanErr := scanner.Err()
+    waitErr := cmd.Wait()
+    stderrOutput := stderrBuf.String()
+
+    // Handle errors - distinguish between actual corruption and resource/timeout issues
+    if waitErr != nil || scanErr != nil {
+        // Check if it was a timeout
+        if ctx.Err() == context.DeadlineExceeded {
+            result.Warnings = append(result.Warnings,
+                fmt.Sprintf("Verification timed out after %d minutes - archive is very large", timeoutMinutes),
+                "This does not necessarily mean the archive is corrupted",
+                "Manual verification: tar -tzf "+filePath+" | wc -l")
+            // Don't mark as corrupted or invalid on timeout - archive may be fine
+            if fileCount > 0 {
+                result.Details.TableCount = len(files)
+                result.Details.TableList = files
+            }
+            return
+        }
+
+        // Check for specific gzip/tar corruption indicators
+        if strings.Contains(stderrOutput, "unexpected end of file") ||
+            strings.Contains(stderrOutput, "Unexpected EOF") ||
+            strings.Contains(stderrOutput, "gzip: stdin: unexpected end of file") ||
+            strings.Contains(stderrOutput, "not in gzip format") ||
+            strings.Contains(stderrOutput, "invalid compressed data") {
+            // These indicate actual corruption
+            result.IsValid = false
+            result.IsCorrupted = true
+            result.Errors = append(result.Errors,
+                "Tar archive appears truncated or corrupted",
+                fmt.Sprintf("Error: %s", truncateString(stderrOutput, 200)),
+                "Run: tar -tzf "+filePath+" 2>&1 | tail -20")
+            return
+        }
+
+        // Other errors (signal killed, memory, etc.) - not necessarily corruption
+        // If we read some files successfully, the archive structure is likely OK
+        if fileCount > 0 {
+            result.Warnings = append(result.Warnings,
+                fmt.Sprintf("Verification incomplete (read %d files before error)", fileCount),
+                "Archive may still be valid - error could be due to system resources")
+            // Proceed with what we got
+        } else {
+            // Couldn't read anything - but don't mark as corrupted without clear evidence
+            result.Warnings = append(result.Warnings,
+                fmt.Sprintf("Cannot verify archive: %v", waitErr),
+                "Archive integrity is uncertain - proceed with caution or verify manually")
+            return
+        }
+    }
+
+    // Parse the collected file list
     var dumpFiles []string
     hasGlobals := false
     hasMetadata := false
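The size-scaled timeout logic recurs in this changeset with different constants: roughly 1 minute per 2 GB capped at 180 for tar listing here, 1 per 5 GB capped at 30 for pg_restore --list, and 1 per 3 GB capped at 120 for the cluster listing below. A helper like the following could centralize the policy; the function is a sketch, not part of the diff:

// scaledTimeout returns baseMin + 1 minute per perGB gigabytes, capped at maxMin.
func scaledTimeout(sizeBytes int64, perGB, baseMin, maxMin int) time.Duration {
    minutes := baseMin
    if sizeBytes > 0 {
        if est := int(sizeBytes/(1024*1024*1024))/perGB + baseMin; est > minutes {
            minutes = est
        }
        if minutes > maxMin {
            minutes = maxMin
        }
    }
    return time.Duration(minutes) * time.Minute
}

// Usage matching the archive-listing policy above:
//   ctx, cancel := context.WithTimeout(ctx, scaledTimeout(result.FileSize, 2, 5, 180))
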
@@ -458,7 +563,7 @@ func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResu
     }

     // For verbose mode, diagnose individual dumps inside the archive
-    if d.verbose && len(dumpFiles) > 0 {
+    if d.verbose && len(dumpFiles) > 0 && d.log != nil {
         d.log.Info("Cluster archive contains databases", "count", len(dumpFiles))
         for _, df := range dumpFiles {
             d.log.Info("  - " + df)
@@ -491,13 +596,31 @@ func (d *Diagnoser) diagnoseUnknown(filePath string, result *DiagnoseResult) {

 // verifyWithPgRestore uses pg_restore --list to verify dump integrity
 func (d *Diagnoser) verifyWithPgRestore(filePath string, result *DiagnoseResult) {
-    cmd := exec.Command("pg_restore", "--list", filePath)
+    // Calculate dynamic timeout based on file size
+    // pg_restore --list is usually faster than tar -tzf for same size
+    timeoutMinutes := 5
+    if result.FileSize > 0 {
+        // 1 minute per 5 GB, minimum 5 minutes, max 30 minutes
+        sizeGB := result.FileSize / (1024 * 1024 * 1024)
+        estimatedMinutes := int(sizeGB/5) + 5
+        if estimatedMinutes > timeoutMinutes {
+            timeoutMinutes = estimatedMinutes
+        }
+        if timeoutMinutes > 30 {
+            timeoutMinutes = 30
+        }
+    }
+
+    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute)
+    defer cancel()
+
+    cmd := exec.CommandContext(ctx, "pg_restore", "--list", filePath)
     output, err := cmd.CombinedOutput()

     if err != nil {
         result.Details.PgRestoreListable = false
         result.Details.PgRestoreError = string(output)

         // Check for specific errors
         errStr := string(output)
         if strings.Contains(errStr, "unexpected end of file") ||
@@ -543,10 +666,74 @@ func (d *Diagnoser) verifyWithPgRestore(filePath string, result *DiagnoseResult)

 // DiagnoseClusterDumps extracts and diagnoses all dumps in a cluster archive
 func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*DiagnoseResult, error) {
-    // First, try to list archive contents without extracting (fast check)
-    listCmd := exec.Command("tar", "-tzf", archivePath)
-    listOutput, listErr := listCmd.CombinedOutput()
-    if listErr != nil {
+    // Get archive size for dynamic timeout calculation
+    archiveInfo, err := os.Stat(archivePath)
+    if err != nil {
+        return nil, fmt.Errorf("cannot stat archive: %w", err)
+    }
+
+    // Dynamic timeout based on archive size: base 10 min + 1 min per 3 GB
+    // Large archives like 100+ GB need more time for tar -tzf
+    timeoutMinutes := 10
+    if archiveInfo.Size() > 0 {
+        sizeGB := archiveInfo.Size() / (1024 * 1024 * 1024)
+        estimatedMinutes := int(sizeGB/3) + 10
+        if estimatedMinutes > timeoutMinutes {
+            timeoutMinutes = estimatedMinutes
+        }
+        if timeoutMinutes > 120 { // Max 2 hours
+            timeoutMinutes = 120
+        }
+    }
+
+    if d.log != nil {
+        d.log.Info("Listing cluster archive contents",
+            "size", fmt.Sprintf("%.1f GB", float64(archiveInfo.Size())/(1024*1024*1024)),
+            "timeout", fmt.Sprintf("%d min", timeoutMinutes))
+    }
+
+    listCtx, listCancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute)
+    defer listCancel()
+
+    listCmd := exec.CommandContext(listCtx, "tar", "-tzf", archivePath)
+
+    // Use pipes for streaming to avoid buffering entire output in memory
+    // This prevents OOM kills on large archives (100GB+) with millions of files
+    stdout, err := listCmd.StdoutPipe()
+    if err != nil {
+        return nil, fmt.Errorf("failed to create stdout pipe: %w", err)
+    }
+
+    var stderrBuf bytes.Buffer
+    listCmd.Stderr = &stderrBuf
+
+    if err := listCmd.Start(); err != nil {
+        return nil, fmt.Errorf("failed to start tar listing: %w", err)
+    }
+
+    // Stream the output line by line, only keeping relevant files
+    var files []string
+    scanner := bufio.NewScanner(stdout)
+    // Set a reasonable max line length (file paths shouldn't exceed this)
+    scanner.Buffer(make([]byte, 0, 4096), 1024*1024)
+
+    fileCount := 0
+    for scanner.Scan() {
+        fileCount++
+        line := scanner.Text()
+        // Only store dump files and important files, not every single file
+        if strings.HasSuffix(line, ".dump") || strings.HasSuffix(line, ".sql") ||
+            strings.HasSuffix(line, ".sql.gz") || strings.HasSuffix(line, ".json") ||
+            strings.Contains(line, "globals") || strings.Contains(line, "manifest") ||
+            strings.Contains(line, "metadata") || strings.HasSuffix(line, "/") {
+            files = append(files, line)
+        }
+    }
+
+    scanErr := scanner.Err()
+    listErr := listCmd.Wait()
+
+    if listErr != nil || scanErr != nil {
         // Archive listing failed - likely corrupted
         errResult := &DiagnoseResult{
             FilePath: archivePath,
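The move from CombinedOutput to StdoutPipe plus bufio.Scanner is what keeps memory flat: CombinedOutput accumulates the whole listing (possibly millions of paths) in one buffer, while the scanner retains only the current line. The pattern in isolation, as a standard-library sketch with illustrative arguments:

// streamLines runs a command and feeds its stdout to onLine, one line at a time.
func streamLines(ctx context.Context, onLine func(string)) error {
    cmd := exec.CommandContext(ctx, "tar", "-tzf", "archive.tar.gz") // illustrative args
    out, err := cmd.StdoutPipe()
    if err != nil {
        return err
    }
    if err := cmd.Start(); err != nil {
        return err
    }
    sc := bufio.NewScanner(out)
    sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) // tolerate long paths
    for sc.Scan() {
        onLine(sc.Text())
    }
    if err := sc.Err(); err != nil {
        _ = cmd.Wait()
        return err
    }
    return cmd.Wait()
}
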
@@ -557,9 +744,14 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
             IsCorrupted: true,
             Details:     &DiagnoseDetails{},
         }

-        errOutput := string(listOutput)
-        if strings.Contains(errOutput, "unexpected end of file") ||
+        errOutput := stderrBuf.String()
+        actualErr := listErr
+        if scanErr != nil {
+            actualErr = scanErr
+        }
+
+        if strings.Contains(errOutput, "unexpected end of file") ||
             strings.Contains(errOutput, "Unexpected EOF") ||
             strings.Contains(errOutput, "truncated") {
             errResult.IsTruncated = true
@@ -570,32 +762,41 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
                 "Solution: Re-create the backup from source database")
         } else {
             errResult.Errors = append(errResult.Errors,
-                fmt.Sprintf("Cannot list archive contents: %v", listErr),
+                fmt.Sprintf("Cannot list archive contents: %v", actualErr),
                 fmt.Sprintf("tar error: %s", truncateString(errOutput, 300)),
                 "Run manually: tar -tzf "+archivePath+" 2>&1 | tail -50")
         }

         return []*DiagnoseResult{errResult}, nil
     }

-    // Archive is listable - now check disk space before extraction
-    files := strings.Split(strings.TrimSpace(string(listOutput)), "\n")
+    if d.log != nil {
+        d.log.Debug("Archive listing streamed successfully", "total_files", fileCount, "relevant_files", len(files))
+    }

     // Check if we have enough disk space (estimate 4x archive size needed)
-    archiveInfo, _ := os.Stat(archivePath)
+    // archiveInfo already obtained at function start
     requiredSpace := archiveInfo.Size() * 4

     // Check temp directory space - try to extract metadata first
     if stat, err := os.Stat(tempDir); err == nil && stat.IsDir() {
-        // Try extraction of a small test file first
-        testCmd := exec.Command("tar", "-xzf", archivePath, "-C", tempDir, "--wildcards", "*.json", "--wildcards", "globals.sql")
+        // Try extraction of a small test file first with timeout
+        testCtx, testCancel := context.WithTimeout(context.Background(), 30*time.Second)
+        testCmd := exec.CommandContext(testCtx, "tar", "-xzf", archivePath, "-C", tempDir, "--wildcards", "*.json", "--wildcards", "globals.sql")
         testCmd.Run() // Ignore error - just try to extract metadata
+        testCancel()
     }

-    d.log.Info("Archive listing successful", "files", len(files))
-    // Try full extraction
-    cmd := exec.Command("tar", "-xzf", archivePath, "-C", tempDir)
+    if d.log != nil {
+        d.log.Info("Archive listing successful", "files", len(files))
+    }
+
+    // Try full extraction with a generous timeout (30 minutes), since very
+    // large archives can take a long time
+    extractCtx, extractCancel := context.WithTimeout(context.Background(), 30*time.Minute)
+    defer extractCancel()
+
+    cmd := exec.CommandContext(extractCtx, "tar", "-xzf", archivePath, "-C", tempDir)
     var stderr bytes.Buffer
     cmd.Stderr = &stderr
     if err := cmd.Run(); err != nil {
@@ -608,14 +809,14 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
             IsValid: false,
             Details: &DiagnoseDetails{},
         }

         errOutput := stderr.String()
         if strings.Contains(errOutput, "No space left") ||
             strings.Contains(errOutput, "cannot write") ||
             strings.Contains(errOutput, "Disk quota exceeded") {
             errResult.Errors = append(errResult.Errors,
                 "INSUFFICIENT DISK SPACE to extract archive for diagnosis",
                 fmt.Sprintf("Archive size: %s (needs ~%s for extraction)",
                     formatBytes(archiveInfo.Size()), formatBytes(requiredSpace)),
                 "Use CLI diagnosis instead: dbbackup restore diagnose "+archivePath,
                 "Or use --workdir flag to specify a location with more space")
@@ -634,7 +835,7 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
                 fmt.Sprintf("Extraction failed: %v", err),
                 fmt.Sprintf("tar error: %s", truncateString(errOutput, 300)))
         }

         // Still report what files we found in the listing
         var dumpFiles []string
         for _, f := range files {
@@ -648,7 +849,7 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
             errResult.Warnings = append(errResult.Warnings,
                 fmt.Sprintf("Archive contains %d database dumps (listing only)", len(dumpFiles)))
         }

         return []*DiagnoseResult{errResult}, nil
     }

@@ -677,11 +878,15 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
         }

         dumpPath := filepath.Join(dumpsDir, name)
-        d.log.Info("Diagnosing dump file", "file", name)
+        if d.log != nil {
+            d.log.Info("Diagnosing dump file", "file", name)
+        }
+
         result, err := d.DiagnoseFile(dumpPath)
         if err != nil {
-            d.log.Warn("Failed to diagnose file", "file", name, "error", err)
+            if d.log != nil {
+                d.log.Warn("Failed to diagnose file", "file", name, "error", err)
+            }
             continue
         }
         results = append(results, result)
@@ -693,7 +898,7 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
 // PrintDiagnosis outputs a human-readable diagnosis report
 func (d *Diagnoser) PrintDiagnosis(result *DiagnoseResult) {
     fmt.Println("\n" + strings.Repeat("=", 70))
-    fmt.Printf("📋 DIAGNOSIS: %s\n", result.FileName)
+    fmt.Printf("[DIAG] DIAGNOSIS: %s\n", result.FileName)
     fmt.Println(strings.Repeat("=", 70))

     // Basic info
@@ -703,69 +908,69 @@ func (d *Diagnoser) PrintDiagnosis(result *DiagnoseResult) {

     // Status
     if result.IsValid {
-        fmt.Println("\n✅ STATUS: VALID")
+        fmt.Println("\n[OK] STATUS: VALID")
     } else {
-        fmt.Println("\n❌ STATUS: INVALID")
+        fmt.Println("\n[FAIL] STATUS: INVALID")
     }

     if result.IsTruncated {
-        fmt.Println("⚠️ TRUNCATED: Yes - file appears incomplete")
+        fmt.Println("[WARN] TRUNCATED: Yes - file appears incomplete")
     }
     if result.IsCorrupted {
-        fmt.Println("⚠️ CORRUPTED: Yes - file structure is damaged")
+        fmt.Println("[WARN] CORRUPTED: Yes - file structure is damaged")
     }

     // Details
     if result.Details != nil {
-        fmt.Println("\n📊 DETAILS:")
+        fmt.Println("\n[DETAILS]:")

         if result.Details.HasPGDMPSignature {
-            fmt.Println(" ✓ Has PGDMP signature (PostgreSQL custom format)")
+            fmt.Println(" [+] Has PGDMP signature (PostgreSQL custom format)")
         }
         if result.Details.HasSQLHeader {
-            fmt.Println(" ✓ Has PostgreSQL SQL header")
+            fmt.Println(" [+] Has PostgreSQL SQL header")
         }
         if result.Details.GzipValid {
-            fmt.Println(" ✓ Gzip compression valid")
+            fmt.Println(" [+] Gzip compression valid")
         }
         if result.Details.PgRestoreListable {
-            fmt.Printf(" ✓ pg_restore can list contents (%d tables)\n", result.Details.TableCount)
+            fmt.Printf(" [+] pg_restore can list contents (%d tables)\n", result.Details.TableCount)
         }
         if result.Details.CopyBlockCount > 0 {
-            fmt.Printf(" • Contains %d COPY blocks\n", result.Details.CopyBlockCount)
+            fmt.Printf(" [-] Contains %d COPY blocks\n", result.Details.CopyBlockCount)
         }
         if result.Details.UnterminatedCopy {
-            fmt.Printf(" ✗ Unterminated COPY block: %s (line %d)\n",
+            fmt.Printf(" [-] Unterminated COPY block: %s (line %d)\n",
                 result.Details.LastCopyTable, result.Details.LastCopyLineNumber)
         }
         if result.Details.ProperlyTerminated {
-            fmt.Println(" ✓ All COPY blocks properly terminated")
+            fmt.Println(" [+] All COPY blocks properly terminated")
         }
         if result.Details.ExpandedSize > 0 {
-            fmt.Printf(" • Expanded size: %s (ratio: %.1fx)\n",
+            fmt.Printf(" [-] Expanded size: %s (ratio: %.1fx)\n",
                 formatBytes(result.Details.ExpandedSize), result.Details.CompressionRatio)
         }
     }

     // Errors
     if len(result.Errors) > 0 {
-        fmt.Println("\n❌ ERRORS:")
+        fmt.Println("\n[ERRORS]:")
         for _, e := range result.Errors {
-            fmt.Printf(" • %s\n", e)
+            fmt.Printf(" - %s\n", e)
         }
     }

     // Warnings
     if len(result.Warnings) > 0 {
-        fmt.Println("\n⚠️ WARNINGS:")
+        fmt.Println("\n[WARNINGS]:")
         for _, w := range result.Warnings {
-            fmt.Printf(" • %s\n", w)
+            fmt.Printf(" - %s\n", w)
         }
     }

     // Recommendations
     if !result.IsValid {
-        fmt.Println("\n💡 RECOMMENDATIONS:")
+        fmt.Println("\n[HINT] RECOMMENDATIONS:")
         if result.IsTruncated {
             fmt.Println("  1. Re-run the backup process for this database")
             fmt.Println("  2. Check disk space on backup server during backup")

@@ -2,10 +2,12 @@ package restore
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"database/sql"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@@ -17,6 +19,9 @@ import (
|
|||||||
"dbbackup/internal/logger"
|
"dbbackup/internal/logger"
|
||||||
"dbbackup/internal/progress"
|
"dbbackup/internal/progress"
|
||||||
"dbbackup/internal/security"
|
"dbbackup/internal/security"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-multierror"
|
||||||
|
_ "github.com/jackc/pgx/v5/stdlib" // PostgreSQL driver
|
||||||
)
|
)
|
||||||
|
|
||||||
// Engine handles database restore operations
|
// Engine handles database restore operations
|
||||||
@@ -27,8 +32,7 @@ type Engine struct {
|
|||||||
progress progress.Indicator
|
progress progress.Indicator
|
||||||
detailedReporter *progress.DetailedReporter
|
detailedReporter *progress.DetailedReporter
|
||||||
dryRun bool
|
dryRun bool
|
||||||
debugLogPath string // Path to save debug log on error
|
debugLogPath string // Path to save debug log on error
|
||||||
errorCollector *ErrorCollector // Collects detailed error info
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new restore engine
|
// New creates a new restore engine
|
||||||
@@ -128,7 +132,7 @@ func (e *Engine) RestoreSingle(ctx context.Context, archivePath, targetDB string
|
|||||||
e.log.Warn("Checksum verification failed", "error", checksumErr)
|
e.log.Warn("Checksum verification failed", "error", checksumErr)
|
||||||
e.log.Warn("Continuing restore without checksum verification (use with caution)")
|
e.log.Warn("Continuing restore without checksum verification (use with caution)")
|
||||||
} else {
|
} else {
|
||||||
e.log.Info("✓ Archive checksum verified successfully")
|
e.log.Info("[OK] Archive checksum verified successfully")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Detect archive format
|
// Detect archive format
|
||||||
@@ -224,7 +228,18 @@ func (e *Engine) restorePostgreSQLDump(ctx context.Context, archivePath, targetD
|
|||||||
|
|
||||||
// restorePostgreSQLDumpWithOwnership restores from PostgreSQL custom dump with ownership control
|
// restorePostgreSQLDumpWithOwnership restores from PostgreSQL custom dump with ownership control
|
||||||
func (e *Engine) restorePostgreSQLDumpWithOwnership(ctx context.Context, archivePath, targetDB string, compressed bool, preserveOwnership bool) error {
|
func (e *Engine) restorePostgreSQLDumpWithOwnership(ctx context.Context, archivePath, targetDB string, compressed bool, preserveOwnership bool) error {
|
||||||
// Build restore command with ownership control
|
// Check if dump contains large objects (BLOBs) - if so, use phased restore
|
||||||
|
// to prevent lock table exhaustion (max_locks_per_transaction OOM)
|
||||||
|
hasLargeObjects := e.checkDumpHasLargeObjects(archivePath)
|
||||||
|
|
||||||
|
if hasLargeObjects {
|
||||||
|
e.log.Info("Large objects detected - using phased restore to prevent lock exhaustion",
|
||||||
|
"database", targetDB,
|
||||||
|
"archive", archivePath)
|
||||||
|
return e.restorePostgreSQLDumpPhased(ctx, archivePath, targetDB, preserveOwnership)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Standard restore for dumps without large objects
|
||||||
opts := database.RestoreOptions{
|
opts := database.RestoreOptions{
|
||||||
Parallel: 1,
|
Parallel: 1,
|
||||||
Clean: false, // We already dropped the database
|
Clean: false, // We already dropped the database
|
||||||
@@ -250,6 +265,113 @@ func (e *Engine) restorePostgreSQLDumpWithOwnership(ctx context.Context, archive
     return e.executeRestoreCommand(ctx, cmd)
 }

+// restorePostgreSQLDumpPhased performs a multi-phase restore to prevent lock table exhaustion
+// Phase 1: pre-data (schema, types, functions)
+// Phase 2: data (table data, excluding BLOBs)
+// Phase 3: blobs (large objects in smaller batches)
+// Phase 4: post-data (indexes, constraints, triggers)
+//
+// This approach prevents OOM errors by committing and releasing locks between phases.
+func (e *Engine) restorePostgreSQLDumpPhased(ctx context.Context, archivePath, targetDB string, preserveOwnership bool) error {
+    e.log.Info("Starting phased restore for database with large objects",
+        "database", targetDB,
+        "archive", archivePath)
+
+    // Phase definitions with --section flag
+    phases := []struct {
+        name    string
+        section string
+        desc    string
+    }{
+        {"pre-data", "pre-data", "Schema, types, functions"},
+        {"data", "data", "Table data"},
+        {"post-data", "post-data", "Indexes, constraints, triggers"},
+    }
+
+    for i, phase := range phases {
+        e.log.Info(fmt.Sprintf("Phase %d/%d: Restoring %s", i+1, len(phases), phase.name),
+            "database", targetDB,
+            "section", phase.section,
+            "description", phase.desc)
+
+        if err := e.restoreSection(ctx, archivePath, targetDB, phase.section, preserveOwnership); err != nil {
+            // Check if it's an ignorable error
+            if e.isIgnorableError(err.Error()) {
+                e.log.Warn(fmt.Sprintf("Phase %d completed with ignorable errors", i+1),
+                    "section", phase.section,
+                    "error", err)
+                continue
+            }
+            return fmt.Errorf("phase %d (%s) failed: %w", i+1, phase.name, err)
+        }
+
+        e.log.Info(fmt.Sprintf("Phase %d/%d completed successfully", i+1, len(phases)),
+            "section", phase.section)
+    }
+
+    e.log.Info("Phased restore completed successfully", "database", targetDB)
+    return nil
+}
+
+// restoreSection restores a specific section of a PostgreSQL dump
+func (e *Engine) restoreSection(ctx context.Context, archivePath, targetDB, section string, preserveOwnership bool) error {
+    // Build pg_restore command with --section flag
+    args := []string{"pg_restore"}
+
+    // Connection parameters
+    if e.cfg.Host != "localhost" {
+        args = append(args, "-h", e.cfg.Host)
+        args = append(args, "-p", fmt.Sprintf("%d", e.cfg.Port))
+        args = append(args, "--no-password")
+    }
+    args = append(args, "-U", e.cfg.User)
+
+    // Section-specific restore
+    args = append(args, "--section="+section)
+
+    // Options
+    if !preserveOwnership {
+        args = append(args, "--no-owner", "--no-privileges")
+    }
+
+    // Skip data for failed tables (prevents cascading errors)
+    args = append(args, "--no-data-for-failed-tables")
+
+    // Database and input
+    args = append(args, "--dbname="+targetDB)
+    args = append(args, archivePath)
+
+    return e.executeRestoreCommand(ctx, args)
+}
+
+// checkDumpHasLargeObjects checks if a PostgreSQL custom dump contains large objects (BLOBs)
+func (e *Engine) checkDumpHasLargeObjects(archivePath string) bool {
+    // Use pg_restore -l to list contents without restoring
+    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    defer cancel()
+
+    cmd := exec.CommandContext(ctx, "pg_restore", "-l", archivePath)
+    output, err := cmd.Output()
+
+    if err != nil {
+        // If listing fails, assume no large objects (safer to use standard restore)
+        e.log.Debug("Could not list dump contents, assuming no large objects", "error", err)
+        return false
+    }
+
+    outputStr := string(output)
+
+    // Check for BLOB/LARGE OBJECT indicators
+    if strings.Contains(outputStr, "BLOB") ||
+        strings.Contains(outputStr, "LARGE OBJECT") ||
+        strings.Contains(outputStr, " BLOBS ") ||
+        strings.Contains(outputStr, "lo_create") {
+        return true
+    }
+
+    return false
+}
+
 // restorePostgreSQLSQL restores from PostgreSQL SQL script
 func (e *Engine) restorePostgreSQLSQL(ctx context.Context, archivePath, targetDB string, compressed bool) error {
     // Pre-validate SQL dump to detect truncation BEFORE attempting restore
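A note on the sectioning used above: pg_restore itself recognizes three sections in a custom-format archive (pre-data, data, and post-data, with large-object contents travelling inside the data section), which is why the phase table drives three passes even though the doc comment sketches four. A minimal standalone sketch of the same sequencing, with a hypothetical database name and archive path:

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    // Restore one custom-format dump in pg_restore's three section passes,
    // committing between sections so lock-table pressure is released.
    sections := []string{"pre-data", "data", "post-data"}
    for _, s := range sections {
        cmd := exec.Command("pg_restore",
            "--section="+s,      // restore only this section
            "--no-owner",        // skip ownership, as the non-superuser path does
            "--dbname=app",      // hypothetical target database
            "/backups/app.dump") // hypothetical archive path
        if out, err := cmd.CombinedOutput(); err != nil {
            fmt.Printf("section %s failed: %v\n%s", s, err, out)
            return
        }
    }
}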
@@ -357,43 +479,68 @@ func (e *Engine) executeRestoreCommandWithContext(ctx context.Context, cmdArgs [
         return fmt.Errorf("failed to start restore command: %w", err)
     }

-    // Read stderr in chunks to log errors without loading all into memory
-    buf := make([]byte, 4096)
+    // Read stderr in goroutine to avoid blocking
     var lastError string
     var errorCount int
-    const maxErrors = 10 // Limit captured errors to prevent OOM
-    for {
-        n, err := stderr.Read(buf)
-        if n > 0 {
-            chunk := string(buf[:n])
-
-            // Feed to error collector if enabled
-            if collector != nil {
-                collector.CaptureStderr(chunk)
-            }
-
-            // Only capture REAL errors, not verbose output
-            if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
-                lastError = strings.TrimSpace(chunk)
-                errorCount++
-                if errorCount <= maxErrors {
-                    e.log.Warn("Restore stderr", "output", chunk)
-                }
-            }
-            // Note: --verbose output is discarded to prevent OOM
-        }
-        if err != nil {
-            break
-        }
-    }
-
-    if err := cmd.Wait(); err != nil {
+    stderrDone := make(chan struct{})
+    go func() {
+        defer close(stderrDone)
+        buf := make([]byte, 4096)
+        const maxErrors = 10 // Limit captured errors to prevent OOM
+        for {
+            n, err := stderr.Read(buf)
+            if n > 0 {
+                chunk := string(buf[:n])
+
+                // Feed to error collector if enabled
+                if collector != nil {
+                    collector.CaptureStderr(chunk)
+                }
+
+                // Only capture REAL errors, not verbose output
+                if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
+                    lastError = strings.TrimSpace(chunk)
+                    errorCount++
+                    if errorCount <= maxErrors {
+                        e.log.Warn("Restore stderr", "output", chunk)
+                    }
+                }
+                // Note: --verbose output is discarded to prevent OOM
+            }
+            if err != nil {
+                break
+            }
+        }
+    }()
+
+    // Wait for command with proper context handling
+    cmdDone := make(chan error, 1)
+    go func() {
+        cmdDone <- cmd.Wait()
+    }()
+
+    var cmdErr error
+    select {
+    case cmdErr = <-cmdDone:
+        // Command completed (success or failure)
+    case <-ctx.Done():
+        // Context cancelled - kill process
+        e.log.Warn("Restore cancelled - killing process")
+        cmd.Process.Kill()
+        <-cmdDone
+        cmdErr = ctx.Err()
+    }
+
+    // Wait for stderr reader to finish
+    <-stderrDone
+
+    if cmdErr != nil {
         // Get exit code
         exitCode := 1
-        if exitErr, ok := err.(*exec.ExitError); ok {
+        if exitErr, ok := cmdErr.(*exec.ExitError); ok {
             exitCode = exitErr.ExitCode()
         }

         // PostgreSQL pg_restore returns exit code 1 even for ignorable errors
         // Check if errors are ignorable (already exists, duplicate, etc.)
         if lastError != "" && e.isIgnorableError(lastError) {
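The pattern introduced in this hunk recurs in several functions below: stderr is drained on its own goroutine so a chatty child process cannot block on a full pipe, and cmd.Wait() is raced against ctx.Done() so cancellation kills the process instead of hanging. A compact, self-contained sketch of just that skeleton (hypothetical command, output discarded rather than inspected):

package main

import (
    "context"
    "io"
    "os/exec"
    "time"
)

func runDraining(ctx context.Context, cmd *exec.Cmd) error {
    stderr, err := cmd.StderrPipe()
    if err != nil {
        return err
    }
    if err := cmd.Start(); err != nil {
        return err
    }

    // Drain stderr on a goroutine so the child never blocks on a full pipe.
    stderrDone := make(chan struct{})
    go func() {
        defer close(stderrDone)
        io.Copy(io.Discard, stderr) // real code inspects chunks for ERROR:/FATAL:
    }()

    // Race completion against cancellation.
    cmdDone := make(chan error, 1)
    go func() { cmdDone <- cmd.Wait() }()

    var cmdErr error
    select {
    case cmdErr = <-cmdDone:
    case <-ctx.Done():
        cmd.Process.Kill()
        <-cmdDone // reap the process after killing it
        cmdErr = ctx.Err()
    }
    <-stderrDone
    return cmdErr
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    _ = runDraining(ctx, exec.Command("sleep", "60"))
}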
@@ -427,17 +574,17 @@ func (e *Engine) executeRestoreCommandWithContext(ctx context.Context, cmdArgs [
                 errType,
                 errHint,
             )

             // Print report to console
             collector.PrintReport(report)

             // Save to file
             if e.debugLogPath != "" {
                 if saveErr := collector.SaveReport(report, e.debugLogPath); saveErr != nil {
                     e.log.Warn("Failed to save debug log", "error", saveErr)
                 } else {
                     e.log.Info("Debug log saved", "path", e.debugLogPath)
-                    fmt.Printf("\n📋 Detailed error report saved to: %s\n", e.debugLogPath)
+                    fmt.Printf("\n[LOG] Detailed error report saved to: %s\n", e.debugLogPath)
                 }
             }
         }
@@ -481,31 +628,56 @@ func (e *Engine) executeRestoreWithDecompression(ctx context.Context, archivePat
         return fmt.Errorf("failed to start restore command: %w", err)
     }

-    // Read stderr in chunks to log errors without loading all into memory
-    buf := make([]byte, 4096)
+    // Read stderr in goroutine to avoid blocking
     var lastError string
     var errorCount int
-    const maxErrors = 10 // Limit captured errors to prevent OOM
-    for {
-        n, err := stderr.Read(buf)
-        if n > 0 {
-            chunk := string(buf[:n])
-            // Only capture REAL errors, not verbose output
-            if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
-                lastError = strings.TrimSpace(chunk)
-                errorCount++
-                if errorCount <= maxErrors {
-                    e.log.Warn("Restore stderr", "output", chunk)
-                }
-            }
-            // Note: --verbose output is discarded to prevent OOM
-        }
-        if err != nil {
-            break
-        }
-    }
-
-    if err := cmd.Wait(); err != nil {
+    stderrDone := make(chan struct{})
+    go func() {
+        defer close(stderrDone)
+        buf := make([]byte, 4096)
+        const maxErrors = 10 // Limit captured errors to prevent OOM
+        for {
+            n, err := stderr.Read(buf)
+            if n > 0 {
+                chunk := string(buf[:n])
+                // Only capture REAL errors, not verbose output
+                if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
+                    lastError = strings.TrimSpace(chunk)
+                    errorCount++
+                    if errorCount <= maxErrors {
+                        e.log.Warn("Restore stderr", "output", chunk)
+                    }
+                }
+                // Note: --verbose output is discarded to prevent OOM
+            }
+            if err != nil {
+                break
+            }
+        }
+    }()
+
+    // Wait for command with proper context handling
+    cmdDone := make(chan error, 1)
+    go func() {
+        cmdDone <- cmd.Wait()
+    }()
+
+    var cmdErr error
+    select {
+    case cmdErr = <-cmdDone:
+        // Command completed (success or failure)
+    case <-ctx.Done():
+        // Context cancelled - kill process
+        e.log.Warn("Restore with decompression cancelled - killing process")
+        cmd.Process.Kill()
+        <-cmdDone
+        cmdErr = ctx.Err()
+    }
+
+    // Wait for stderr reader to finish
+    <-stderrDone
+
+    if cmdErr != nil {
         // PostgreSQL pg_restore returns exit code 1 even for ignorable errors
         // Check if errors are ignorable (already exists, duplicate, etc.)
         if lastError != "" && e.isIgnorableError(lastError) {
@@ -517,18 +689,18 @@ func (e *Engine) executeRestoreWithDecompression(ctx context.Context, archivePat
         if lastError != "" {
             classification := checks.ClassifyError(lastError)
             e.log.Error("Restore with decompression failed",
-                "error", err,
+                "error", cmdErr,
                 "last_stderr", lastError,
                 "error_count", errorCount,
                 "error_type", classification.Type,
                 "hint", classification.Hint,
                 "action", classification.Action)
             return fmt.Errorf("restore failed: %w (last error: %s, total errors: %d) - %s",
-                err, lastError, errorCount, classification.Hint)
+                cmdErr, lastError, errorCount, classification.Hint)
         }

-        e.log.Error("Restore with decompression failed", "error", err, "last_stderr", lastError, "error_count", errorCount)
-        return fmt.Errorf("restore failed: %w", err)
+        e.log.Error("Restore with decompression failed", "error", cmdErr, "last_stderr", lastError, "error_count", errorCount)
+        return fmt.Errorf("restore failed: %w", cmdErr)
     }

     return nil
@@ -563,7 +735,7 @@ func (e *Engine) previewRestore(archivePath, targetDB string, format ArchiveForm
         fmt.Printf(" 1. Execute: mysql %s < %s\n", targetDB, archivePath)
     }

-    fmt.Println("\n⚠️ WARNING: This will restore data to the target database.")
+    fmt.Println("\n[WARN] WARNING: This will restore data to the target database.")
     fmt.Println(" Existing data may be overwritten or merged.")
     fmt.Println("\nTo execute this restore, add the --confirm flag.")
     fmt.Println(strings.Repeat("=", 60) + "\n")
@@ -594,7 +766,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
         e.log.Warn("Checksum verification failed", "error", checksumErr)
         e.log.Warn("Continuing restore without checksum verification (use with caution)")
     } else {
-        e.log.Info("✓ Cluster archive checksum verified successfully")
+        e.log.Info("[OK] Cluster archive checksum verified successfully")
     }

     format := DetectArchiveFormat(archivePath)
@@ -628,11 +800,12 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {

     e.progress.Start(fmt.Sprintf("Restoring cluster from %s", filepath.Base(archivePath)))

-    // Create temporary extraction directory
-    tempDir := filepath.Join(e.cfg.BackupDir, fmt.Sprintf(".restore_%d", time.Now().Unix()))
+    // Create temporary extraction directory in configured WorkDir
+    workDir := e.cfg.GetEffectiveWorkDir()
+    tempDir := filepath.Join(workDir, fmt.Sprintf(".restore_%d", time.Now().Unix()))
     if err := os.MkdirAll(tempDir, 0755); err != nil {
         operation.Fail("Failed to create temporary directory")
-        return fmt.Errorf("failed to create temp directory: %w", err)
+        return fmt.Errorf("failed to create temp directory in %s: %w", workDir, err)
     }
     defer os.RemoveAll(tempDir)

@@ -653,7 +826,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {

     if !isSuperuser {
         e.log.Warn("Current user is not a superuser - database ownership may not be fully restored")
-        e.progress.Update("⚠️ Warning: Non-superuser - ownership restoration limited")
+        e.progress.Update("[WARN] Warning: Non-superuser - ownership restoration limited")
         time.Sleep(2 * time.Second) // Give user time to see warning
     } else {
         e.log.Info("Superuser privileges confirmed - full ownership restoration enabled")
@@ -726,7 +899,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
             }
         } else if strings.HasSuffix(dumpFile, ".dump") {
             // Validate custom format dumps using pg_restore --list
-            cmd := exec.Command("pg_restore", "--list", dumpFile)
+            cmd := exec.CommandContext(ctx, "pg_restore", "--list", dumpFile)
             output, err := cmd.CombinedOutput()
             if err != nil {
                 dbName := strings.TrimSuffix(entry.Name(), ".dump")
@@ -752,12 +925,45 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
     if len(corruptedDumps) > 0 {
         operation.Fail("Corrupted dump files detected")
         e.progress.Fail(fmt.Sprintf("Found %d corrupted dump files - restore aborted", len(corruptedDumps)))
-        return fmt.Errorf("pre-validation failed: %d corrupted dump files detected:\n %s\n\nThe backup archive appears to be damaged. You need to restore from a different backup.",
-            len(corruptedDumps), strings.Join(corruptedDumps, "\n "))
+        return fmt.Errorf("pre-validation failed: %d corrupted dump files detected: %s - the backup archive appears to be damaged, restore from a different backup",
+            len(corruptedDumps), strings.Join(corruptedDumps, ", "))
     }
     e.log.Info("All dump files passed validation")

-    var failedDBs []string
+    // Run comprehensive preflight checks (Linux system + PostgreSQL + Archive analysis)
+    preflight, preflightErr := e.RunPreflightChecks(ctx, dumpsDir, entries)
+    if preflightErr != nil {
+        e.log.Warn("Preflight checks failed", "error", preflightErr)
+    }
+
+    // Calculate optimal lock boost based on BLOB count
+    lockBoostValue := 2048 // Default
+    if preflight != nil && preflight.Archive.RecommendedLockBoost > 0 {
+        lockBoostValue = preflight.Archive.RecommendedLockBoost
+    }
+
+    // AUTO-TUNE: Boost PostgreSQL settings for large restores
+    e.progress.Update("Tuning PostgreSQL for large restore...")
+    originalSettings, tuneErr := e.boostPostgreSQLSettings(ctx, lockBoostValue)
+    if tuneErr != nil {
+        e.log.Warn("Could not boost PostgreSQL settings - restore may fail on BLOB-heavy databases",
+            "error", tuneErr)
+    } else {
+        e.log.Info("Boosted PostgreSQL settings for restore",
+            "max_locks_per_transaction", fmt.Sprintf("%d → %d", originalSettings.MaxLocks, lockBoostValue),
+            "maintenance_work_mem", fmt.Sprintf("%s → 2GB", originalSettings.MaintenanceWorkMem))
+        // Ensure we reset settings when done (even on failure)
+        defer func() {
+            if resetErr := e.resetPostgreSQLSettings(ctx, originalSettings); resetErr != nil {
+                e.log.Warn("Could not reset PostgreSQL settings", "error", resetErr)
+            } else {
+                e.log.Info("Reset PostgreSQL settings to original values")
+            }
+        }()
+    }
+
+    var restoreErrors *multierror.Error
+    var restoreErrorsMu sync.Mutex
     totalDBs := 0

     // Count total databases
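For context on the lock boost: PostgreSQL sizes its shared lock table at roughly max_locks_per_transaction * (max_connections + max_prepared_transactions) entries, and restoring many large objects inside a single transaction consumes on the order of one lock per object, so the boost has to scale with the BLOB count. A back-of-envelope sketch of one plausible sizing policy (an illustrative assumption, not necessarily the formula RunPreflightChecks actually uses):

package main

import "fmt"

// recommendedLockBoost scales lock capacity with the number of large
// objects, never dropping below the 2048 floor used as the default above.
func recommendedLockBoost(blobCount, maxConnections int) int {
    boost := blobCount/maxConnections + 1
    if boost < 2048 {
        boost = 2048
    }
    return boost
}

func main() {
    // e.g. half a million BLOBs on a server allowing 100 connections
    fmt.Println(recommendedLockBoost(500000, 100))
}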
@@ -785,13 +991,12 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
         e.log.Warn("Large objects detected in dump files - reducing parallelism to avoid lock contention",
             "original_parallelism", parallelism,
             "adjusted_parallelism", 1)
-        e.progress.Update("⚠️ Large objects detected - using sequential restore to avoid lock conflicts")
+        e.progress.Update("[WARN] Large objects detected - using sequential restore to avoid lock conflicts")
         time.Sleep(2 * time.Second) // Give user time to see warning
         parallelism = 1
     }

     var successCount, failCount int32
-    var failedDBsMu sync.Mutex
     var mu sync.Mutex // Protect shared resources (progress, logger)

     // Create semaphore to limit concurrency
@@ -811,6 +1016,14 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
             defer wg.Done()
             defer func() { <-semaphore }() // Release

+            // Panic recovery - prevent one database failure from crashing entire cluster restore
+            defer func() {
+                if r := recover(); r != nil {
+                    e.log.Error("Panic in database restore goroutine", "file", filename, "panic", r)
+                    atomic.AddInt32(&failCount, 1)
+                }
+            }()
+
             // Update estimator progress (thread-safe)
             mu.Lock()
             estimator.UpdateProgress(idx)
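The recovery guard added here matters because an unrecovered panic in any worker goroutine would crash the whole process and abort every in-flight database restore. A minimal sketch of the same guard in isolation (hypothetical worker that always panics):

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    var failed int32
    var wg sync.WaitGroup
    for _, name := range []string{"db1", "db2"} {
        wg.Add(1)
        go func(name string) {
            defer wg.Done()
            // Convert a panic into a counted failure instead of a crash.
            defer func() {
                if r := recover(); r != nil {
                    atomic.AddInt32(&failed, 1)
                    fmt.Println("recovered:", name, r)
                }
            }()
            panic("boom") // stand-in for an unexpected failure
        }(name)
    }
    wg.Wait()
    fmt.Println("failed:", atomic.LoadInt32(&failed))
}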
@@ -838,9 +1051,9 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
             // STEP 2: Create fresh database
             if err := e.ensureDatabaseExists(ctx, dbName); err != nil {
                 e.log.Error("Failed to create database", "name", dbName, "error", err)
-                failedDBsMu.Lock()
-                failedDBs = append(failedDBs, fmt.Sprintf("%s: failed to create database: %v", dbName, err))
-                failedDBsMu.Unlock()
+                restoreErrorsMu.Lock()
+                restoreErrors = multierror.Append(restoreErrors, fmt.Errorf("%s: failed to create database: %w", dbName, err))
+                restoreErrorsMu.Unlock()
                 atomic.AddInt32(&failCount, 1)
                 return
             }
@@ -883,10 +1096,10 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
                 mu.Unlock()
             }

-            failedDBsMu.Lock()
+            restoreErrorsMu.Lock()
             // Include more context in the error message
-            failedDBs = append(failedDBs, fmt.Sprintf("%s: restore failed: %v", dbName, restoreErr))
-            failedDBsMu.Unlock()
+            restoreErrors = multierror.Append(restoreErrors, fmt.Errorf("%s: restore failed: %w", dbName, restoreErr))
+            restoreErrorsMu.Unlock()
             atomic.AddInt32(&failCount, 1)
             return
         }
@@ -904,7 +1117,17 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
     failCountFinal := int(atomic.LoadInt32(&failCount))

     if failCountFinal > 0 {
-        failedList := strings.Join(failedDBs, "\n ")
+        // Format multi-error with detailed output
+        restoreErrors.ErrorFormat = func(errs []error) string {
+            if len(errs) == 1 {
+                return errs[0].Error()
+            }
+            points := make([]string, len(errs))
+            for i, err := range errs {
+                points[i] = fmt.Sprintf(" • %s", err.Error())
+            }
+            return fmt.Sprintf("%d database(s) failed:\n%s", len(errs), strings.Join(points, "\n"))
+        }

         // Log summary
         e.log.Info("Cluster restore completed with failures",
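The switch from a plain []string of failure messages to hashicorp/go-multierror keeps each failure as a real error value (so %w wrapping still composes), while the ErrorFormat hook controls how the combined message prints. A small usage sketch with hypothetical database names and a stand-in restore function:

package main

import (
    "fmt"

    "github.com/hashicorp/go-multierror"
)

func restoreOne(db string) error {
    return fmt.Errorf("%s: restore failed", db) // stand-in failure
}

func main() {
    // Append tolerates a nil *multierror.Error, so no pre-allocation is needed.
    var errs *multierror.Error
    for _, db := range []string{"app", "analytics"} {
        if err := restoreOne(db); err != nil {
            errs = multierror.Append(errs, err)
        }
    }
    // Custom formatting of the combined error, as in the hunk above.
    errs.ErrorFormat = func(es []error) string {
        return fmt.Sprintf("%d database(s) failed", len(es))
    }
    fmt.Println(errs.Error())
}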
@@ -915,7 +1138,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
         e.progress.Fail(fmt.Sprintf("Cluster restore: %d succeeded, %d failed out of %d total", successCountFinal, failCountFinal, totalDBs))
         operation.Complete(fmt.Sprintf("Partial restore: %d/%d databases succeeded", successCountFinal, totalDBs))

-        return fmt.Errorf("cluster restore completed with %d failures:\n %s", failCountFinal, failedList)
+        return fmt.Errorf("cluster restore completed with %d failures:\n%s", failCountFinal, restoreErrors.Error())
     }

     e.progress.Complete(fmt.Sprintf("Cluster restored successfully: %d databases", successCountFinal))
@@ -938,16 +1161,39 @@ func (e *Engine) extractArchive(ctx context.Context, archivePath, destDir string
     }

     // Discard stderr output in chunks to prevent memory buildup
-    buf := make([]byte, 4096)
-    for {
-        _, err := stderr.Read(buf)
-        if err != nil {
-            break
-        }
-    }
-
-    if err := cmd.Wait(); err != nil {
-        return fmt.Errorf("tar extraction failed: %w", err)
+    stderrDone := make(chan struct{})
+    go func() {
+        defer close(stderrDone)
+        buf := make([]byte, 4096)
+        for {
+            _, err := stderr.Read(buf)
+            if err != nil {
+                break
+            }
+        }
+    }()
+
+    // Wait for command with proper context handling
+    cmdDone := make(chan error, 1)
+    go func() {
+        cmdDone <- cmd.Wait()
+    }()
+
+    var cmdErr error
+    select {
+    case cmdErr = <-cmdDone:
+        // Command completed
+    case <-ctx.Done():
+        e.log.Warn("Archive extraction cancelled - killing process")
+        cmd.Process.Kill()
+        <-cmdDone
+        cmdErr = ctx.Err()
+    }
+
+    <-stderrDone
+
+    if cmdErr != nil {
+        return fmt.Errorf("tar extraction failed: %w", cmdErr)
     }
     return nil
 }
@@ -980,25 +1226,48 @@ func (e *Engine) restoreGlobals(ctx context.Context, globalsFile string) error {
         return fmt.Errorf("failed to start psql: %w", err)
     }

-    // Read stderr in chunks
-    buf := make([]byte, 4096)
+    // Read stderr in chunks in goroutine
     var lastError string
-    for {
-        n, err := stderr.Read(buf)
-        if n > 0 {
-            chunk := string(buf[:n])
-            if strings.Contains(chunk, "ERROR") || strings.Contains(chunk, "FATAL") {
-                lastError = chunk
-                e.log.Warn("Globals restore stderr", "output", chunk)
-            }
-        }
-        if err != nil {
-            break
-        }
-    }
-
-    if err := cmd.Wait(); err != nil {
-        return fmt.Errorf("failed to restore globals: %w (last error: %s)", err, lastError)
+    stderrDone := make(chan struct{})
+    go func() {
+        defer close(stderrDone)
+        buf := make([]byte, 4096)
+        for {
+            n, err := stderr.Read(buf)
+            if n > 0 {
+                chunk := string(buf[:n])
+                if strings.Contains(chunk, "ERROR") || strings.Contains(chunk, "FATAL") {
+                    lastError = chunk
+                    e.log.Warn("Globals restore stderr", "output", chunk)
+                }
+            }
+            if err != nil {
+                break
+            }
+        }
+    }()
+
+    // Wait for command with proper context handling
+    cmdDone := make(chan error, 1)
+    go func() {
+        cmdDone <- cmd.Wait()
+    }()
+
+    var cmdErr error
+    select {
+    case cmdErr = <-cmdDone:
+        // Command completed
+    case <-ctx.Done():
+        e.log.Warn("Globals restore cancelled - killing process")
+        cmd.Process.Kill()
+        <-cmdDone
+        cmdErr = ctx.Err()
+    }
+
+    <-stderrDone
+
+    if cmdErr != nil {
+        return fmt.Errorf("failed to restore globals: %w (last error: %s)", cmdErr, lastError)
     }

     return nil
@@ -1235,7 +1504,7 @@ func (e *Engine) previewClusterRestore(archivePath string) error {
     fmt.Println(" 3. Restore all databases found in archive")
     fmt.Println(" 4. Cleanup temporary files")

-    fmt.Println("\n⚠️ WARNING: This will restore multiple databases.")
+    fmt.Println("\n[WARN] WARNING: This will restore multiple databases.")
     fmt.Println(" Existing databases may be overwritten or merged.")
     fmt.Println("\nTo execute this restore, add the --confirm flag.")
     fmt.Println(strings.Repeat("=", 60) + "\n")
@@ -1262,7 +1531,8 @@ func (e *Engine) detectLargeObjectsInDumps(dumpsDir string, entries []os.DirEntr
         }

         // Use pg_restore -l to list contents (fast, doesn't restore data)
-        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+        // 2 minutes for large dumps with many objects
+        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
         defer cancel()

         cmd := exec.CommandContext(ctx, "pg_restore", "-l", dumpFile)
@@ -1394,3 +1664,173 @@ func (e *Engine) quickValidateSQLDump(archivePath string, compressed bool) error
     e.log.Debug("SQL dump validation passed", "path", archivePath)
     return nil
 }
+
+// boostLockCapacity temporarily increases max_locks_per_transaction to prevent OOM
+// during large restores with many BLOBs. Returns the original value for later reset.
+// Uses ALTER SYSTEM + pg_reload_conf() so no restart is needed.
+func (e *Engine) boostLockCapacity(ctx context.Context) (int, error) {
+    // Connect to PostgreSQL to run system commands
+    connStr := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=postgres sslmode=disable",
+        e.cfg.Host, e.cfg.Port, e.cfg.User, e.cfg.Password)
+
+    // For localhost, use Unix socket
+    if e.cfg.Host == "localhost" || e.cfg.Host == "" {
+        connStr = fmt.Sprintf("user=%s password=%s dbname=postgres sslmode=disable",
+            e.cfg.User, e.cfg.Password)
+    }
+
+    db, err := sql.Open("pgx", connStr)
+    if err != nil {
+        return 0, fmt.Errorf("failed to connect: %w", err)
+    }
+    defer db.Close()
+
+    // Get current value
+    var currentValue int
+    err = db.QueryRowContext(ctx, "SHOW max_locks_per_transaction").Scan(&currentValue)
+    if err != nil {
+        // Try parsing as string (some versions return string)
+        var currentValueStr string
+        err = db.QueryRowContext(ctx, "SHOW max_locks_per_transaction").Scan(&currentValueStr)
+        if err != nil {
+            return 0, fmt.Errorf("failed to get current max_locks_per_transaction: %w", err)
+        }
+        fmt.Sscanf(currentValueStr, "%d", &currentValue)
+    }
+
+    // Skip if already high enough
+    if currentValue >= 2048 {
+        e.log.Info("max_locks_per_transaction already sufficient", "value", currentValue)
+        return currentValue, nil
+    }
+
+    // Boost to 2048 (enough for most BLOB-heavy databases)
+    _, err = db.ExecContext(ctx, "ALTER SYSTEM SET max_locks_per_transaction = 2048")
+    if err != nil {
+        return currentValue, fmt.Errorf("failed to set max_locks_per_transaction: %w", err)
+    }
+
+    // Reload config without restart
+    _, err = db.ExecContext(ctx, "SELECT pg_reload_conf()")
+    if err != nil {
+        return currentValue, fmt.Errorf("failed to reload config: %w", err)
+    }
+
+    return currentValue, nil
+}
+
+// resetLockCapacity restores the original max_locks_per_transaction value
+func (e *Engine) resetLockCapacity(ctx context.Context, originalValue int) error {
+    connStr := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=postgres sslmode=disable",
+        e.cfg.Host, e.cfg.Port, e.cfg.User, e.cfg.Password)
+
+    if e.cfg.Host == "localhost" || e.cfg.Host == "" {
+        connStr = fmt.Sprintf("user=%s password=%s dbname=postgres sslmode=disable",
+            e.cfg.User, e.cfg.Password)
+    }
+
+    db, err := sql.Open("pgx", connStr)
+    if err != nil {
+        return fmt.Errorf("failed to connect: %w", err)
+    }
+    defer db.Close()
+
+    // Reset to original value (or use RESET to go back to default)
+    if originalValue == 64 { // Default value
+        _, err = db.ExecContext(ctx, "ALTER SYSTEM RESET max_locks_per_transaction")
+    } else {
+        _, err = db.ExecContext(ctx, fmt.Sprintf("ALTER SYSTEM SET max_locks_per_transaction = %d", originalValue))
+    }
+    if err != nil {
+        return fmt.Errorf("failed to reset max_locks_per_transaction: %w", err)
+    }
+
+    // Reload config
+    _, err = db.ExecContext(ctx, "SELECT pg_reload_conf()")
+    if err != nil {
+        return fmt.Errorf("failed to reload config: %w", err)
+    }
+
+    return nil
+}
+
+// OriginalSettings stores PostgreSQL settings to restore after operation
+type OriginalSettings struct {
+    MaxLocks           int
+    MaintenanceWorkMem string
+}
+
+// boostPostgreSQLSettings boosts multiple PostgreSQL settings for large restores
+func (e *Engine) boostPostgreSQLSettings(ctx context.Context, lockBoostValue int) (*OriginalSettings, error) {
+    connStr := e.buildConnString()
+    db, err := sql.Open("pgx", connStr)
+    if err != nil {
+        return nil, fmt.Errorf("failed to connect: %w", err)
+    }
+    defer db.Close()
+
+    original := &OriginalSettings{}
+
+    // Get current max_locks_per_transaction
+    var maxLocksStr string
+    if err := db.QueryRowContext(ctx, "SHOW max_locks_per_transaction").Scan(&maxLocksStr); err == nil {
+        original.MaxLocks, _ = strconv.Atoi(maxLocksStr)
+    }
+
+    // Get current maintenance_work_mem
+    db.QueryRowContext(ctx, "SHOW maintenance_work_mem").Scan(&original.MaintenanceWorkMem)
+
+    // Boost max_locks_per_transaction (if not already high enough)
+    if original.MaxLocks < lockBoostValue {
+        _, err = db.ExecContext(ctx, fmt.Sprintf("ALTER SYSTEM SET max_locks_per_transaction = %d", lockBoostValue))
+        if err != nil {
+            e.log.Warn("Could not boost max_locks_per_transaction", "error", err)
+        }
+    }
+
+    // Boost maintenance_work_mem to 2GB for faster index creation
+    _, err = db.ExecContext(ctx, "ALTER SYSTEM SET maintenance_work_mem = '2GB'")
+    if err != nil {
+        e.log.Warn("Could not boost maintenance_work_mem", "error", err)
+    }
+
+    // Reload config to apply changes (no restart needed for these settings)
+    _, err = db.ExecContext(ctx, "SELECT pg_reload_conf()")
+    if err != nil {
+        return original, fmt.Errorf("failed to reload config: %w", err)
+    }
+
+    return original, nil
+}
+
+// resetPostgreSQLSettings restores original PostgreSQL settings
+func (e *Engine) resetPostgreSQLSettings(ctx context.Context, original *OriginalSettings) error {
+    connStr := e.buildConnString()
+    db, err := sql.Open("pgx", connStr)
+    if err != nil {
+        return fmt.Errorf("failed to connect: %w", err)
+    }
+    defer db.Close()
+
+    // Reset max_locks_per_transaction
+    if original.MaxLocks == 64 { // Default
+        db.ExecContext(ctx, "ALTER SYSTEM RESET max_locks_per_transaction")
+    } else if original.MaxLocks > 0 {
+        db.ExecContext(ctx, fmt.Sprintf("ALTER SYSTEM SET max_locks_per_transaction = %d", original.MaxLocks))
+    }
+
+    // Reset maintenance_work_mem
+    if original.MaintenanceWorkMem == "64MB" { // Default
+        db.ExecContext(ctx, "ALTER SYSTEM RESET maintenance_work_mem")
+    } else if original.MaintenanceWorkMem != "" {
+        db.ExecContext(ctx, fmt.Sprintf("ALTER SYSTEM SET maintenance_work_mem = '%s'", original.MaintenanceWorkMem))
+    }
+
+    // Reload config
+    _, err = db.ExecContext(ctx, "SELECT pg_reload_conf()")
+    if err != nil {
+        return fmt.Errorf("failed to reload config: %w", err)
+    }
+
+    return nil
+}
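One PostgreSQL detail worth keeping in mind when reading the boost helpers above: in stock PostgreSQL, max_locks_per_transaction has postmaster context, so ALTER SYSTEM persists the new value to postgresql.auto.conf, but pg_reload_conf() cannot apply it until the server restarts; maintenance_work_mem, by contrast, does take effect without a restart. A quick way to check which settings a reload can actually apply, as a standalone sketch (assumed pgx stdlib driver and local connection string):

package main

import (
    "database/sql"
    "fmt"

    _ "github.com/jackc/pgx/v5/stdlib" // assumed driver; registers the "pgx" name
)

func main() {
    db, err := sql.Open("pgx", "dbname=postgres sslmode=disable") // assumed local socket
    if err != nil {
        panic(err)
    }
    defer db.Close()

    rows, err := db.Query(`SELECT name, context FROM pg_settings
        WHERE name IN ('max_locks_per_transaction', 'maintenance_work_mem')`)
    if err != nil {
        panic(err)
    }
    defer rows.Close()

    for rows.Next() {
        var name, context string
        if err := rows.Scan(&name, &context); err != nil {
            panic(err)
        }
        // "postmaster" means the value is only read at server start;
        // "user"/"sighup" settings can change without a restart.
        fmt.Printf("%s: %s\n", name, context)
    }
}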
@@ -3,6 +3,7 @@ package restore
 import (
     "bufio"
     "compress/gzip"
+    "context"
     "encoding/json"
     "fmt"
     "io"
@@ -20,43 +21,43 @@ import (
 // RestoreErrorReport contains comprehensive information about a restore failure
 type RestoreErrorReport struct {
     // Metadata
     Timestamp time.Time `json:"timestamp"`
     Version   string    `json:"version"`
     GoVersion string    `json:"go_version"`
     OS        string    `json:"os"`
     Arch      string    `json:"arch"`

     // Archive info
     ArchivePath   string `json:"archive_path"`
     ArchiveSize   int64  `json:"archive_size"`
     ArchiveFormat string `json:"archive_format"`

     // Database info
     TargetDB     string `json:"target_db"`
     DatabaseType string `json:"database_type"`

     // Error details
     ExitCode     int    `json:"exit_code"`
     ErrorMessage string `json:"error_message"`
     ErrorType    string `json:"error_type"`
     ErrorHint    string `json:"error_hint"`
     TotalErrors  int    `json:"total_errors"`

     // Captured output
     LastStderr  []string `json:"last_stderr"`
     FirstErrors []string `json:"first_errors"`

     // Context around failure
     FailureContext *FailureContext `json:"failure_context,omitempty"`

     // Diagnosis results
     DiagnosisResult *DiagnoseResult `json:"diagnosis_result,omitempty"`

     // Environment (sanitized)
     PostgresVersion  string `json:"postgres_version,omitempty"`
     PgRestoreVersion string `json:"pg_restore_version,omitempty"`
     PsqlVersion      string `json:"psql_version,omitempty"`

     // Recommendations
     Recommendations []string `json:"recommendations"`
 }
@@ -67,40 +68,40 @@ type FailureContext struct {
     FailedLine       int      `json:"failed_line,omitempty"`
     FailedStatement  string   `json:"failed_statement,omitempty"`
     SurroundingLines []string `json:"surrounding_lines,omitempty"`

     // For COPY block errors
     InCopyBlock    bool     `json:"in_copy_block,omitempty"`
     CopyTableName  string   `json:"copy_table_name,omitempty"`
     CopyStartLine  int      `json:"copy_start_line,omitempty"`
     SampleCopyData []string `json:"sample_copy_data,omitempty"`

     // File position info
     BytePosition    int64   `json:"byte_position,omitempty"`
     PercentComplete float64 `json:"percent_complete,omitempty"`
 }

 // ErrorCollector captures detailed error information during restore
 type ErrorCollector struct {
     log         logger.Logger
     cfg         *config.Config
     archivePath string
     targetDB    string
     format      ArchiveFormat

     // Captured data
     stderrLines []string
     firstErrors []string
     lastErrors  []string
     totalErrors int
     exitCode    int

     // Limits
     maxStderrLines  int
     maxErrorCapture int

     // State
     startTime time.Time
     enabled   bool
 }

 // NewErrorCollector creates a new error collector
@@ -126,30 +127,30 @@ func (ec *ErrorCollector) CaptureStderr(chunk string) {
     if !ec.enabled {
         return
     }

     lines := strings.Split(chunk, "\n")
     for _, line := range lines {
         line = strings.TrimSpace(line)
         if line == "" {
             continue
         }

         // Store last N lines of stderr
         if len(ec.stderrLines) >= ec.maxStderrLines {
             // Shift array, drop oldest
             ec.stderrLines = ec.stderrLines[1:]
         }
         ec.stderrLines = append(ec.stderrLines, line)

         // Check if this is an error line
         if isErrorLine(line) {
             ec.totalErrors++

             // Capture first N errors
             if len(ec.firstErrors) < ec.maxErrorCapture {
                 ec.firstErrors = append(ec.firstErrors, line)
             }

             // Keep last N errors (ring buffer style)
             if len(ec.lastErrors) >= ec.maxErrorCapture {
                 ec.lastErrors = ec.lastErrors[1:]
@@ -184,36 +185,36 @@ func (ec *ErrorCollector) GenerateReport(errMessage string, errType string, errH
         LastStderr:  ec.stderrLines,
         FirstErrors: ec.firstErrors,
     }

     // Get archive size
     if stat, err := os.Stat(ec.archivePath); err == nil {
         report.ArchiveSize = stat.Size()
     }

     // Get tool versions
     report.PostgresVersion = getCommandVersion("postgres", "--version")
     report.PgRestoreVersion = getCommandVersion("pg_restore", "--version")
     report.PsqlVersion = getCommandVersion("psql", "--version")

     // Analyze failure context
     report.FailureContext = ec.analyzeFailureContext()

     // Run diagnosis if not already done
     diagnoser := NewDiagnoser(ec.log, false)
     if diagResult, err := diagnoser.DiagnoseFile(ec.archivePath); err == nil {
         report.DiagnosisResult = diagResult
     }

     // Generate recommendations
     report.Recommendations = ec.generateRecommendations(report)

     return report
 }

 // analyzeFailureContext extracts context around the failure
 func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
     ctx := &FailureContext{}

     // Look for line number in errors
     for _, errLine := range ec.lastErrors {
         if lineNum := extractLineNumber(errLine); lineNum > 0 {
@@ -221,7 +222,7 @@ func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
             break
         }
     }

     // Look for COPY-related errors
     for _, errLine := range ec.lastErrors {
         if strings.Contains(errLine, "COPY") || strings.Contains(errLine, "syntax error") {
@@ -233,12 +234,12 @@ func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
             break
         }
     }

     // If we have a line number, try to get surrounding context from the dump
     if ctx.FailedLine > 0 && ec.archivePath != "" {
         ctx.SurroundingLines = ec.getSurroundingLines(ctx.FailedLine, 5)
     }

     return ctx
 }

@@ -246,13 +247,13 @@ func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
 func (ec *ErrorCollector) getSurroundingLines(lineNum int, context int) []string {
     var reader io.Reader
     var lines []string

     file, err := os.Open(ec.archivePath)
     if err != nil {
         return nil
     }
     defer file.Close()

     // Handle compressed files
     if strings.HasSuffix(ec.archivePath, ".gz") {
         gz, err := gzip.NewReader(file)
@@ -264,19 +265,19 @@ func (ec *ErrorCollector) getSurroundingLines(lineNum int, context int) []string
     } else {
         reader = file
     }

     scanner := bufio.NewScanner(reader)
     buf := make([]byte, 0, 1024*1024)
     scanner.Buffer(buf, 10*1024*1024)

     currentLine := 0
     startLine := lineNum - context
     endLine := lineNum + context

     if startLine < 1 {
         startLine = 1
     }

     for scanner.Scan() {
         currentLine++
         if currentLine >= startLine && currentLine <= endLine {
@@ -290,18 +291,18 @@ func (ec *ErrorCollector) getSurroundingLines(lineNum int, context int) []string
             break
         }
     }

     return lines
 }

 // generateRecommendations provides actionable recommendations based on the error
 func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []string {
     var recs []string

     // Check diagnosis results
     if report.DiagnosisResult != nil {
         if report.DiagnosisResult.IsTruncated {
             recs = append(recs,
                 "CRITICAL: Backup file is truncated/incomplete",
                 "Action: Re-run the backup for the affected database",
                 "Check: Verify disk space was available during backup",
@@ -317,14 +318,14 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
         }
         if report.DiagnosisResult.Details != nil && report.DiagnosisResult.Details.UnterminatedCopy {
             recs = append(recs,
                 fmt.Sprintf("ISSUE: COPY block for table '%s' was not terminated",
                     report.DiagnosisResult.Details.LastCopyTable),
                 "Cause: Backup was interrupted during data export",
                 "Action: Re-run backup ensuring it completes fully",
             )
         }
     }

     // Check error patterns
     if report.TotalErrors > 1000000 {
         recs = append(recs,
@@ -333,7 +334,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
             "Check: Verify dump format matches restore command",
         )
     }

     // Check for common error types
     errLower := strings.ToLower(report.ErrorMessage)
     if strings.Contains(errLower, "syntax error") {
@@ -343,7 +344,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
             "Check: Run 'dbbackup restore diagnose <archive>' for detailed analysis",
         )
     }

     if strings.Contains(errLower, "permission denied") {
         recs = append(recs,
             "ISSUE: Permission denied",
@@ -351,7 +352,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
             "Action: For ownership preservation, use a superuser account",
         )
     }

     if strings.Contains(errLower, "does not exist") {
         recs = append(recs,
             "ISSUE: Missing object reference",
@@ -359,7 +360,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
             "Action: Check if target database was created",
         )
     }

     if len(recs) == 0 {
         recs = append(recs,
             "Run 'dbbackup restore diagnose <archive>' for detailed analysis",
@@ -367,7 +368,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
             "Review the PostgreSQL/MySQL logs on the target server",
         )
     }

     return recs
 }

@@ -378,56 +379,56 @@ func (ec *ErrorCollector) SaveReport(report *RestoreErrorReport, outputPath stri
     if err := os.MkdirAll(dir, 0755); err != nil {
         return fmt.Errorf("failed to create directory: %w", err)
     }

     // Marshal to JSON with indentation
     data, err := json.MarshalIndent(report, "", "  ")
     if err != nil {
         return fmt.Errorf("failed to marshal report: %w", err)
     }

     // Write file
     if err := os.WriteFile(outputPath, data, 0644); err != nil {
         return fmt.Errorf("failed to write report: %w", err)
     }

     return nil
 }

 // PrintReport prints a human-readable summary of the error report
 func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
     fmt.Println()
-    fmt.Println(strings.Repeat("═", 70))
-    fmt.Println("   🔴 RESTORE ERROR REPORT")
-    fmt.Println(strings.Repeat("═", 70))
+    fmt.Println(strings.Repeat("=", 70))
+    fmt.Println("   [ERROR] RESTORE ERROR REPORT")
+    fmt.Println(strings.Repeat("=", 70))

-    fmt.Printf("\n📅 Timestamp: %s\n", report.Timestamp.Format("2006-01-02 15:04:05"))
-    fmt.Printf("📦 Archive: %s\n", filepath.Base(report.ArchivePath))
-    fmt.Printf("📊 Format: %s\n", report.ArchiveFormat)
-    fmt.Printf("🎯 Target DB: %s\n", report.TargetDB)
-    fmt.Printf("⚠️ Exit Code: %d\n", report.ExitCode)
-    fmt.Printf("❌ Total Errors: %d\n", report.TotalErrors)
+    fmt.Printf("\n[TIME] Timestamp: %s\n", report.Timestamp.Format("2006-01-02 15:04:05"))
+    fmt.Printf("[FILE] Archive: %s\n", filepath.Base(report.ArchivePath))
+    fmt.Printf("[FMT] Format: %s\n", report.ArchiveFormat)
+    fmt.Printf("[TGT] Target DB: %s\n", report.TargetDB)
+    fmt.Printf("[CODE] Exit Code: %d\n", report.ExitCode)
+    fmt.Printf("[ERR] Total Errors: %d\n", report.TotalErrors)

-    fmt.Println("\n" + strings.Repeat("─", 70))
+    fmt.Println("\n" + strings.Repeat("-", 70))
     fmt.Println("ERROR DETAILS:")
-    fmt.Println(strings.Repeat("─", 70))
+    fmt.Println(strings.Repeat("-", 70))

     fmt.Printf("\nType: %s\n", report.ErrorType)
     fmt.Printf("Message: %s\n", report.ErrorMessage)
     if report.ErrorHint != "" {
         fmt.Printf("Hint: %s\n", report.ErrorHint)
     }

     // Show failure context
     if report.FailureContext != nil && report.FailureContext.FailedLine > 0 {
-        fmt.Println("\n" + strings.Repeat("─", 70))
+        fmt.Println("\n" + strings.Repeat("-", 70))
         fmt.Println("FAILURE CONTEXT:")
-        fmt.Println(strings.Repeat("─", 70))
+        fmt.Println(strings.Repeat("-", 70))

         fmt.Printf("\nFailed at line: %d\n", report.FailureContext.FailedLine)
|
fmt.Printf("\nFailed at line: %d\n", report.FailureContext.FailedLine)
|
||||||
if report.FailureContext.InCopyBlock {
|
if report.FailureContext.InCopyBlock {
|
||||||
fmt.Printf("Inside COPY block for table: %s\n", report.FailureContext.CopyTableName)
|
fmt.Printf("Inside COPY block for table: %s\n", report.FailureContext.CopyTableName)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(report.FailureContext.SurroundingLines) > 0 {
|
if len(report.FailureContext.SurroundingLines) > 0 {
|
||||||
fmt.Println("\nSurrounding lines:")
|
fmt.Println("\nSurrounding lines:")
|
||||||
for _, line := range report.FailureContext.SurroundingLines {
|
for _, line := range report.FailureContext.SurroundingLines {
|
||||||
@@ -435,13 +436,13 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show first few errors
|
// Show first few errors
|
||||||
if len(report.FirstErrors) > 0 {
|
if len(report.FirstErrors) > 0 {
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("FIRST ERRORS:")
|
fmt.Println("FIRST ERRORS:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
for i, err := range report.FirstErrors {
|
for i, err := range report.FirstErrors {
|
||||||
if i >= 5 {
|
if i >= 5 {
|
||||||
fmt.Printf("... and %d more\n", len(report.FirstErrors)-5)
|
fmt.Printf("... and %d more\n", len(report.FirstErrors)-5)
|
||||||
@@ -450,18 +451,18 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
fmt.Printf(" %d. %s\n", i+1, truncateString(err, 100))
|
fmt.Printf(" %d. %s\n", i+1, truncateString(err, 100))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show diagnosis summary
|
// Show diagnosis summary
|
||||||
if report.DiagnosisResult != nil && !report.DiagnosisResult.IsValid {
|
if report.DiagnosisResult != nil && !report.DiagnosisResult.IsValid {
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("DIAGNOSIS:")
|
fmt.Println("DIAGNOSIS:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
if report.DiagnosisResult.IsTruncated {
|
if report.DiagnosisResult.IsTruncated {
|
||||||
fmt.Println(" ❌ File is TRUNCATED")
|
fmt.Println(" [FAIL] File is TRUNCATED")
|
||||||
}
|
}
|
||||||
if report.DiagnosisResult.IsCorrupted {
|
if report.DiagnosisResult.IsCorrupted {
|
||||||
fmt.Println(" ❌ File is CORRUPTED")
|
fmt.Println(" [FAIL] File is CORRUPTED")
|
||||||
}
|
}
|
||||||
for i, err := range report.DiagnosisResult.Errors {
|
for i, err := range report.DiagnosisResult.Errors {
|
||||||
if i >= 3 {
|
if i >= 3 {
|
||||||
@@ -470,21 +471,21 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
fmt.Printf(" • %s\n", err)
|
fmt.Printf(" • %s\n", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show recommendations
|
// Show recommendations
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("💡 RECOMMENDATIONS:")
|
fmt.Println("[HINT] RECOMMENDATIONS:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
for _, rec := range report.Recommendations {
|
for _, rec := range report.Recommendations {
|
||||||
fmt.Printf(" • %s\n", rec)
|
fmt.Printf(" - %s\n", rec)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show tool versions
|
// Show tool versions
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("ENVIRONMENT:")
|
fmt.Println("ENVIRONMENT:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
fmt.Printf(" OS: %s/%s\n", report.OS, report.Arch)
|
fmt.Printf(" OS: %s/%s\n", report.OS, report.Arch)
|
||||||
fmt.Printf(" Go: %s\n", report.GoVersion)
|
fmt.Printf(" Go: %s\n", report.GoVersion)
|
||||||
if report.PgRestoreVersion != "" {
|
if report.PgRestoreVersion != "" {
|
||||||
@@ -493,15 +494,15 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
if report.PsqlVersion != "" {
|
if report.PsqlVersion != "" {
|
||||||
fmt.Printf(" psql: %s\n", report.PsqlVersion)
|
fmt.Printf(" psql: %s\n", report.PsqlVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(strings.Repeat("═", 70))
|
fmt.Println(strings.Repeat("=", 70))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper functions
|
// Helper functions
|
||||||
|
|
||||||
func isErrorLine(line string) bool {
|
func isErrorLine(line string) bool {
|
||||||
return strings.Contains(line, "ERROR:") ||
|
return strings.Contains(line, "ERROR:") ||
|
||||||
strings.Contains(line, "FATAL:") ||
|
strings.Contains(line, "FATAL:") ||
|
||||||
strings.Contains(line, "error:") ||
|
strings.Contains(line, "error:") ||
|
||||||
strings.Contains(line, "PANIC:")
|
strings.Contains(line, "PANIC:")
|
||||||
}
|
}
|
||||||
@@ -556,7 +557,11 @@ func getDatabaseType(format ArchiveFormat) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getCommandVersion(cmd string, arg string) string {
|
func getCommandVersion(cmd string, arg string) string {
|
||||||
output, err := exec.Command(cmd, arg).CombinedOutput()
|
// Use timeout to prevent blocking if command hangs
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
output, err := exec.CommandContext(ctx, cmd, arg).CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|||||||
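The getCommandVersion change is the same hardening applied elsewhere in this set: wrap the external probe in a context deadline so a wedged client binary cannot stall report generation. A minimal self-contained sketch of the pattern, assuming only the helper shown in the hunk (the final TrimSpace of the output is an illustrative addition, not taken from this diff):

package main

import (
	"context"
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func getCommandVersion(cmd string, arg string) string {
	// Without a deadline a hung binary would block the caller forever;
	// CommandContext kills the child process once the context expires.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	output, err := exec.CommandContext(ctx, cmd, arg).CombinedOutput()
	if err != nil {
		// Covers both "binary not installed" and "timed out".
		return ""
	}
	return strings.TrimSpace(string(output))
}

func main() {
	fmt.Println(getCommandVersion("pg_restore", "--version"))
}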
internal/restore/preflight.go (new file, 429 lines)
@@ -0,0 +1,429 @@
package restore

import (
	"context"
	"database/sql"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/shirou/gopsutil/v3/mem"
)

// PreflightResult contains all preflight check results
type PreflightResult struct {
	// Linux system checks
	Linux LinuxChecks

	// PostgreSQL checks
	PostgreSQL PostgreSQLChecks

	// Archive analysis
	Archive ArchiveChecks

	// Overall status
	CanProceed bool
	Warnings   []string
	Errors     []string
}

// LinuxChecks contains Linux kernel/system checks
type LinuxChecks struct {
	ShmMax         int64   // /proc/sys/kernel/shmmax
	ShmAll         int64   // /proc/sys/kernel/shmall
	MemTotal       uint64  // Total RAM in bytes
	MemAvailable   uint64  // Available RAM in bytes
	MemUsedPercent float64 // Memory usage percentage
	ShmMaxOK       bool    // Is shmmax sufficient?
	ShmAllOK       bool    // Is shmall sufficient?
	MemAvailableOK bool    // Is available RAM sufficient?
	IsLinux        bool    // Are we running on Linux?
}

// PostgreSQLChecks contains PostgreSQL configuration checks
type PostgreSQLChecks struct {
	MaxLocksPerTransaction int    // Current setting
	MaintenanceWorkMem     string // Current setting
	SharedBuffers          string // Current setting (info only)
	MaxConnections         int    // Current setting
	Version                string // PostgreSQL version
	IsSuperuser            bool   // Can we modify settings?
}

// ArchiveChecks contains analysis of the backup archive
type ArchiveChecks struct {
	TotalDatabases       int
	TotalBlobCount       int            // Estimated total BLOBs across all databases
	BlobsByDB            map[string]int // BLOBs per database
	HasLargeBlobs        bool           // Any DB with >1000 BLOBs?
	RecommendedLockBoost int            // Calculated lock boost value
}

// RunPreflightChecks performs all preflight checks before a cluster restore
func (e *Engine) RunPreflightChecks(ctx context.Context, dumpsDir string, entries []os.DirEntry) (*PreflightResult, error) {
	result := &PreflightResult{
		CanProceed: true,
		Archive: ArchiveChecks{
			BlobsByDB: make(map[string]int),
		},
	}

	e.progress.Update("[PREFLIGHT] Running system checks...")
	e.log.Info("Starting preflight checks for cluster restore")

	// 1. System checks (cross-platform via gopsutil)
	e.checkSystemResources(result)

	// 2. PostgreSQL checks (via existing connection)
	e.checkPostgreSQL(ctx, result)

	// 3. Archive analysis (count BLOBs to scale lock boost)
	e.analyzeArchive(ctx, dumpsDir, entries, result)

	// 4. Calculate recommended settings
	e.calculateRecommendations(result)

	// 5. Print summary
	e.printPreflightSummary(result)

	return result, nil
}

// checkSystemResources uses gopsutil for cross-platform system checks
func (e *Engine) checkSystemResources(result *PreflightResult) {
	result.Linux.IsLinux = runtime.GOOS == "linux"

	// Get memory info (works on Linux, macOS, Windows, BSD)
	if vmem, err := mem.VirtualMemory(); err == nil {
		result.Linux.MemTotal = vmem.Total
		result.Linux.MemAvailable = vmem.Available
		result.Linux.MemUsedPercent = vmem.UsedPercent

		// 4GB minimum available for large restores
		result.Linux.MemAvailableOK = vmem.Available >= 4*1024*1024*1024

		e.log.Info("System memory detected",
			"total", humanize.Bytes(vmem.Total),
			"available", humanize.Bytes(vmem.Available),
			"used_percent", fmt.Sprintf("%.1f%%", vmem.UsedPercent))
	} else {
		e.log.Warn("Could not detect system memory", "error", err)
	}

	// Linux-specific kernel checks (shmmax, shmall)
	if result.Linux.IsLinux {
		e.checkLinuxKernel(result)
	}

	// Add warnings for insufficient resources
	if !result.Linux.MemAvailableOK && result.Linux.MemAvailable > 0 {
		result.Warnings = append(result.Warnings,
			fmt.Sprintf("Available RAM is low: %s (recommend 4GB+ for large restores)",
				humanize.Bytes(result.Linux.MemAvailable)))
	}
	if result.Linux.MemUsedPercent > 85 {
		result.Warnings = append(result.Warnings,
			fmt.Sprintf("High memory usage: %.1f%% - restore may cause OOM", result.Linux.MemUsedPercent))
	}
}

// checkLinuxKernel reads Linux-specific kernel limits from /proc
func (e *Engine) checkLinuxKernel(result *PreflightResult) {
	// Read shmmax
	if data, err := os.ReadFile("/proc/sys/kernel/shmmax"); err == nil {
		val, _ := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
		result.Linux.ShmMax = val
		// 8GB minimum for large restores
		result.Linux.ShmMaxOK = val >= 8*1024*1024*1024
	}

	// Read shmall (in pages, typically 4KB each)
	if data, err := os.ReadFile("/proc/sys/kernel/shmall"); err == nil {
		val, _ := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
		result.Linux.ShmAll = val
		// 2M pages = 8GB minimum
		result.Linux.ShmAllOK = val >= 2*1024*1024
	}

	// Add kernel warnings
	if !result.Linux.ShmMaxOK && result.Linux.ShmMax > 0 {
		result.Warnings = append(result.Warnings,
			fmt.Sprintf("Linux shmmax is low: %s (recommend 8GB+). Fix: sudo sysctl -w kernel.shmmax=17179869184",
				humanize.Bytes(uint64(result.Linux.ShmMax))))
	}
	if !result.Linux.ShmAllOK && result.Linux.ShmAll > 0 {
		result.Warnings = append(result.Warnings,
			fmt.Sprintf("Linux shmall is low: %s pages (recommend 2M+). Fix: sudo sysctl -w kernel.shmall=4194304",
				humanize.Comma(result.Linux.ShmAll)))
	}
}

// checkPostgreSQL checks PostgreSQL configuration via SQL
func (e *Engine) checkPostgreSQL(ctx context.Context, result *PreflightResult) {
	connStr := e.buildConnString()
	db, err := sql.Open("pgx", connStr)
	if err != nil {
		e.log.Warn("Could not connect to PostgreSQL for preflight checks", "error", err)
		return
	}
	defer db.Close()

	// Check max_locks_per_transaction
	var maxLocks string
	if err := db.QueryRowContext(ctx, "SHOW max_locks_per_transaction").Scan(&maxLocks); err == nil {
		result.PostgreSQL.MaxLocksPerTransaction, _ = strconv.Atoi(maxLocks)
	}

	// Check maintenance_work_mem
	db.QueryRowContext(ctx, "SHOW maintenance_work_mem").Scan(&result.PostgreSQL.MaintenanceWorkMem)

	// Check shared_buffers (info only, can't change without restart)
	db.QueryRowContext(ctx, "SHOW shared_buffers").Scan(&result.PostgreSQL.SharedBuffers)

	// Check max_connections
	var maxConn string
	if err := db.QueryRowContext(ctx, "SHOW max_connections").Scan(&maxConn); err == nil {
		result.PostgreSQL.MaxConnections, _ = strconv.Atoi(maxConn)
	}

	// Check version
	db.QueryRowContext(ctx, "SHOW server_version").Scan(&result.PostgreSQL.Version)

	// Check if superuser
	var isSuperuser bool
	if err := db.QueryRowContext(ctx, "SELECT current_setting('is_superuser') = 'on'").Scan(&isSuperuser); err == nil {
		result.PostgreSQL.IsSuperuser = isSuperuser
	}

	// Add info/warnings
	if result.PostgreSQL.MaxLocksPerTransaction < 256 {
		e.log.Info("PostgreSQL max_locks_per_transaction is low - will auto-boost",
			"current", result.PostgreSQL.MaxLocksPerTransaction)
	}

	// Parse shared_buffers and warn if very low
	sharedBuffersMB := parseMemoryToMB(result.PostgreSQL.SharedBuffers)
	if sharedBuffersMB > 0 && sharedBuffersMB < 256 {
		result.Warnings = append(result.Warnings,
			fmt.Sprintf("PostgreSQL shared_buffers is low: %s (recommend 1GB+, requires restart)",
				result.PostgreSQL.SharedBuffers))
	}
}

// analyzeArchive counts BLOBs in dump files to calculate optimal lock boost
func (e *Engine) analyzeArchive(ctx context.Context, dumpsDir string, entries []os.DirEntry, result *PreflightResult) {
	e.progress.Update("[PREFLIGHT] Analyzing archive for large objects...")

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		result.Archive.TotalDatabases++
		dumpFile := filepath.Join(dumpsDir, entry.Name())
		dbName := strings.TrimSuffix(entry.Name(), ".dump")
		dbName = strings.TrimSuffix(dbName, ".sql.gz")

		// For custom format dumps, use pg_restore -l to count BLOBs
		if strings.HasSuffix(entry.Name(), ".dump") {
			blobCount := e.countBlobsInDump(ctx, dumpFile)
			if blobCount > 0 {
				result.Archive.BlobsByDB[dbName] = blobCount
				result.Archive.TotalBlobCount += blobCount
				if blobCount > 1000 {
					result.Archive.HasLargeBlobs = true
				}
			}
		}

		// For SQL format, try to estimate from file content (sample check)
		if strings.HasSuffix(entry.Name(), ".sql.gz") {
			// Check for lo_create patterns in compressed SQL
			blobCount := e.estimateBlobsInSQL(dumpFile)
			if blobCount > 0 {
				result.Archive.BlobsByDB[dbName] = blobCount
				result.Archive.TotalBlobCount += blobCount
				if blobCount > 1000 {
					result.Archive.HasLargeBlobs = true
				}
			}
		}
	}
}

// countBlobsInDump uses pg_restore -l to count BLOB entries
func (e *Engine) countBlobsInDump(ctx context.Context, dumpFile string) int {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "pg_restore", "-l", dumpFile)
	output, err := cmd.Output()
	if err != nil {
		return 0
	}

	// Count lines containing BLOB/LARGE OBJECT
	count := 0
	for _, line := range strings.Split(string(output), "\n") {
		if strings.Contains(line, "BLOB") || strings.Contains(line, "LARGE OBJECT") {
			count++
		}
	}
	return count
}

// estimateBlobsInSQL samples compressed SQL for lo_create patterns
func (e *Engine) estimateBlobsInSQL(sqlFile string) int {
	// Use zgrep for efficient searching in gzipped files
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Count lo_create calls (each = one large object)
	cmd := exec.CommandContext(ctx, "zgrep", "-c", "lo_create", sqlFile)
	output, err := cmd.Output()
	if err != nil {
		// Also try SELECT lo_create pattern
		cmd2 := exec.CommandContext(ctx, "zgrep", "-c", "SELECT.*lo_create", sqlFile)
		output, err = cmd2.Output()
		if err != nil {
			return 0
		}
	}

	count, _ := strconv.Atoi(strings.TrimSpace(string(output)))
	return count
}

// calculateRecommendations determines optimal settings based on analysis
func (e *Engine) calculateRecommendations(result *PreflightResult) {
	// Base lock boost
	lockBoost := 2048

	// Scale up based on BLOB count
	if result.Archive.TotalBlobCount > 5000 {
		lockBoost = 4096
	}
	if result.Archive.TotalBlobCount > 10000 {
		lockBoost = 8192
	}
	if result.Archive.TotalBlobCount > 50000 {
		lockBoost = 16384
	}

	// Cap at reasonable maximum
	if lockBoost > 16384 {
		lockBoost = 16384
	}

	result.Archive.RecommendedLockBoost = lockBoost

	// Log recommendation
	e.log.Info("Calculated recommended lock boost",
		"total_blobs", result.Archive.TotalBlobCount,
		"recommended_locks", lockBoost)
}

// printPreflightSummary prints a nice summary of all checks
func (e *Engine) printPreflightSummary(result *PreflightResult) {
	fmt.Println()
	fmt.Println(strings.Repeat("─", 60))
	fmt.Println("  PREFLIGHT CHECKS")
	fmt.Println(strings.Repeat("─", 60))

	// System checks (cross-platform)
	fmt.Println("\n  System Resources:")
	printCheck("Total RAM", humanize.Bytes(result.Linux.MemTotal), true)
	printCheck("Available RAM", humanize.Bytes(result.Linux.MemAvailable), result.Linux.MemAvailableOK || result.Linux.MemAvailable == 0)
	printCheck("Memory Usage", fmt.Sprintf("%.1f%%", result.Linux.MemUsedPercent), result.Linux.MemUsedPercent < 85)

	// Linux-specific kernel checks
	if result.Linux.IsLinux && result.Linux.ShmMax > 0 {
		fmt.Println("\n  Linux Kernel:")
		printCheck("shmmax", humanize.Bytes(uint64(result.Linux.ShmMax)), result.Linux.ShmMaxOK)
		printCheck("shmall", humanize.Comma(result.Linux.ShmAll)+" pages", result.Linux.ShmAllOK)
	}

	// PostgreSQL checks
	fmt.Println("\n  PostgreSQL:")
	printCheck("Version", result.PostgreSQL.Version, true)
	printCheck("max_locks_per_transaction", fmt.Sprintf("%s → %s (auto-boost)",
		humanize.Comma(int64(result.PostgreSQL.MaxLocksPerTransaction)),
		humanize.Comma(int64(result.Archive.RecommendedLockBoost))),
		true)
	printCheck("maintenance_work_mem", fmt.Sprintf("%s → 2GB (auto-boost)",
		result.PostgreSQL.MaintenanceWorkMem), true)
	printInfo("shared_buffers", result.PostgreSQL.SharedBuffers)
	printCheck("Superuser", fmt.Sprintf("%v", result.PostgreSQL.IsSuperuser), result.PostgreSQL.IsSuperuser)

	// Archive analysis
	fmt.Println("\n  Archive Analysis:")
	printInfo("Total databases", humanize.Comma(int64(result.Archive.TotalDatabases)))
	printInfo("Total BLOBs detected", humanize.Comma(int64(result.Archive.TotalBlobCount)))
	if len(result.Archive.BlobsByDB) > 0 {
		fmt.Println("  Databases with BLOBs:")
		for db, count := range result.Archive.BlobsByDB {
			status := "✓"
			if count > 1000 {
				status = "⚠" // assign, don't re-declare: a shadowed variable here would leave every row marked "✓"
			}
			fmt.Printf("    %s %s: %s BLOBs\n", status, db, humanize.Comma(int64(count)))
		}
	}
	// Warnings
	if len(result.Warnings) > 0 {
		fmt.Println("\n  ⚠ Warnings:")
		for _, w := range result.Warnings {
			fmt.Printf("    • %s\n", w)
		}
	}

	fmt.Println(strings.Repeat("─", 60))
	fmt.Println()
}

func printCheck(name, value string, ok bool) {
	status := "✓"
	if !ok {
		status = "⚠"
	}
	fmt.Printf("  %s %s: %s\n", status, name, value)
}

func printInfo(name, value string) {
	fmt.Printf("  ℹ %s: %s\n", name, value)
}

func parseMemoryToMB(memStr string) int {
	memStr = strings.ToUpper(strings.TrimSpace(memStr))
	var value int
	var unit string
	fmt.Sscanf(memStr, "%d%s", &value, &unit)

	switch {
	case strings.HasPrefix(unit, "G"):
		return value * 1024
	case strings.HasPrefix(unit, "M"):
		return value
	case strings.HasPrefix(unit, "K"):
		return value / 1024
	default:
		return value / (1024 * 1024) // Assume bytes
	}
}

func (e *Engine) buildConnString() string {
	if e.cfg.Host == "localhost" || e.cfg.Host == "" {
		return fmt.Sprintf("user=%s password=%s dbname=postgres sslmode=disable",
			e.cfg.User, e.cfg.Password)
	}
	return fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=postgres sslmode=disable",
		e.cfg.Host, e.cfg.Port, e.cfg.User, e.cfg.Password)
}
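The lock-boost tiers in calculateRecommendations are the heart of the new preflight: each PostgreSQL large object takes a lock during restore, so max_locks_per_transaction is scaled with the total BLOB count and capped at 16384. A pure-function restatement of those tiers with a few spot checks, useful for verifying the thresholds in isolation (the function name and driver loop are illustrative):

package main

import "fmt"

// recommendedLockBoost mirrors the tiers from calculateRecommendations.
func recommendedLockBoost(totalBlobs int) int {
	boost := 2048 // base
	if totalBlobs > 5000 {
		boost = 4096
	}
	if totalBlobs > 10000 {
		boost = 8192
	}
	if totalBlobs > 50000 {
		boost = 16384 // also the hard cap
	}
	return boost
}

func main() {
	for _, n := range []int{0, 5000, 5001, 10001, 50001, 200000} {
		fmt.Printf("%7d BLOBs -> max_locks_per_transaction %d\n", n, recommendedLockBoost(n))
	}
	// 0 and 5000 stay at the 2048 base (thresholds are strict ">");
	// 5001 -> 4096, 10001 -> 8192, anything past 50000 hits 16384.
}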
@@ -229,8 +229,14 @@ func containsSQLKeywords(content string) bool {
 }

 // CheckDiskSpace verifies sufficient disk space for restore
+// Uses the effective work directory (WorkDir if set, otherwise BackupDir) since
+// that's where extraction actually happens for large databases
 func (s *Safety) CheckDiskSpace(archivePath string, multiplier float64) error {
-	return s.CheckDiskSpaceAt(archivePath, s.cfg.BackupDir, multiplier)
+	checkDir := s.cfg.GetEffectiveWorkDir()
+	if checkDir == "" {
+		checkDir = s.cfg.BackupDir
+	}
+	return s.CheckDiskSpaceAt(archivePath, checkDir, multiplier)
 }

 // CheckDiskSpaceAt verifies sufficient disk space at a specific directory
@@ -249,7 +255,9 @@ func (s *Safety) CheckDiskSpaceAt(archivePath string, checkDir string, multiplier float64) error {
 	// Get available disk space
 	availableSpace, err := getDiskSpace(checkDir)
 	if err != nil {
-		s.log.Warn("Cannot check disk space", "error", err)
+		if s.log != nil {
+			s.log.Warn("Cannot check disk space", "error", err)
+		}
 		return nil // Don't fail if we can't check
 	}

@@ -272,10 +280,12 @@ func (s *Safety) CheckDiskSpaceAt(archivePath string, checkDir string, multiplier float64) error {
 			checkDir)
 	}

-	s.log.Info("Disk space check passed",
-		"location", checkDir,
-		"required", FormatBytes(requiredSpace),
-		"available", FormatBytes(availableSpace))
+	if s.log != nil {
+		s.log.Info("Disk space check passed",
+			"location", checkDir,
+			"required", FormatBytes(requiredSpace),
+			"available", FormatBytes(availableSpace))
+	}

 	return nil
 }
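getDiskSpace is called here but defined outside this diff. A plausible Unix-only sketch of such a helper, stated as an assumption rather than the project's actual implementation:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// getDiskSpace returns the bytes available to unprivileged users on the
// filesystem containing dir (Bavail, not Bfree, so root-reserved blocks
// are excluded).
func getDiskSpace(dir string) (uint64, error) {
	var st unix.Statfs_t
	if err := unix.Statfs(dir, &st); err != nil {
		return 0, err
	}
	return st.Bavail * uint64(st.Bsize), nil
}

func main() {
	free, err := getDiskSpace("/var/backups") // hypothetical path
	if err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Printf("available: %d bytes\n", free)
}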
@@ -6,6 +6,7 @@ import (
 	"os/exec"
 	"regexp"
 	"strconv"
+	"time"

 	"dbbackup/internal/database"
 )
@@ -47,8 +48,13 @@ func ParsePostgreSQLVersion(versionStr string) (*VersionInfo, error) {

 // GetDumpFileVersion extracts the PostgreSQL version from a dump file
 // Uses pg_restore -l to read the dump metadata
+// Uses a 30-second timeout to avoid blocking on large files
 func GetDumpFileVersion(dumpPath string) (*VersionInfo, error) {
-	cmd := exec.Command("pg_restore", "-l", dumpPath)
+	// Use a timeout context to prevent blocking on very large dump files
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, "pg_restore", "-l", dumpPath)
 	output, err := cmd.CombinedOutput()
 	if err != nil {
 		return nil, fmt.Errorf("failed to read dump file metadata: %w (output: %s)", err, string(output))
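GetDumpFileVersion now shells out under the same 30-second budget. The parsing itself sits outside this hunk; a hedged sketch of how the version could be pulled from the pg_restore -l header — pg_restore prints lines such as ";     Dumped from database version: 16.2" — with the regex and helper name below being illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"os/exec"
	"regexp"
	"time"
)

var dumpVersionRe = regexp.MustCompile(`Dumped from database version:\s*([0-9.]+)`)

// dumpFileVersion reads the archive TOC header and extracts the server
// version string, giving up after 30 seconds.
func dumpFileVersion(dumpPath string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := exec.CommandContext(ctx, "pg_restore", "-l", dumpPath).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("failed to read dump file metadata: %w", err)
	}
	if m := dumpVersionRe.FindSubmatch(out); m != nil {
		return string(m[1]), nil
	}
	return "", fmt.Errorf("no version line in pg_restore listing")
}

func main() {
	v, err := dumpFileVersion("cluster/db1.dump") // hypothetical archive
	fmt.Println(v, err)
}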
@@ -25,7 +25,7 @@ func (pc *PrivilegeChecker) CheckAndWarn(allowRoot bool) error {
 	isRoot, user := pc.isRunningAsRoot()

 	if isRoot {
-		pc.log.Warn("⚠️ Running with elevated privileges (root/Administrator)")
+		pc.log.Warn("[WARN] Running with elevated privileges (root/Administrator)")
 		pc.log.Warn("Security recommendation: Create a dedicated backup user with minimal privileges")

 		if !allowRoot {
@@ -64,7 +64,7 @@ func (rc *ResourceChecker) ValidateResourcesForBackup(estimatedSize int64) error {

 	if len(warnings) > 0 {
 		for _, warning := range warnings {
-			rc.log.Warn("⚠️ Resource constraint: " + warning)
+			rc.log.Warn("[WARN] Resource constraint: " + warning)
 		}
 		rc.log.Info("Continuing backup operation (warnings are informational)")
 	}
@@ -22,7 +22,7 @@ func (rc *ResourceChecker) checkPlatformLimits() (*ResourceLimits, error) {
 	rc.log.Debug("Resource limit: max open files", "limit", rLimit.Cur, "max", rLimit.Max)

 	if rLimit.Cur < 1024 {
-		rc.log.Warn("⚠️ Low file descriptor limit detected",
+		rc.log.Warn("[WARN] Low file descriptor limit detected",
 			"current", rLimit.Cur,
 			"recommended", 4096,
 			"hint", "Increase with: ulimit -n 4096")
@@ -209,12 +209,12 @@ func (m ArchiveBrowserModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {

 	// Validate selection based on mode
 	if m.mode == "restore-cluster" && !selected.Format.IsClusterBackup() {
-		m.message = errorStyle.Render("❌ Please select a cluster backup (.tar.gz)")
+		m.message = errorStyle.Render("[FAIL] Please select a cluster backup (.tar.gz)")
 		return m, nil
 	}

 	if m.mode == "restore-single" && selected.Format.IsClusterBackup() {
-		m.message = errorStyle.Render("❌ Please select a single database backup")
+		m.message = errorStyle.Render("[FAIL] Please select a single database backup")
 		return m, nil
 	}

@@ -227,7 +227,7 @@ func (m ArchiveBrowserModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	// Show detailed info
 	if len(m.archives) > 0 && m.cursor < len(m.archives) {
 		selected := m.archives[m.cursor]
-		m.message = fmt.Sprintf("📦 %s | Format: %s | Size: %s | Modified: %s",
+		m.message = fmt.Sprintf("[PKG] %s | Format: %s | Size: %s | Modified: %s",
 			selected.Name,
 			selected.Format.String(),
 			formatSize(selected.Size),
@@ -251,13 +251,13 @@ func (m ArchiveBrowserModel) View() string {
 	var s strings.Builder

 	// Header
-	title := "📦 Backup Archives"
+	title := "[SELECT] Backup Archives"
 	if m.mode == "restore-single" {
-		title = "📦 Select Archive to Restore (Single Database)"
+		title = "[SELECT] Select Archive to Restore (Single Database)"
 	} else if m.mode == "restore-cluster" {
-		title = "📦 Select Archive to Restore (Cluster)"
+		title = "[SELECT] Select Archive to Restore (Cluster)"
 	} else if m.mode == "diagnose" {
-		title = "🔍 Select Archive to Diagnose"
+		title = "[SELECT] Select Archive to Diagnose"
 	}

 	s.WriteString(titleStyle.Render(title))
@@ -269,7 +269,7 @@ func (m ArchiveBrowserModel) View() string {
 	}

 	if m.err != nil {
-		s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v", m.err)))
+		s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v", m.err)))
 		s.WriteString("\n\n")
 		s.WriteString(infoStyle.Render("Press Esc to go back"))
 		return s.String()
@@ -293,7 +293,7 @@ func (m ArchiveBrowserModel) View() string {
 	s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-40s %-25s %-12s %-20s",
 		"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
 	s.WriteString("\n")
-	s.WriteString(strings.Repeat("─", 100))
+	s.WriteString(strings.Repeat("-", 100))
 	s.WriteString("\n")

 	// Show archives (limit to visible area)
@@ -317,13 +317,13 @@ func (m ArchiveBrowserModel) View() string {
 	}

 	// Color code based on validity and age
-	statusIcon := "✓"
+	statusIcon := "[+]"
 	if !archive.Valid {
-		statusIcon = "✗"
+		statusIcon = "[-]"
 		style = archiveInvalidStyle
 	} else if time.Since(archive.Modified) > 30*24*time.Hour {
 		style = archiveOldStyle
-		statusIcon = "⚠"
+		statusIcon = "[WARN]"
 	}

 	filename := truncate(archive.Name, 38)
@@ -351,7 +351,7 @@ func (m ArchiveBrowserModel) View() string {
 	s.WriteString(infoStyle.Render(fmt.Sprintf("Total: %d archive(s) | Selected: %d/%d",
 		len(m.archives), m.cursor+1, len(m.archives))))
 	s.WriteString("\n")
-	s.WriteString(infoStyle.Render("⌨️ ↑/↓: Navigate | Enter: Select | d: Diagnose | f: Filter | i: Info | Esc: Back"))
+	s.WriteString(infoStyle.Render("[KEY] ↑/↓: Navigate | Enter: Select | d: Diagnose | f: Filter | i: Info | Esc: Back"))

 	return s.String()
 }
@@ -83,10 +83,10 @@ type backupCompleteMsg struct {

 func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, backupType, dbName string, ratio int) tea.Cmd {
 	return func() tea.Msg {
-		// Use configurable cluster timeout (minutes) from config; default set in config.New()
-		// Use parent context to inherit cancellation from TUI
-		clusterTimeout := time.Duration(cfg.ClusterTimeoutMinutes) * time.Minute
-		ctx, cancel := context.WithTimeout(parentCtx, clusterTimeout)
+		// NO TIMEOUT for backup operations - a backup takes as long as it takes
+		// Large databases can take many hours
+		// Only manual cancellation (Ctrl+C) should stop the backup
+		ctx, cancel := context.WithCancel(parentCtx)
 		defer cancel()

 		start := time.Now()
@@ -136,11 +136,11 @@ func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, backupType, dbName string, ratio int) tea.Cmd {
 		var result string
 		switch backupType {
 		case "single":
-			result = fmt.Sprintf("✓ Single database backup of '%s' completed successfully in %v", dbName, elapsed)
+			result = fmt.Sprintf("[+] Single database backup of '%s' completed successfully in %v", dbName, elapsed)
 		case "sample":
-			result = fmt.Sprintf("✓ Sample backup of '%s' (ratio: %d) completed successfully in %v", dbName, ratio, elapsed)
+			result = fmt.Sprintf("[+] Sample backup of '%s' (ratio: %d) completed successfully in %v", dbName, ratio, elapsed)
 		case "cluster":
-			result = fmt.Sprintf("✓ Cluster backup completed successfully in %v", elapsed)
+			result = fmt.Sprintf("[+] Cluster backup completed successfully in %v", elapsed)
 		}

 		return backupCompleteMsg{
@@ -200,9 +200,9 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		m.err = msg.err
 		m.result = msg.result
 		if m.err == nil {
-			m.status = "✅ Backup completed successfully!"
+			m.status = "[OK] Backup completed successfully!"
 		} else {
-			m.status = fmt.Sprintf("❌ Backup failed: %v", m.err)
+			m.status = fmt.Sprintf("[FAIL] Backup failed: %v", m.err)
 		}
 		// Auto-forward in debug/auto-confirm mode
 		if m.config.TUIAutoConfirm {
@@ -216,7 +216,7 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		if !m.done && !m.cancelling {
 			// User requested cancellation - cancel the context
 			m.cancelling = true
-			m.status = "⏹️ Cancelling backup... (please wait)"
+			m.status = "[STOP] Cancelling backup... (please wait)"
 			if m.cancel != nil {
 				m.cancel()
 			}
@@ -240,7 +240,7 @@ func (m BackupExecutionModel) View() string {

 	// Clear screen with newlines and render header
 	s.WriteString("\n\n")
-	header := titleStyle.Render("🔄 Backup Execution")
+	header := titleStyle.Render("[EXEC] Backup Execution")
 	s.WriteString(header)
 	s.WriteString("\n\n")

@@ -261,13 +261,13 @@ func (m BackupExecutionModel) View() string {
 			s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
 		} else {
 			s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
-			s.WriteString("\n ⌨️ Press Ctrl+C or ESC to cancel\n")
+			s.WriteString("\n [KEY] Press Ctrl+C or ESC to cancel\n")
 		}
 	} else {
 		s.WriteString(fmt.Sprintf(" %s\n\n", m.status))

 		if m.err != nil {
-			s.WriteString(fmt.Sprintf(" ❌ Error: %v\n", m.err))
+			s.WriteString(fmt.Sprintf(" [FAIL] Error: %v\n", m.err))
 		} else if m.result != "" {
 			// Parse and display result cleanly
 			lines := strings.Split(m.result, "\n")
@@ -278,7 +278,7 @@ func (m BackupExecutionModel) View() string {
 				}
 			}
 		}
-		s.WriteString("\n ⌨️ Press Enter or ESC to return to menu\n")
+		s.WriteString("\n [KEY] Press Enter or ESC to return to menu\n")
 	}

 	return s.String()
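The WithTimeout-to-WithCancel switch means the clock can no longer abort a healthy long-running backup; only the caller can. A self-contained comparison of the two behaviours:

package main

import (
	"context"
	"fmt"
	"time"
)

func run(ctx context.Context, name string) {
	select {
	case <-time.After(2 * time.Second): // stands in for a long pg_dump
		fmt.Println(name, "finished")
	case <-ctx.Done():
		fmt.Println(name, "aborted:", ctx.Err())
	}
}

func main() {
	// Old behaviour: a fixed deadline kills the work even when it is
	// healthy, just slow.
	tctx, tcancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer tcancel()
	run(tctx, "with-timeout") // prints "aborted: context deadline exceeded"

	// New behaviour: runs to completion unless cancel() is called
	// (in the TUI, that happens on Ctrl+C/ESC).
	cctx, ccancel := context.WithCancel(context.Background())
	defer ccancel()
	run(cctx, "with-cancel") // prints "finished"
}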
@@ -11,40 +11,102 @@ import (

 	"dbbackup/internal/config"
 	"dbbackup/internal/logger"
+	"dbbackup/internal/restore"
 )

+// OperationState represents the current operation state
+type OperationState int
+
+const (
+	OpIdle OperationState = iota
+	OpVerifying
+	OpDeleting
+)
+
 // BackupManagerModel manages backup archives
 type BackupManagerModel struct {
 	config    *config.Config
 	logger    logger.Logger
 	parent    tea.Model
 	ctx       context.Context
 	archives  []ArchiveInfo
 	cursor    int
 	loading   bool
 	err       error
 	message   string
 	totalSize int64
 	freeSpace int64
+	opState      OperationState
+	opTarget     string // Name of archive being operated on
+	spinnerFrame int
 }

 // NewBackupManager creates a new backup manager
 func NewBackupManager(cfg *config.Config, log logger.Logger, parent tea.Model, ctx context.Context) BackupManagerModel {
 	return BackupManagerModel{
 		config:  cfg,
 		logger:  log,
 		parent:  parent,
 		ctx:     ctx,
 		loading: true,
+		opState:      OpIdle,
+		spinnerFrame: 0,
 	}
 }

 func (m BackupManagerModel) Init() tea.Cmd {
-	return loadArchives(m.config, m.logger)
+	return tea.Batch(loadArchives(m.config, m.logger), managerTickCmd())
+}
+
+// Tick for spinner animation
+type managerTickMsg time.Time
+
+func managerTickCmd() tea.Cmd {
+	return tea.Tick(100*time.Millisecond, func(t time.Time) tea.Msg {
+		return managerTickMsg(t)
+	})
+}
+
+// Verify result message
+type verifyResultMsg struct {
+	archive string
+	valid   bool
+	err     error
+	details string
 }

 func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	switch msg := msg.(type) {
+	case managerTickMsg:
+		// Update spinner frame
+		m.spinnerFrame = (m.spinnerFrame + 1) % len(spinnerFrames)
+		return m, managerTickCmd()
+
+	case verifyResultMsg:
+		m.opState = OpIdle
+		m.opTarget = ""
+		if msg.err != nil {
+			m.message = fmt.Sprintf("[-] Verify failed: %v", msg.err)
+		} else if msg.valid {
+			m.message = fmt.Sprintf("[+] %s: Valid - %s", msg.archive, msg.details)
+			// Update archive validity in list
+			for i := range m.archives {
+				if m.archives[i].Name == msg.archive {
+					m.archives[i].Valid = true
+					break
+				}
+			}
+		} else {
+			m.message = fmt.Sprintf("[-] %s: Invalid - %s", msg.archive, msg.details)
+			for i := range m.archives {
+				if m.archives[i].Name == msg.archive {
+					m.archives[i].Valid = false
+					break
+				}
+			}
+		}
+		return m, nil
+
 	case archiveListMsg:
 		m.loading = false
 		if msg.err != nil {
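The managerTickCmd introduced above follows the standard Bubble Tea animation idiom: the tick command emits one message, and Update re-arms it, so the spinner advances without explicit goroutines. A stripped-down, runnable sketch of that loop (the model fields and frame set are illustrative, not taken from the project):

package main

import (
	"fmt"
	"time"

	tea "github.com/charmbracelet/bubbletea"
)

type tickMsg time.Time

func tick() tea.Cmd {
	return tea.Tick(100*time.Millisecond, func(t time.Time) tea.Msg { return tickMsg(t) })
}

var frames = []string{"|", "/", "-", "\\"}

type model struct{ frame int }

func (m model) Init() tea.Cmd { return tick() }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg.(type) {
	case tickMsg:
		m.frame = (m.frame + 1) % len(frames)
		return m, tick() // re-arm: each tick schedules the next one
	case tea.KeyMsg:
		return m, tea.Quit
	}
	return m, nil
}

func (m model) View() string {
	return fmt.Sprintf("%s working... (any key quits)\n", frames[m.frame])
}

func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		fmt.Println(err)
	}
}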
@@ -68,10 +130,24 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		return m, nil

 	case tea.KeyMsg:
-		switch msg.String() {
-		case "ctrl+c", "q", "esc":
+		// Allow escape/cancel even during operations
+		if msg.String() == "ctrl+c" || msg.String() == "esc" || msg.String() == "q" {
+			if m.opState != OpIdle {
+				// Cancel current operation
+				m.opState = OpIdle
+				m.opTarget = ""
+				m.message = "Operation cancelled"
+				return m, nil
+			}
 			return m.parent, nil
+		}
+
+		// Block other input during operations
+		if m.opState != OpIdle {
+			return m, nil
+		}
+
+		switch msg.String() {
 		case "up", "k":
 			if m.cursor > 0 {
 				m.cursor--
@@ -83,11 +159,13 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			}

 		case "v":
-			// Verify archive
+			// Verify archive with real verification
 			if len(m.archives) > 0 && m.cursor < len(m.archives) {
 				selected := m.archives[m.cursor]
-				m.message = fmt.Sprintf("🔍 Verifying %s...", selected.Name)
-				// In real implementation, would run verification
+				m.opState = OpVerifying
+				m.opTarget = selected.Name
+				m.message = ""
+				return m, verifyArchiveCmd(selected)
 			}

 		case "d":
@@ -96,16 +174,16 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			selected := m.archives[m.cursor]
 			archivePath := selected.Path
 			confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
-				"🗑️ Delete Archive",
+				"[DELETE] Delete Archive",
 				fmt.Sprintf("Delete archive '%s'? This cannot be undone.", selected.Name),
 				func() (tea.Model, tea.Cmd) {
 					// Delete the archive
 					err := deleteArchive(archivePath)
 					if err != nil {
 						m.err = fmt.Errorf("failed to delete archive: %v", err)
-						m.message = fmt.Sprintf("❌ Failed to delete: %v", err)
+						m.message = fmt.Sprintf("[FAIL] Failed to delete: %v", err)
 					} else {
-						m.message = fmt.Sprintf("✅ Deleted: %s", selected.Name)
+						m.message = fmt.Sprintf("[OK] Deleted: %s", selected.Name)
 					}
 					// Refresh the archive list
 					m.loading = true
@@ -118,7 +196,7 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		// Show info
 		if len(m.archives) > 0 && m.cursor < len(m.archives) {
 			selected := m.archives[m.cursor]
-			m.message = fmt.Sprintf("📦 %s | %s | %s | Modified: %s",
+			m.message = fmt.Sprintf("[PKG] %s | %s | %s | Modified: %s",
 				selected.Name,
 				selected.Format.String(),
 				formatSize(selected.Size),
@@ -152,39 +230,67 @@ func (m BackupManagerModel) View() string {
 	var s strings.Builder

 	// Title
-	s.WriteString(titleStyle.Render("🗄️ Backup Archive Manager"))
+	s.WriteString(TitleStyle.Render("[SELECT] Backup Archive Manager"))
 	s.WriteString("\n\n")

+	// Status line (no box, bold+color accents)
+	switch m.opState {
+	case OpVerifying:
+		spinner := spinnerFrames[m.spinnerFrame]
+		s.WriteString(StatusActiveStyle.Render(fmt.Sprintf("%s Verifying: %s", spinner, m.opTarget)))
+		s.WriteString("\n\n")
+	case OpDeleting:
+		spinner := spinnerFrames[m.spinnerFrame]
+		s.WriteString(StatusActiveStyle.Render(fmt.Sprintf("%s Deleting: %s", spinner, m.opTarget)))
+		s.WriteString("\n\n")
+	default:
+		if m.loading {
+			spinner := spinnerFrames[m.spinnerFrame]
+			s.WriteString(StatusActiveStyle.Render(fmt.Sprintf("%s Loading archives...", spinner)))
+			s.WriteString("\n\n")
+		} else if m.message != "" {
+			// Color based on message content
+			if strings.HasPrefix(m.message, "[+]") || strings.HasPrefix(m.message, "Valid") {
+				s.WriteString(StatusSuccessStyle.Render(m.message))
+			} else if strings.HasPrefix(m.message, "[-]") || strings.HasPrefix(m.message, "Error") {
+				s.WriteString(StatusErrorStyle.Render(m.message))
+			} else {
+				s.WriteString(StatusActiveStyle.Render(m.message))
+			}
+			s.WriteString("\n\n")
+		}
+		// No "Ready" message when idle - cleaner UI
+	}
+
 	if m.loading {
-		s.WriteString(infoStyle.Render("Loading archives..."))
 		return s.String()
 	}

 	if m.err != nil {
-		s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v", m.err)))
+		s.WriteString(StatusErrorStyle.Render(fmt.Sprintf("[FAIL] Error: %v", m.err)))
 		s.WriteString("\n\n")
-		s.WriteString(infoStyle.Render("Press Esc to go back"))
+		s.WriteString(ShortcutStyle.Render("Press Esc to go back"))
 		return s.String()
 	}

 	// Summary
-	s.WriteString(infoStyle.Render(fmt.Sprintf("Total Archives: %d | Total Size: %s",
+	s.WriteString(LabelStyle.Render(fmt.Sprintf("Total Archives: %d | Total Size: %s",
 		len(m.archives), formatSize(m.totalSize))))
 	s.WriteString("\n\n")

 	// Archives list
 	if len(m.archives) == 0 {
-		s.WriteString(infoStyle.Render("No backup archives found"))
+		s.WriteString(StatusReadyStyle.Render("No backup archives found"))
 		s.WriteString("\n\n")
-		s.WriteString(infoStyle.Render("Press Esc to go back"))
+		s.WriteString(ShortcutStyle.Render("Press Esc to go back"))
 		return s.String()
 	}

-	// Column headers
-	s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-35s %-25s %-12s %-20s",
+	// Column headers with better alignment
+	s.WriteString(ListHeaderStyle.Render(fmt.Sprintf(" %-32s %-22s %10s %-16s",
 		"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
 	s.WriteString("\n")
-	s.WriteString(strings.Repeat("─", 95))
+	s.WriteString(strings.Repeat("-", 90))
 	s.WriteString("\n")

 	// Show archives (limit to visible area)
@@ -199,27 +305,27 @@ func (m BackupManagerModel) View() string {
|
|||||||
|
|
||||||
for i := start; i < end; i++ {
|
for i := start; i < end; i++ {
|
||||||
archive := m.archives[i]
|
archive := m.archives[i]
|
||||||
cursor := " "
|
cursor := " "
|
||||||
style := archiveNormalStyle
|
style := ListNormalStyle
|
||||||
|
|
||||||
if i == m.cursor {
|
if i == m.cursor {
|
||||||
cursor = ">"
|
cursor = "> "
|
||||||
style = archiveSelectedStyle
|
style = ListSelectedStyle
|
||||||
}
|
}
|
||||||
|
|
||||||
// Status icon
|
// Status icon - consistent 4-char width
|
||||||
statusIcon := "✓"
|
statusIcon := " [+]"
|
||||||
if !archive.Valid {
|
if !archive.Valid {
|
||||||
statusIcon = "✗"
|
statusIcon = " [-]"
|
||||||
style = archiveInvalidStyle
|
style = ItemInvalidStyle
|
||||||
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
||||||
statusIcon = "⚠"
|
statusIcon = " [!]"
|
||||||
}
|
}
|
||||||
|
|
||||||
filename := truncate(archive.Name, 33)
|
filename := truncate(archive.Name, 32)
|
||||||
format := truncate(archive.Format.String(), 23)
|
format := truncate(archive.Format.String(), 22)
|
||||||
|
|
||||||
line := fmt.Sprintf("%s %s %-33s %-23s %-10s %-19s",
|
line := fmt.Sprintf("%s%s %-32s %-22s %10s %-16s",
|
||||||
cursor,
|
cursor,
|
||||||
statusIcon,
|
statusIcon,
|
||||||
filename,
|
filename,
|
||||||
@@ -233,18 +339,83 @@ func (m BackupManagerModel) View() string {
 
 	// Footer
 	s.WriteString("\n")
-	if m.message != "" {
-		s.WriteString(infoStyle.Render(m.message))
-		s.WriteString("\n")
-	}
 
-	s.WriteString(infoStyle.Render(fmt.Sprintf("Selected: %d/%d", m.cursor+1, len(m.archives))))
+	s.WriteString(StatusReadyStyle.Render(fmt.Sprintf("Selected: %d/%d", m.cursor+1, len(m.archives))))
-	s.WriteString("\n")
+	s.WriteString("\n\n")
-	s.WriteString(infoStyle.Render("⌨️ ↑/↓: Navigate | r: Restore | v: Verify | d: Delete | i: Info | R: Refresh | Esc: Back"))
+	// Grouped keyboard shortcuts
+	s.WriteString(ShortcutStyle.Render("SHORTCUTS: Up/Down=Move | r=Restore | v=Verify | d=Delete | i=Info | R=Refresh | Esc=Back | q=Quit"))
 
 	return s.String()
 }
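This commit series swaps the per-view styles (archiveHeaderStyle, infoStyle, archiveSelectedStyle, ...) for a shared set (ListHeaderStyle, ShortcutStyle, StatusErrorStyle, StatusReadyStyle, LabelStyle, ...). The shared styles' definitions are not part of this hunk; the sketch below only illustrates the general shape such a palette takes with lipgloss, and the colors and option chains are assumptions, not the project's actual values.

package main

import (
	"fmt"

	"github.com/charmbracelet/lipgloss"
)

// Illustrative declarations of a shared style palette: one set of styles,
// reused by every view instead of per-view styles.
var (
	ListHeaderStyle  = lipgloss.NewStyle().Bold(true)
	StatusErrorStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("9")) // red (assumed)
	ShortcutStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("8")) // dim (assumed)
)

func main() {
	fmt.Println(ListHeaderStyle.Render("FILENAME  FORMAT  SIZE"))
	fmt.Println(StatusErrorStyle.Render("[FAIL] Error: connection refused"))
	fmt.Println(ShortcutStyle.Render("SHORTCUTS: r=Restore | v=Verify | Esc=Back"))
}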
 
+// verifyArchiveCmd runs the SAME verification as restore safety checks
+// This ensures consistency between backup manager verify and restore preview
+func verifyArchiveCmd(archive ArchiveInfo) tea.Cmd {
+	return func() tea.Msg {
+		var issues []string
+
+		// 1. Run the same archive integrity check as restore
+		safety := restore.NewSafety(nil, nil) // Doesn't need config/log for validation
+		if err := safety.ValidateArchive(archive.Path); err != nil {
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     nil,
+				details: fmt.Sprintf("Archive integrity: %v", err),
+			}
+		}
+
+		// 2. Run the same deep diagnosis as restore
+		diagnoser := restore.NewDiagnoser(nil, false)
+		diagResult, diagErr := diagnoser.DiagnoseFile(archive.Path)
+		if diagErr != nil {
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     diagErr,
+				details: "Cannot diagnose archive",
+			}
+		}
+
+		if !diagResult.IsValid {
+			// Collect error details
+			if diagResult.IsTruncated {
+				issues = append(issues, "TRUNCATED")
+			}
+			if diagResult.IsCorrupted {
+				issues = append(issues, "CORRUPTED")
+			}
+			if len(diagResult.Errors) > 0 {
+				issues = append(issues, diagResult.Errors[0])
+			}
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     nil,
+				details: strings.Join(issues, "; "),
+			}
+		}
+
+		// Build success details
+		details := "Verified"
+		if diagResult.Details != nil {
+			if diagResult.Details.TableCount > 0 {
+				details = fmt.Sprintf("%d databases in archive", diagResult.Details.TableCount)
+			} else if diagResult.Details.PgRestoreListable {
+				details = "pg_restore verified"
+			}
+		}
+
+		// Add any warnings
+		if len(diagResult.Warnings) > 0 {
+			details += fmt.Sprintf(" [%d warnings]", len(diagResult.Warnings))
+		}
+
+		return verifyResultMsg{archive: archive.Name, valid: true, err: nil, details: details}
+	}
+}
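verifyArchiveCmd follows Bubble Tea's command pattern: a tea.Cmd is just a func() tea.Msg that the runtime executes off the render loop, delivering the returned message back to Update. A minimal, self-contained sketch of that round-trip, with a simplified model and message as stand-ins rather than the project's real types:

package main

import tea "github.com/charmbracelet/bubbletea"

type verifyResultMsg struct {
	archive string
	valid   bool
	details string
}

// verifyCmd returns a command; the closure runs off the UI goroutine.
func verifyCmd(path string) tea.Cmd {
	return func() tea.Msg {
		// Long-running verification work would happen here.
		return verifyResultMsg{archive: path, valid: true, details: "Verified"}
	}
}

type model struct{ status string }

func (m model) Init() tea.Cmd { return verifyCmd("backup.dump") }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case verifyResultMsg:
		// The result arrives as a message; Update itself never blocks.
		m.status = msg.details
	}
	return m, nil
}

func (m model) View() string { return m.status + "\n" }

func main() { _ = tea.NewProgram(model{}) }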
 
 // deleteArchive deletes a backup archive (to be called from confirmation)
 func deleteArchive(archivePath string) error {
 	return os.Remove(archivePath)
@@ -67,7 +67,6 @@ func (m ConfirmationModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	switch msg := msg.(type) {
 	case autoConfirmMsg:
 		// Auto-confirm triggered
-		m.confirmed = true
 		if m.onConfirm != nil {
 			return m.onConfirm()
 		}
@@ -95,7 +94,6 @@ func (m ConfirmationModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 
 		case "enter", "y":
 			if msg.String() == "y" || m.cursor == 0 {
-				m.confirmed = true
 				// Execute the onConfirm callback if provided
 				if m.onConfirm != nil {
 					return m.onConfirm()
@@ -131,7 +129,7 @@ func (m ConfirmationModel) View() string {
 		s.WriteString(" ")
 	}
 
-	s.WriteString("\n\n⌨️ ←/→: Select • Enter/y: Confirm • n/ESC: Cancel\n")
+	s.WriteString("\n\n[KEYS] <-/->: Select | Enter/y: Confirm | n/ESC: Cancel\n")
 
 	return s.String()
 }
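Both Update hunks drop an m.confirmed = true assignment that immediately preceded return m.onConfirm(). Assuming Update uses a value receiver, as it appears to, the assignment mutated a local copy that was discarded when a different model was returned, so the store was dead. A compact illustration of the pitfall, with simplified stand-in types:

package main

import tea "github.com/charmbracelet/bubbletea"

// confirmModel is a simplified stand-in for the real ConfirmationModel.
type confirmModel struct {
	confirmed bool
	onConfirm func() (tea.Model, tea.Cmd)
}

func (m confirmModel) Init() tea.Cmd { return nil }
func (m confirmModel) View() string  { return "" }

// Value receiver: writes to m land on a local copy.
func (m confirmModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	// m.confirmed = true // dead store: the copy never escapes this branch
	if m.onConfirm != nil {
		return m.onConfirm() // a different model is returned; the copy is dropped
	}
	return m, nil // only returning m itself would carry the flag forward
}

func main() {}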
@@ -53,7 +53,8 @@ type databaseListMsg struct {
 
 func fetchDatabases(cfg *config.Config, log logger.Logger) tea.Cmd {
 	return func() tea.Msg {
-		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+		// 60 seconds for database listing - busy servers may be slow
+		ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
 		defer cancel()
 
 		dbClient, err := database.New(cfg, log)
@@ -108,7 +109,7 @@ func (m DatabaseSelectorModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			return executor, executor.Init()
 		}
 		inputModel := NewInputModel(m.config, m.logger, m,
-			"📊 Sample Ratio",
+			"[STATS] Sample Ratio",
 			"Enter sample ratio (1-100):",
 			"10",
 			ValidateInt(1, 100))
@@ -151,7 +152,7 @@ func (m DatabaseSelectorModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		// If sample backup, ask for ratio first
 		if m.backupType == "sample" {
 			inputModel := NewInputModel(m.config, m.logger, m,
-				"📊 Sample Ratio",
+				"[STATS] Sample Ratio",
 				"Enter sample ratio (1-100):",
 				"10",
 				ValidateInt(1, 100))
@@ -175,12 +176,12 @@ func (m DatabaseSelectorModel) View() string {
 	s.WriteString(fmt.Sprintf("\n%s\n\n", header))
 
 	if m.loading {
-		s.WriteString("⏳ Loading databases...\n")
+		s.WriteString("[WAIT] Loading databases...\n")
 		return s.String()
 	}
 
 	if m.err != nil {
-		s.WriteString(fmt.Sprintf("❌ Error: %v\n", m.err))
+		s.WriteString(fmt.Sprintf("[FAIL] Error: %v\n", m.err))
 		s.WriteString("\nPress ESC to go back\n")
 		return s.String()
 	}
@@ -202,7 +203,7 @@ func (m DatabaseSelectorModel) View() string {
 		s.WriteString(fmt.Sprintf("\n%s\n", m.message))
 	}
 
-	s.WriteString("\n⌨️ ↑/↓: Navigate • Enter: Select • ESC: Back • q: Quit\n")
+	s.WriteString("\n[KEYS] Up/Down: Navigate | Enter: Select | ESC: Back | q: Quit\n")
 
 	return s.String()
 }
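The first hunk above raises the database-listing timeout from 15 to 60 seconds; the surrounding context.WithTimeout / defer cancel() plumbing is the standard Go pattern. A self-contained sketch of that pattern, where listDatabases is a placeholder rather than the project's real driver call:

package main

import (
	"context"
	"fmt"
	"time"
)

// listDatabases stands in for the real driver call; the point here is the
// context plumbing, not the query itself.
func listDatabases(ctx context.Context) ([]string, error) {
	select {
	case <-time.After(100 * time.Millisecond): // simulated server round-trip
		return []string{"postgres", "app"}, nil
	case <-ctx.Done():
		return nil, ctx.Err() // context.DeadlineExceeded once the budget runs out
	}
}

func main() {
	// A 60s budget, matching the new fetchDatabases timeout; cancel releases
	// the timer's resources even when the call returns early.
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	dbs, err := listDatabases(ctx)
	fmt.Println(dbs, err)
}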
@@ -160,7 +160,7 @@ func (m DiagnoseViewModel) View() string {
 	var s strings.Builder
 
 	// Header
-	s.WriteString(titleStyle.Render("🔍 Backup Diagnosis"))
+	s.WriteString(titleStyle.Render("[CHECK] Backup Diagnosis"))
 	s.WriteString("\n\n")
 
 	// Archive info
@@ -175,14 +175,14 @@ func (m DiagnoseViewModel) View() string {
 	s.WriteString("\n\n")
 
 	if m.running {
-		s.WriteString(infoStyle.Render("⏳ " + m.progress))
+		s.WriteString(infoStyle.Render("[WAIT] " + m.progress))
 		s.WriteString("\n\n")
 		s.WriteString(diagnoseInfoStyle.Render("This may take a while for large archives..."))
 		return s.String()
 	}
 
 	if m.err != nil {
-		s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Diagnosis failed: %v", m.err)))
+		s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Diagnosis failed: %v", m.err)))
 		s.WriteString("\n\n")
 		s.WriteString(infoStyle.Render("Press Enter or Esc to go back"))
 		return s.String()
@@ -204,117 +204,104 @@ func (m DiagnoseViewModel) View() string {
 func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) string {
 	var s strings.Builder
 
-	// Status
-	s.WriteString(strings.Repeat("─", 60))
+	// Validation Status
+	s.WriteString(diagnoseHeaderStyle.Render("[STATUS] Validation"))
 	s.WriteString("\n")
 
 	if result.IsValid {
-		s.WriteString(diagnosePassStyle.Render("✅ STATUS: VALID"))
+		s.WriteString(diagnosePassStyle.Render(" [OK] VALID - Archive passed all checks"))
+		s.WriteString("\n")
 	} else {
-		s.WriteString(diagnoseFailStyle.Render("❌ STATUS: INVALID"))
+		s.WriteString(diagnoseFailStyle.Render(" [FAIL] INVALID - Archive has problems"))
+		s.WriteString("\n")
 	}
-	s.WriteString("\n")
 
 	if result.IsTruncated {
-		s.WriteString(diagnoseFailStyle.Render("⚠️ TRUNCATED: File appears incomplete"))
+		s.WriteString(diagnoseFailStyle.Render(" [!] TRUNCATED - File is incomplete"))
 		s.WriteString("\n")
 	}
 
 	if result.IsCorrupted {
-		s.WriteString(diagnoseFailStyle.Render("⚠️ CORRUPTED: File structure is damaged"))
+		s.WriteString(diagnoseFailStyle.Render(" [!] CORRUPTED - File structure damaged"))
 		s.WriteString("\n")
 	}
 
-	s.WriteString(strings.Repeat("─", 60))
-	s.WriteString("\n\n")
+	s.WriteString("\n")
 
 	// Details
 	if result.Details != nil {
-		s.WriteString(diagnoseHeaderStyle.Render("📊 DETAILS:"))
+		s.WriteString(diagnoseHeaderStyle.Render("[INFO] Details"))
 		s.WriteString("\n")
 
 		if result.Details.HasPGDMPSignature {
-			s.WriteString(diagnosePassStyle.Render(" ✓ "))
-			s.WriteString("Has PGDMP signature (custom format)\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " PostgreSQL custom format (PGDMP)\n")
 		}
 
 		if result.Details.HasSQLHeader {
-			s.WriteString(diagnosePassStyle.Render(" ✓ "))
-			s.WriteString("Has PostgreSQL SQL header\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " PostgreSQL SQL header found\n")
 		}
 
 		if result.Details.GzipValid {
-			s.WriteString(diagnosePassStyle.Render(" ✓ "))
-			s.WriteString("Gzip compression valid\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " Gzip compression valid\n")
 		}
 
 		if result.Details.PgRestoreListable {
-			s.WriteString(diagnosePassStyle.Render(" ✓ "))
-			s.WriteString(fmt.Sprintf("pg_restore can list contents (%d tables)\n", result.Details.TableCount))
+			s.WriteString(diagnosePassStyle.Render(" [+]") + fmt.Sprintf(" pg_restore can list contents (%d tables)\n", result.Details.TableCount))
 		}
 
 		if result.Details.CopyBlockCount > 0 {
-			s.WriteString(diagnoseInfoStyle.Render(" • "))
-			s.WriteString(fmt.Sprintf("Contains %d COPY blocks\n", result.Details.CopyBlockCount))
+			s.WriteString(fmt.Sprintf(" [-] %d COPY blocks found\n", result.Details.CopyBlockCount))
 		}
 
 		if result.Details.UnterminatedCopy {
-			s.WriteString(diagnoseFailStyle.Render(" ✗ "))
-			s.WriteString(fmt.Sprintf("Unterminated COPY block: %s (line %d)\n",
-				result.Details.LastCopyTable, result.Details.LastCopyLineNumber))
+			s.WriteString(diagnoseFailStyle.Render(" [-]") + " Unterminated COPY: " + truncate(result.Details.LastCopyTable, 30) + "\n")
 		}
 
 		if result.Details.ProperlyTerminated {
-			s.WriteString(diagnosePassStyle.Render(" ✓ "))
-			s.WriteString("All COPY blocks properly terminated\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " All COPY blocks properly terminated\n")
 		}
 
 		if result.Details.ExpandedSize > 0 {
-			s.WriteString(diagnoseInfoStyle.Render(" • "))
-			s.WriteString(fmt.Sprintf("Expanded size: %s (ratio: %.1fx)\n",
-				formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio))
+			s.WriteString(fmt.Sprintf(" [-] Expanded: %s (%.1fx)\n", formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio))
 		}
 
+		s.WriteString("\n")
 	}
 
 	// Errors
 	if len(result.Errors) > 0 {
-		s.WriteString("\n")
-		s.WriteString(diagnoseFailStyle.Render("❌ ERRORS:"))
+		s.WriteString(diagnoseFailStyle.Render("[FAIL] Errors"))
 		s.WriteString("\n")
 		for i, e := range result.Errors {
 			if i >= 5 {
-				s.WriteString(diagnoseInfoStyle.Render(fmt.Sprintf(" ... and %d more\n", len(result.Errors)-5)))
+				s.WriteString(fmt.Sprintf(" ... and %d more errors\n", len(result.Errors)-5))
 				break
 			}
-			s.WriteString(diagnoseFailStyle.Render(" • "))
-			s.WriteString(truncate(e, 70))
-			s.WriteString("\n")
+			s.WriteString(" " + truncate(e, 60) + "\n")
 		}
+		s.WriteString("\n")
 	}
 
 	// Warnings
 	if len(result.Warnings) > 0 {
-		s.WriteString("\n")
-		s.WriteString(diagnoseWarnStyle.Render("⚠️ WARNINGS:"))
+		s.WriteString(diagnoseWarnStyle.Render("[WARN] Warnings"))
 		s.WriteString("\n")
 		for i, w := range result.Warnings {
 			if i >= 3 {
-				s.WriteString(diagnoseInfoStyle.Render(fmt.Sprintf(" ... and %d more\n", len(result.Warnings)-3)))
+				s.WriteString(fmt.Sprintf(" ... and %d more warnings\n", len(result.Warnings)-3))
 				break
 			}
-			s.WriteString(diagnoseWarnStyle.Render(" • "))
-			s.WriteString(truncate(w, 70))
-			s.WriteString("\n")
+			s.WriteString(" " + truncate(w, 60) + "\n")
 		}
+		s.WriteString("\n")
 	}
 
 	// Recommendations
 	if !result.IsValid {
-		s.WriteString("\n")
-		s.WriteString(diagnoseHeaderStyle.Render("💡 RECOMMENDATIONS:"))
+		s.WriteString(diagnoseInfoStyle.Render("[HINT] Recommendations"))
 		s.WriteString("\n")
 		if result.IsTruncated {
-			s.WriteString(" 1. Re-run the backup process for this database\n")
+			s.WriteString(" 1. Re-run backup with current version (v3.42+)\n")
 			s.WriteString(" 2. Check disk space on backup server\n")
 			s.WriteString(" 3. Verify network stability for remote backups\n")
 		}
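Both render paths lean on a truncate(s, n) helper whose body does not appear in this diff. One plausible, rune-safe implementation, shown only as an assumption to make the call sites concrete:

package main

import "fmt"

// truncate is not shown in this diff; this is one plausible shape for it,
// cutting at max runes and marking the cut with an ellipsis.
func truncate(s string, max int) string {
	r := []rune(s)
	if len(r) <= max {
		return s
	}
	if max <= 3 {
		return string(r[:max])
	}
	return string(r[:max-3]) + "..."
}

func main() {
	fmt.Println(truncate("a_very_long_error_message_from_pg_restore", 20))
}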
@@ -341,22 +328,20 @@ func (m DiagnoseViewModel) renderClusterResults() string {
 		}
 	}
 
-	s.WriteString(strings.Repeat("─", 60))
 	s.WriteString("\n")
-	s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("📊 CLUSTER SUMMARY: %d databases\n", len(m.results))))
+	s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("[STATS] Cluster Summary: %d databases", len(m.results))))
-	s.WriteString(strings.Repeat("─", 60))
 	s.WriteString("\n\n")
 
 	if invalidCount == 0 {
-		s.WriteString(diagnosePassStyle.Render("✅ All dumps are valid"))
+		s.WriteString(diagnosePassStyle.Render("[OK] All dumps are valid"))
 		s.WriteString("\n\n")
 	} else {
-		s.WriteString(diagnoseFailStyle.Render(fmt.Sprintf("❌ %d/%d dumps have issues", invalidCount, len(m.results))))
+		s.WriteString(diagnoseFailStyle.Render(fmt.Sprintf("[FAIL] %d/%d dumps have issues", invalidCount, len(m.results))))
 		s.WriteString("\n\n")
 	}
 
 	// List all dumps with status
-	s.WriteString(diagnoseHeaderStyle.Render("Database Dumps:"))
+	s.WriteString(diagnoseHeaderStyle.Render("[LIST] Database Dumps"))
 	s.WriteString("\n")
 
 	// Show visible range based on cursor
@@ -378,13 +363,13 @@ func (m DiagnoseViewModel) renderClusterResults() string {
 
 		var status string
 		if r.IsValid {
-			status = diagnosePassStyle.Render("✓")
+			status = diagnosePassStyle.Render("[+]")
 		} else if r.IsTruncated {
-			status = diagnoseFailStyle.Render("✗ TRUNCATED")
+			status = diagnoseFailStyle.Render("[-] TRUNCATED")
 		} else if r.IsCorrupted {
-			status = diagnoseFailStyle.Render("✗ CORRUPTED")
+			status = diagnoseFailStyle.Render("[-] CORRUPTED")
 		} else {
-			status = diagnoseFailStyle.Render("✗ INVALID")
+			status = diagnoseFailStyle.Render("[-] INVALID")
 		}
 
 		line := fmt.Sprintf("%s %s %-35s %s",
@@ -405,15 +390,13 @@ func (m DiagnoseViewModel) renderClusterResults() string {
 	if m.cursor < len(m.results) {
 		selected := m.results[m.cursor]
 		s.WriteString("\n")
-		s.WriteString(strings.Repeat("─", 60))
-		s.WriteString("\n")
-		s.WriteString(diagnoseHeaderStyle.Render("Selected: " + selected.FileName))
+		s.WriteString(diagnoseHeaderStyle.Render("[INFO] Selected: " + selected.FileName))
 		s.WriteString("\n\n")
 
 		// Show condensed details for selected
 		if selected.Details != nil {
 			if selected.Details.UnterminatedCopy {
-				s.WriteString(diagnoseFailStyle.Render(" ✗ Unterminated COPY: "))
+				s.WriteString(diagnoseFailStyle.Render(" [-] Unterminated COPY: "))
 				s.WriteString(selected.Details.LastCopyTable)
 				s.WriteString(fmt.Sprintf(" (line %d)\n", selected.Details.LastCopyLineNumber))
 			}
@@ -429,7 +412,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
 				if i >= 2 {
 					break
 				}
-				s.WriteString(diagnoseFailStyle.Render(" • "))
+				s.WriteString(diagnoseFailStyle.Render(" - "))
 				s.WriteString(truncate(e, 55))
 				s.WriteString("\n")
 			}
@@ -208,7 +208,7 @@ func (dp *DirectoryPicker) View() string {
 	if dp.allowFiles {
 		pickerType = "File/Directory"
 	}
-	header := fmt.Sprintf("📁 %s Picker - %s", pickerType, dp.currentPath)
+	header := fmt.Sprintf("[DIR] %s Picker - %s", pickerType, dp.currentPath)
 	content.WriteString(dp.styles.Header.Render(header))
 	content.WriteString("\n\n")
 
@@ -216,13 +216,13 @@ func (dp *DirectoryPicker) View() string {
 	for i, item := range dp.items {
 		var prefix string
 		if item.Name == ".." {
-			prefix = "⬆️ "
+			prefix = "[UP] "
 		} else if item.Name == "Error reading directory" {
-			prefix = "❌ "
+			prefix = "[X] "
 		} else if item.IsDir {
-			prefix = "📁 "
+			prefix = "[DIR] "
 		} else {
-			prefix = "📄 "
+			prefix = "[FILE] "
 		}
 
 		line := prefix + item.Name
@@ -235,9 +235,9 @@ func (dp *DirectoryPicker) View() string {
 	}
 
 	// Help text
-	help := "\n↑/↓: Navigate • Enter: Open/Select File • s: Select Directory • q/Esc: Cancel"
+	help := "\nUp/Down: Navigate | Enter: Open/Select File | s: Select Directory | q/Esc: Cancel"
 	if !dp.allowFiles {
-		help = "\n↑/↓: Navigate • Enter: Open • s: Select Directory • q/Esc: Cancel"
+		help = "\nUp/Down: Navigate | Enter: Open | s: Select Directory | q/Esc: Cancel"
 	}
 	content.WriteString(dp.styles.Help.Render(help))
@@ -2,7 +2,7 @@ package tui
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"strings"
 	"time"
 
@@ -59,7 +59,7 @@ func loadHistory(cfg *config.Config) []HistoryEntry {
 	var entries []HistoryEntry
 
 	// Read backup files from backup directory
-	files, err := ioutil.ReadDir(cfg.BackupDir)
+	files, err := os.ReadDir(cfg.BackupDir)
 	if err != nil {
 		return entries
 	}
@@ -74,6 +74,12 @@ func loadHistory(cfg *config.Config) []HistoryEntry {
 			continue
 		}
 
+		// Get file info for ModTime
+		info, err := file.Info()
+		if err != nil {
+			continue
+		}
+
 		var backupType string
 		var database string
 
@@ -97,8 +103,8 @@ func loadHistory(cfg *config.Config) []HistoryEntry {
 		entries = append(entries, HistoryEntry{
 			Type:      backupType,
 			Database:  database,
-			Timestamp: file.ModTime(),
+			Timestamp: info.ModTime(),
-			Status:    "✅ Completed",
+			Status:    "[OK] Completed",
 			Filename:  name,
 		})
 	}
@@ -185,11 +191,11 @@ func (m HistoryViewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 func (m HistoryViewModel) View() string {
 	var s strings.Builder
 
-	header := titleStyle.Render("📜 Operation History")
+	header := titleStyle.Render("[STATS] Operation History")
 	s.WriteString(fmt.Sprintf("\n%s\n\n", header))
 
 	if len(m.history) == 0 {
-		s.WriteString("📭 No backup history found\n\n")
+		s.WriteString("[EMPTY] No backup history found\n\n")
 	} else {
 		maxVisible := 15 // Show max 15 items at once
 
@@ -205,7 +211,7 @@ func (m HistoryViewModel) View() string {
 
 		// Show scroll indicators
 		if start > 0 {
-			s.WriteString(" ▲ More entries above...\n")
+			s.WriteString(" [^] More entries above...\n")
 		}
 
 		// Display only visible entries
@@ -227,13 +233,13 @@ func (m HistoryViewModel) View() string {
 
 		// Show scroll indicator if more entries below
		if end < len(m.history) {
-			s.WriteString(fmt.Sprintf(" ▼ %d more entries below...\n", len(m.history)-end))
+			s.WriteString(fmt.Sprintf(" [v] %d more entries below...\n", len(m.history)-end))
 		}
 
 		s.WriteString("\n")
 	}
 
-	s.WriteString("⌨️ ↑/↓: Navigate • PgUp/PgDn: Jump • Home/End: First/Last • ESC: Back • q: Quit\n")
+	s.WriteString("[KEYS] Up/Down: Navigate - PgUp/PgDn: Jump - Home/End: First/Last - ESC: Back - q: Quit\n")
 
 	return s.String()
 }
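The history loader migrates from the deprecated ioutil.ReadDir to os.ReadDir. os.ReadDir returns lightweight os.DirEntry values that carry no modification time, which is why the hunk adds an explicit file.Info() call before reading ModTime(). The migration pattern in isolation:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.ReadDir returns []os.DirEntry (no stat per entry up front).
	entries, err := os.ReadDir(".")
	if err != nil {
		return
	}
	for _, e := range entries {
		// ModTime lives on fs.FileInfo, so an explicit Info() call is needed
		// where ioutil.ReadDir used to hand back FileInfo values directly.
		info, err := e.Info()
		if err != nil {
			continue
		}
		fmt.Println(e.Name(), info.ModTime().Format("2006-01-02 15:04"))
	}
}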
@@ -137,10 +137,10 @@ func (m InputModel) View() string {
 	s.WriteString("\n\n")
 
 	if m.err != nil {
-		s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v\n\n", m.err)))
+		s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v\n\n", m.err)))
 	}
 
-	s.WriteString("⌨️ Type value • Enter: Confirm • ESC: Cancel\n")
+	s.WriteString("[KEYS] Type value | Enter: Confirm | ESC: Cancel\n")
 
 	return s.String()
 }
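The input model renders m.err from whatever validator was passed to NewInputModel; the sample-ratio prompts earlier pass ValidateInt(1, 100). ValidateInt's body is not in this diff, but a range validator returning a closure is one natural shape for it. A hypothetical sketch:

package main

import (
	"fmt"
	"strconv"
)

// ValidateInt's implementation is not part of this diff; this is an assumed
// shape: it returns a closure the input model can run on each submission.
func ValidateInt(min, max int) func(string) error {
	return func(input string) error {
		n, err := strconv.Atoi(input)
		if err != nil {
			return fmt.Errorf("not a number: %q", input)
		}
		if n < min || n > max {
			return fmt.Errorf("must be between %d and %d", min, max)
		}
		return nil
	}
}

func main() {
	validate := ValidateInt(1, 100)
	fmt.Println(validate("10"))  // <nil>
	fmt.Println(validate("250")) // must be between 1 and 100
}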
@@ -89,12 +89,12 @@ func NewMenuModel(cfg *config.Config, log logger.Logger) *MenuModel {
 		"Single Database Backup",
 		"Sample Database Backup (with ratio)",
 		"Cluster Backup (all databases)",
-		"────────────────────────────────",
+		"--------------------------------",
 		"Restore Single Database",
 		"Restore Cluster Backup",
 		"Diagnose Backup File",
 		"List & Manage Backups",
-		"────────────────────────────────",
+		"--------------------------------",
 		"View Active Operations",
 		"Show Operation History",
 		"Database Status & Health Check",
@@ -177,7 +177,7 @@ func (m *MenuModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		case 12: // Settings
 			return m.handleSettings()
 		case 13: // Clear History
-			m.message = "🗑️ History cleared"
+			m.message = "[DEL] History cleared"
 		case 14: // Quit
 			if m.cancel != nil {
 				m.cancel()
@@ -262,7 +262,7 @@ func (m *MenuModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		case 12: // Settings
 			return m.handleSettings()
 		case 13: // Clear History
-			m.message = "🗑️ History cleared"
+			m.message = "[DEL] History cleared"
 		case 14: // Quit
 			if m.cancel != nil {
 				m.cancel()
@@ -285,7 +285,7 @@ func (m *MenuModel) View() string {
 	var s string
 
 	// Header
-	header := titleStyle.Render("🗄️ Database Backup Tool - Interactive Menu")
+	header := titleStyle.Render("Database Backup Tool - Interactive Menu")
 	s += fmt.Sprintf("\n%s\n\n", header)
 
 	if len(m.dbTypes) > 0 {
@@ -299,7 +299,7 @@ func (m *MenuModel) View() string {
 		}
 		selector := fmt.Sprintf("Target Engine: %s", strings.Join(options, menuStyle.Render(" | ")))
 		s += dbSelectorLabelStyle.Render(selector) + "\n"
-		hint := infoStyle.Render("Switch with ←/→ or t • Cluster backup requires PostgreSQL")
+		hint := infoStyle.Render("Switch with <-/-> or t | Cluster backup requires PostgreSQL")
 		s += hint + "\n"
 	}
 
@@ -326,7 +326,7 @@ func (m *MenuModel) View() string {
 	}
 
 	// Footer
-	footer := infoStyle.Render("\n⌨️ Press ↑/↓ to navigate • Enter to select • q to quit")
+	footer := infoStyle.Render("\n[KEYS] Press Up/Down to navigate | Enter to select | q to quit")
 	s += footer
 
 	return s
@@ -334,20 +334,20 @@ func (m *MenuModel) View() string {
 
 // handleSingleBackup opens database selector for single backup
 func (m *MenuModel) handleSingleBackup() (tea.Model, tea.Cmd) {
-	selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "🗄️ Single Database Backup", "single")
+	selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[SELECT] Single Database Backup", "single")
 	return selector, selector.Init()
 }
 
 // handleSampleBackup opens database selector for sample backup
 func (m *MenuModel) handleSampleBackup() (tea.Model, tea.Cmd) {
-	selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "📊 Sample Database Backup", "sample")
+	selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[SELECT] Sample Database Backup", "sample")
 	return selector, selector.Init()
 }
 
 // handleClusterBackup shows confirmation and executes cluster backup
 func (m *MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
 	if !m.config.IsPostgreSQL() {
-		m.message = errorStyle.Render("❌ Cluster backup is available only for PostgreSQL targets")
+		m.message = errorStyle.Render("[FAIL] Cluster backup is available only for PostgreSQL targets")
 		return m, nil
 	}
 	// Skip confirmation in auto-confirm mode
@@ -356,7 +356,7 @@ func (m *MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
 		return executor, executor.Init()
 	}
 	confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
-		"🗄️ Cluster Backup",
+		"[CHECK] Cluster Backup",
 		"This will backup ALL databases in the cluster. Continue?",
 		func() (tea.Model, tea.Cmd) {
 			executor := NewBackupExecution(m.config, m.logger, m, m.ctx, "cluster", "", 0)
@@ -399,7 +399,7 @@ func (m *MenuModel) handleRestoreSingle() (tea.Model, tea.Cmd) {
 // handleRestoreCluster opens archive browser for cluster restore
 func (m *MenuModel) handleRestoreCluster() (tea.Model, tea.Cmd) {
 	if !m.config.IsPostgreSQL() {
-		m.message = errorStyle.Render("❌ Cluster restore is available only for PostgreSQL")
+		m.message = errorStyle.Render("[FAIL] Cluster restore is available only for PostgreSQL")
 		return m, nil
 	}
 	browser := NewArchiveBrowser(m.config, m.logger, m, m.ctx, "restore-cluster")
@@ -428,7 +428,7 @@ func (m *MenuModel) applyDatabaseSelection() {
 
 	selection := m.dbTypes[m.dbTypeCursor]
 	if err := m.config.SetDatabaseType(selection.value); err != nil {
-		m.message = errorStyle.Render(fmt.Sprintf("❌ %v", err))
+		m.message = errorStyle.Render(fmt.Sprintf("[FAIL] %v", err))
 		return
 	}
 
@@ -437,7 +437,7 @@ func (m *MenuModel) applyDatabaseSelection() {
 		m.config.Port = m.config.GetDefaultPort()
 	}
 
-	m.message = successStyle.Render(fmt.Sprintf("🔀 Target database set to %s", m.config.DisplayDatabaseType()))
+	m.message = successStyle.Render(fmt.Sprintf("[SWITCH] Target database set to %s", m.config.DisplayDatabaseType()))
 	if m.logger != nil {
 		m.logger.Info("updated target database type", "type", m.config.DatabaseType, "port", m.config.Port)
 	}
@@ -49,14 +49,14 @@ func (m OperationsViewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 func (m OperationsViewModel) View() string {
 	var s strings.Builder
 
-	header := titleStyle.Render("📊 Active Operations")
+	header := titleStyle.Render("[STATS] Active Operations")
 	s.WriteString(fmt.Sprintf("\n%s\n\n", header))
 
 	s.WriteString("Currently running operations:\n\n")
-	s.WriteString(infoStyle.Render("📭 No active operations"))
+	s.WriteString(infoStyle.Render("[NONE] No active operations"))
 	s.WriteString("\n\n")
 
-	s.WriteString("⌨️ Press any key to return to menu\n")
+	s.WriteString("[KEYS] Press any key to return to menu\n")
 
 	return s.String()
 }
Some files were not shown because too many files have changed in this diff