Compare commits
14 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3e41d88445 | |||
| 5fb88b14ba | |||
| cccee4294f | |||
| 9688143176 | |||
| e821e131b4 | |||
| 15a60d2e71 | |||
| 9c65821250 | |||
| 627061cdbb | |||
| e1a7c57e0f | |||
| 22915102d4 | |||
| 3653ced6da | |||
| 9743d571ce | |||
| c519f08ef2 | |||
| b99b05fedb |
@@ -63,7 +63,7 @@ jobs:
|
|||||||
name: Build & Release
|
name: Build & Release
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: [test, lint]
|
needs: [test, lint]
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
container:
|
container:
|
||||||
image: golang:1.24-bookworm
|
image: golang:1.24-bookworm
|
||||||
steps:
|
steps:
|
||||||
@@ -82,24 +82,27 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
mkdir -p release
|
mkdir -p release
|
||||||
|
|
||||||
# Linux amd64
|
# Install cross-compilation tools for CGO
|
||||||
echo "Building linux/amd64..."
|
apt-get update && apt-get install -y -qq gcc-aarch64-linux-gnu
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-linux-amd64 .
|
|
||||||
|
|
||||||
# Linux arm64
|
# Linux amd64 (with CGO for SQLite)
|
||||||
echo "Building linux/arm64..."
|
echo "Building linux/amd64 (CGO enabled)..."
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -o release/dbbackup-linux-arm64 .
|
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-linux-amd64 .
|
||||||
|
|
||||||
# Darwin amd64
|
# Linux arm64 (with CGO for SQLite)
|
||||||
echo "Building darwin/amd64..."
|
echo "Building linux/arm64 (CGO enabled)..."
|
||||||
|
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -o release/dbbackup-linux-arm64 .
|
||||||
|
|
||||||
|
# Darwin amd64 (no CGO - cross-compile limitation)
|
||||||
|
echo "Building darwin/amd64 (CGO disabled)..."
|
||||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-darwin-amd64 .
|
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-darwin-amd64 .
|
||||||
|
|
||||||
# Darwin arm64
|
# Darwin arm64 (no CGO - cross-compile limitation)
|
||||||
echo "Building darwin/arm64..."
|
echo "Building darwin/arm64 (CGO disabled)..."
|
||||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -ldflags="-s -w" -o release/dbbackup-darwin-arm64 .
|
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -ldflags="-s -w" -o release/dbbackup-darwin-arm64 .
|
||||||
|
|
||||||
# FreeBSD amd64
|
# FreeBSD amd64 (no CGO - cross-compile limitation)
|
||||||
echo "Building freebsd/amd64..."
|
echo "Building freebsd/amd64 (CGO disabled)..."
|
||||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-freebsd-amd64 .
|
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -ldflags="-s -w" -o release/dbbackup-freebsd-amd64 .
|
||||||
|
|
||||||
echo "All builds complete:"
|
echo "All builds complete:"
|
||||||
|
|||||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -34,3 +34,7 @@ coverage.html
|
|||||||
# Ignore temporary files
|
# Ignore temporary files
|
||||||
tmp/
|
tmp/
|
||||||
temp/
|
temp/
|
||||||
|
CRITICAL_BUGS_FIXED.md
|
||||||
|
LEGAL_DOCUMENTATION.md
|
||||||
|
LEGAL_*.md
|
||||||
|
legal/
|
||||||
|
|||||||
211
CHANGELOG.md
211
CHANGELOG.md
@@ -5,9 +5,216 @@ All notable changes to dbbackup will be documented in this file.
|
|||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||||
|
|
||||||
## [3.42.0] - 2026-01-07 "The Operator"
|
## [3.42.10] - 2026-01-08 "Code Quality"
|
||||||
|
|
||||||
### Added - 🐧 Systemd Integration & Prometheus Metrics
|
### Fixed - Code Quality Issues
|
||||||
|
- Removed deprecated `io/ioutil` usage (replaced with `os`)
|
||||||
|
- Fixed `os.DirEntry.ModTime()` → `file.Info().ModTime()`
|
||||||
|
- Removed unused fields and variables
|
||||||
|
- Fixed ineffective assignments in TUI code
|
||||||
|
- Fixed error strings (no capitalization, no trailing punctuation)
|
||||||
|
|
||||||
|
## [3.42.9] - 2026-01-08 "Diagnose Timeout Fix"
|
||||||
|
|
||||||
|
### Fixed - diagnose.go Timeout Bugs
|
||||||
|
|
||||||
|
**More short timeouts that caused large archive failures:**
|
||||||
|
|
||||||
|
- `diagnoseClusterArchive()`: tar listing 60s → **5 minutes**
|
||||||
|
- `verifyWithPgRestore()`: pg_restore --list 60s → **5 minutes**
|
||||||
|
- `DiagnoseClusterDumps()`: archive listing 120s → **10 minutes**
|
||||||
|
|
||||||
|
**Impact:** These timeouts caused "context deadline exceeded" errors when
|
||||||
|
diagnosing multi-GB backup archives, preventing TUI restore from even starting.
|
||||||
|
|
||||||
|
## [3.42.8] - 2026-01-08 "TUI Timeout Fix"
|
||||||
|
|
||||||
|
### Fixed - TUI Timeout Bugs Causing Backup/Restore Failures
|
||||||
|
|
||||||
|
**ROOT CAUSE of 2-3 month TUI backup/restore failures identified and fixed:**
|
||||||
|
|
||||||
|
#### Critical Timeout Fixes:
|
||||||
|
- **restore_preview.go**: Safety check timeout increased from 60s → **10 minutes**
|
||||||
|
- Large archives (>1GB) take 2+ minutes to diagnose
|
||||||
|
- Users saw "context deadline exceeded" before backup even started
|
||||||
|
- **dbselector.go**: Database listing timeout increased from 15s → **60 seconds**
|
||||||
|
- Busy PostgreSQL servers need more time to respond
|
||||||
|
- **status.go**: Status check timeout increased from 10s → **30 seconds**
|
||||||
|
- SSL negotiation and slow networks caused failures
|
||||||
|
|
||||||
|
#### Stability Improvements:
|
||||||
|
- **Panic recovery** added to parallel goroutines in:
|
||||||
|
- `backup/engine.go:BackupCluster()` - cluster backup workers
|
||||||
|
- `restore/engine.go:RestoreCluster()` - cluster restore workers
|
||||||
|
- Prevents single database panic from crashing entire operation
|
||||||
|
|
||||||
|
#### Bug Fix:
|
||||||
|
- **restore/engine.go**: Fixed variable shadowing `err` → `cmdErr` for exit code detection
|
||||||
|
|
||||||
|
## [3.42.7] - 2026-01-08 "Context Killer Complete"
|
||||||
|
|
||||||
|
### Fixed - Additional Deadlock Bugs in Restore & Engine
|
||||||
|
|
||||||
|
**All remaining cmd.Wait() deadlock bugs fixed across the codebase:**
|
||||||
|
|
||||||
|
#### internal/restore/engine.go:
|
||||||
|
- `executeRestoreWithDecompression()` - gunzip/pigz pipeline restore
|
||||||
|
- `extractArchive()` - tar extraction for cluster restore
|
||||||
|
- `restoreGlobals()` - pg_dumpall globals restore
|
||||||
|
|
||||||
|
#### internal/backup/engine.go:
|
||||||
|
- `createArchive()` - tar/pigz archive creation pipeline
|
||||||
|
|
||||||
|
#### internal/engine/mysqldump.go:
|
||||||
|
- `Backup()` - mysqldump backup operation
|
||||||
|
- `BackupToWriter()` - streaming mysqldump to writer
|
||||||
|
|
||||||
|
**All 6 functions now use proper channel-based context handling with Process.Kill().**
|
||||||
|
|
||||||
|
## [3.42.6] - 2026-01-08 "Deadlock Killer"
|
||||||
|
|
||||||
|
### Fixed - Backup Command Context Handling
|
||||||
|
|
||||||
|
**Critical Bug: pg_dump/mysqldump could hang forever on context cancellation**
|
||||||
|
|
||||||
|
The `executeCommand`, `executeCommandWithProgress`, `executeMySQLWithProgressAndCompression`,
|
||||||
|
and `executeMySQLWithCompression` functions had a race condition where:
|
||||||
|
|
||||||
|
1. A goroutine was spawned to read stderr
|
||||||
|
2. `cmd.Wait()` was called directly
|
||||||
|
3. If context was cancelled, the process was NOT killed
|
||||||
|
4. The goroutine could hang forever waiting for stderr
|
||||||
|
|
||||||
|
**Fix**: All backup execution functions now use proper channel-based context handling:
|
||||||
|
```go
|
||||||
|
// Wait for command with context handling
|
||||||
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Context cancelled - kill process
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Affected Functions:**
|
||||||
|
- `executeCommand()` - pg_dump for cluster backup
|
||||||
|
- `executeCommandWithProgress()` - pg_dump for single backup with progress
|
||||||
|
- `executeMySQLWithProgressAndCompression()` - mysqldump pipeline
|
||||||
|
- `executeMySQLWithCompression()` - mysqldump pipeline
|
||||||
|
|
||||||
|
**This fixes:** Backup operations hanging indefinitely when cancelled or timing out.
|
||||||
|
|
||||||
|
## [3.42.5] - 2026-01-08 "False Positive Fix"
|
||||||
|
|
||||||
|
### Fixed - Encryption Detection Bug
|
||||||
|
|
||||||
|
**IsBackupEncrypted False Positive:**
|
||||||
|
- **BUG FIX**: `IsBackupEncrypted()` returned `true` for ALL files, blocking normal restores
|
||||||
|
- Root cause: Fallback logic checked if first 12 bytes (nonce size) could be read - always true
|
||||||
|
- Fix: Now properly detects known unencrypted formats by magic bytes:
|
||||||
|
- Gzip: `1f 8b`
|
||||||
|
- PostgreSQL custom: `PGDMP`
|
||||||
|
- Plain SQL: starts with `--`, `SET`, `CREATE`
|
||||||
|
- Returns `false` if no metadata present and format is recognized as unencrypted
|
||||||
|
- Affected file: `internal/backup/encryption.go`
|
||||||
|
|
||||||
|
## [3.42.4] - 2026-01-08 "The Long Haul"
|
||||||
|
|
||||||
|
### Fixed - Critical Restore Timeout Bug
|
||||||
|
|
||||||
|
**Removed Arbitrary Timeouts from Backup/Restore Operations:**
|
||||||
|
- **CRITICAL FIX**: Removed 4-hour timeout that was killing large database restores
|
||||||
|
- PostgreSQL cluster restores of 69GB+ databases no longer fail with "context deadline exceeded"
|
||||||
|
- All backup/restore operations now use `context.WithCancel` instead of `context.WithTimeout`
|
||||||
|
- Operations run until completion or manual cancellation (Ctrl+C)
|
||||||
|
|
||||||
|
**Affected Files:**
|
||||||
|
- `internal/tui/restore_exec.go`: Changed from 4-hour timeout to context.WithCancel
|
||||||
|
- `internal/tui/backup_exec.go`: Changed from 4-hour timeout to context.WithCancel
|
||||||
|
- `internal/backup/engine.go`: Removed per-database timeout in cluster backup
|
||||||
|
- `cmd/restore.go`: CLI restore commands use context.WithCancel
|
||||||
|
|
||||||
|
**exec.Command Context Audit:**
|
||||||
|
- Fixed `exec.Command` without Context in `internal/restore/engine.go:730`
|
||||||
|
- Added proper context handling to all external command calls
|
||||||
|
- Added timeouts only for quick diagnostic/version checks (not restore path):
|
||||||
|
- `restore/version_check.go`: 30s timeout for pg_restore --version check only
|
||||||
|
- `restore/error_report.go`: 10s timeout for tool version detection
|
||||||
|
- `restore/diagnose.go`: 60s timeout for diagnostic functions
|
||||||
|
- `pitr/binlog.go`: 10s timeout for mysqlbinlog --version check
|
||||||
|
- `cleanup/processes.go`: 5s timeout for process listing
|
||||||
|
- `auth/helper.go`: 30s timeout for auth helper commands
|
||||||
|
|
||||||
|
**Verification:**
|
||||||
|
- 54 total `exec.CommandContext` calls verified in backup/restore/pitr path
|
||||||
|
- 0 `exec.Command` without Context in critical restore path
|
||||||
|
- All 14 PostgreSQL exec calls use CommandContext (pg_dump, pg_restore, psql)
|
||||||
|
- All 15 MySQL/MariaDB exec calls use CommandContext (mysqldump, mysql, mysqlbinlog)
|
||||||
|
- All 14 test packages pass
|
||||||
|
|
||||||
|
### Technical Details
|
||||||
|
- Large Object (BLOB/BYTEA) restores are particularly affected by timeouts
|
||||||
|
- 69GB database with large objects can take 5+ hours to restore
|
||||||
|
- Previous 4-hour hard timeout was causing consistent failures
|
||||||
|
- Now: No timeout - runs until complete or user cancels
|
||||||
|
|
||||||
|
## [3.42.1] - 2026-01-07 "Resistance is Futile"
|
||||||
|
|
||||||
|
### Added - Content-Defined Chunking Deduplication
|
||||||
|
|
||||||
|
**Deduplication Engine:**
|
||||||
|
- New `dbbackup dedup` command family for space-efficient backups
|
||||||
|
- Gear hash content-defined chunking (CDC) with 92%+ overlap on shifted data
|
||||||
|
- SHA-256 content-addressed storage - chunks stored by hash
|
||||||
|
- AES-256-GCM per-chunk encryption (optional, via `--encrypt`)
|
||||||
|
- Gzip compression enabled by default
|
||||||
|
- SQLite index for fast chunk lookups
|
||||||
|
- JSON manifests track chunks per backup with full verification
|
||||||
|
|
||||||
|
**Dedup Commands:**
|
||||||
|
```bash
|
||||||
|
dbbackup dedup backup <file> # Create deduplicated backup
|
||||||
|
dbbackup dedup backup <file> --encrypt # With encryption
|
||||||
|
dbbackup dedup restore <id> <output> # Restore from manifest
|
||||||
|
dbbackup dedup list # List all backups
|
||||||
|
dbbackup dedup stats # Show deduplication statistics
|
||||||
|
dbbackup dedup delete <id> # Delete a backup manifest
|
||||||
|
dbbackup dedup gc # Garbage collect unreferenced chunks
|
||||||
|
```
|
||||||
|
|
||||||
|
**Storage Structure:**
|
||||||
|
```
|
||||||
|
<backup-dir>/dedup/
|
||||||
|
chunks/ # Content-addressed chunk files (sharded by hash prefix)
|
||||||
|
manifests/ # JSON manifest per backup
|
||||||
|
chunks.db # SQLite index for fast lookups
|
||||||
|
```
|
||||||
|
|
||||||
|
**Test Results:**
|
||||||
|
- First 5MB backup: 448 chunks, 5MB stored
|
||||||
|
- Modified 5MB file: 448 chunks, only 1 NEW chunk (1.6KB), 100% dedup ratio
|
||||||
|
- Restore with SHA-256 verification
|
||||||
|
|
||||||
|
### Added - Documentation Updates
|
||||||
|
- Prometheus alerting rules added to SYSTEMD.md
|
||||||
|
- Catalog sync instructions for existing backups
|
||||||
|
|
||||||
|
## [3.41.1] - 2026-01-07
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Enabled CGO for Linux builds (required for SQLite catalog)
|
||||||
|
|
||||||
|
## [3.41.0] - 2026-01-07 "The Operator"
|
||||||
|
|
||||||
|
### Added - Systemd Integration & Prometheus Metrics
|
||||||
|
|
||||||
**Embedded Systemd Installer:**
|
**Embedded Systemd Installer:**
|
||||||
- New `dbbackup install` command installs as systemd service/timer
|
- New `dbbackup install` command installs as systemd service/timer
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ Be respectful, constructive, and professional in all interactions. We're buildin
|
|||||||
|
|
||||||
**Bug Report Template:**
|
**Bug Report Template:**
|
||||||
```
|
```
|
||||||
**Version:** dbbackup v3.40.0
|
**Version:** dbbackup v3.42.1
|
||||||
**OS:** Linux/macOS/BSD
|
**OS:** Linux/macOS/BSD
|
||||||
**Database:** PostgreSQL 14 / MySQL 8.0 / MariaDB 10.6
|
**Database:** PostgreSQL 14 / MySQL 8.0 / MariaDB 10.6
|
||||||
**Command:** The exact command that failed
|
**Command:** The exact command that failed
|
||||||
|
|||||||
295
EMOTICON_REMOVAL_PLAN.md
Normal file
295
EMOTICON_REMOVAL_PLAN.md
Normal file
@@ -0,0 +1,295 @@
|
|||||||
|
# Emoticon Removal Plan for Python Code
|
||||||
|
|
||||||
|
## ⚠️ CRITICAL: Code Must Remain Functional After Removal
|
||||||
|
|
||||||
|
This document outlines a **safe, systematic approach** to removing emoticons from Python code without breaking functionality.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. Identification Phase
|
||||||
|
|
||||||
|
### 1.1 Where Emoticons CAN Safely Exist (Safe to Remove)
|
||||||
|
| Location | Risk Level | Action |
|
||||||
|
|----------|------------|--------|
|
||||||
|
| Comments (`# 🎉 Success!`) | ✅ SAFE | Remove or replace with text |
|
||||||
|
| Docstrings (`"""📌 Note:..."""`) | ✅ SAFE | Remove or replace with text |
|
||||||
|
| Print statements for decoration (`print("✅ Done!")`) | ⚠️ LOW | Replace with ASCII or text |
|
||||||
|
| Logging messages (`logger.info("🔥 Starting...")`) | ⚠️ LOW | Replace with text equivalent |
|
||||||
|
|
||||||
|
### 1.2 Where Emoticons are DANGEROUS to Remove
|
||||||
|
| Location | Risk Level | Action |
|
||||||
|
|----------|------------|--------|
|
||||||
|
| String literals used in logic | 🚨 HIGH | **DO NOT REMOVE** without analysis |
|
||||||
|
| Dictionary keys (`{"🔑": value}`) | 🚨 CRITICAL | **NEVER REMOVE** - breaks code |
|
||||||
|
| Regex patterns | 🚨 CRITICAL | **NEVER REMOVE** - breaks matching |
|
||||||
|
| String comparisons (`if x == "✅"`) | 🚨 CRITICAL | Requires refactoring, not just removal |
|
||||||
|
| Database/API payloads | 🚨 CRITICAL | May break external systems |
|
||||||
|
| File content markers | 🚨 HIGH | May break parsing logic |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Pre-Removal Checklist
|
||||||
|
|
||||||
|
### 2.1 Before ANY Changes
|
||||||
|
- [ ] **Full backup** of the codebase
|
||||||
|
- [ ] **Run all tests** and record baseline results
|
||||||
|
- [ ] **Document all emoticon locations** with grep/search
|
||||||
|
- [ ] **Identify emoticon usage patterns** (decorative vs. functional)
|
||||||
|
|
||||||
|
### 2.2 Discovery Commands
|
||||||
|
```bash
|
||||||
|
# Find all files with emoticons (Unicode range for common emojis)
|
||||||
|
grep -rn --include="*.py" -P '[\x{1F300}-\x{1F9FF}]' .
|
||||||
|
|
||||||
|
# Find emoticons in strings
|
||||||
|
grep -rn --include="*.py" -E '["'"'"'][^"'"'"']*[\x{1F300}-\x{1F9FF}]' .
|
||||||
|
|
||||||
|
# List unique emoticons used
|
||||||
|
grep -oP '[\x{1F300}-\x{1F9FF}]' *.py | sort -u
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Replacement Strategy
|
||||||
|
|
||||||
|
### 3.1 Semantic Replacement Table
|
||||||
|
| Emoticon | Text Replacement | Context |
|
||||||
|
|----------|------------------|---------|
|
||||||
|
| ✅ | `[OK]` or `[SUCCESS]` | Status indicators |
|
||||||
|
| ❌ | `[FAIL]` or `[ERROR]` | Error indicators |
|
||||||
|
| ⚠️ | `[WARNING]` | Warning messages |
|
||||||
|
| 🔥 | `[HOT]` or `` (remove) | Decorative |
|
||||||
|
| 🎉 | `[DONE]` or `` (remove) | Celebration/completion |
|
||||||
|
| 📌 | `[NOTE]` | Notes/pinned items |
|
||||||
|
| 🚀 | `[START]` or `` (remove) | Launch/start indicators |
|
||||||
|
| 💾 | `[SAVE]` | Save operations |
|
||||||
|
| 🔑 | `[KEY]` | Key/authentication |
|
||||||
|
| 📁 | `[FILE]` | File operations |
|
||||||
|
| 🔍 | `[SEARCH]` | Search operations |
|
||||||
|
| ⏳ | `[WAIT]` or `[LOADING]` | Progress indicators |
|
||||||
|
| 🛑 | `[STOP]` | Stop/halt indicators |
|
||||||
|
| ℹ️ | `[INFO]` | Information |
|
||||||
|
| 🐛 | `[BUG]` or `[DEBUG]` | Debug messages |
|
||||||
|
|
||||||
|
### 3.2 Context-Aware Replacement Rules
|
||||||
|
|
||||||
|
```
|
||||||
|
RULE 1: Comments
|
||||||
|
- Remove emoticon entirely OR replace with text
|
||||||
|
- Example: `# 🎉 Feature complete` → `# Feature complete`
|
||||||
|
|
||||||
|
RULE 2: User-facing strings (print/logging)
|
||||||
|
- Replace with semantic text equivalent
|
||||||
|
- Example: `print("✅ Backup complete")` → `print("[OK] Backup complete")`
|
||||||
|
|
||||||
|
RULE 3: Functional strings (DANGER ZONE)
|
||||||
|
- DO NOT auto-replace
|
||||||
|
- Requires manual code refactoring
|
||||||
|
- Example: `status = "✅"` → Refactor to `status = "success"` AND update all comparisons
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Safe Removal Process
|
||||||
|
|
||||||
|
### Step 1: Audit
|
||||||
|
```python
|
||||||
|
# Python script to audit emoticon usage
|
||||||
|
import re
|
||||||
|
import ast
|
||||||
|
|
||||||
|
EMOJI_PATTERN = re.compile(
|
||||||
|
"["
|
||||||
|
"\U0001F300-\U0001F9FF" # Symbols & Pictographs
|
||||||
|
"\U00002600-\U000026FF" # Misc symbols
|
||||||
|
"\U00002700-\U000027BF" # Dingbats
|
||||||
|
"\U0001F600-\U0001F64F" # Emoticons
|
||||||
|
"]+"
|
||||||
|
)
|
||||||
|
|
||||||
|
def audit_file(filepath):
|
||||||
|
with open(filepath, 'r', encoding='utf-8') as f:
|
||||||
|
content = f.read()
|
||||||
|
|
||||||
|
# Parse AST to understand context
|
||||||
|
tree = ast.parse(content)
|
||||||
|
|
||||||
|
findings = []
|
||||||
|
for lineno, line in enumerate(content.split('\n'), 1):
|
||||||
|
matches = EMOJI_PATTERN.findall(line)
|
||||||
|
if matches:
|
||||||
|
# Determine context (comment, string, etc.)
|
||||||
|
context = classify_context(line, matches)
|
||||||
|
findings.append({
|
||||||
|
'line': lineno,
|
||||||
|
'content': line.strip(),
|
||||||
|
'emojis': matches,
|
||||||
|
'context': context,
|
||||||
|
'risk': assess_risk(context)
|
||||||
|
})
|
||||||
|
return findings
|
||||||
|
|
||||||
|
def classify_context(line, matches):
|
||||||
|
stripped = line.strip()
|
||||||
|
if stripped.startswith('#'):
|
||||||
|
return 'COMMENT'
|
||||||
|
if 'print(' in line or 'logging.' in line or 'logger.' in line:
|
||||||
|
return 'OUTPUT'
|
||||||
|
if '==' in line or '!=' in line:
|
||||||
|
return 'COMPARISON'
|
||||||
|
if re.search(r'["\'][^"\']*$', line.split('#')[0]):
|
||||||
|
return 'STRING_LITERAL'
|
||||||
|
return 'UNKNOWN'
|
||||||
|
|
||||||
|
def assess_risk(context):
|
||||||
|
risk_map = {
|
||||||
|
'COMMENT': 'LOW',
|
||||||
|
'OUTPUT': 'LOW',
|
||||||
|
'COMPARISON': 'CRITICAL',
|
||||||
|
'STRING_LITERAL': 'HIGH',
|
||||||
|
'UNKNOWN': 'HIGH'
|
||||||
|
}
|
||||||
|
return risk_map.get(context, 'HIGH')
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Generate Change Plan
|
||||||
|
```python
|
||||||
|
def generate_change_plan(findings):
|
||||||
|
plan = {'safe': [], 'review_required': [], 'do_not_touch': []}
|
||||||
|
|
||||||
|
for finding in findings:
|
||||||
|
if finding['risk'] == 'LOW':
|
||||||
|
plan['safe'].append(finding)
|
||||||
|
elif finding['risk'] == 'HIGH':
|
||||||
|
plan['review_required'].append(finding)
|
||||||
|
else: # CRITICAL
|
||||||
|
plan['do_not_touch'].append(finding)
|
||||||
|
|
||||||
|
return plan
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Apply Changes (SAFE items only)
|
||||||
|
```python
|
||||||
|
def apply_safe_replacements(filepath, replacements):
|
||||||
|
# Create backup first!
|
||||||
|
import shutil
|
||||||
|
shutil.copy(filepath, filepath + '.backup')
|
||||||
|
|
||||||
|
with open(filepath, 'r', encoding='utf-8') as f:
|
||||||
|
content = f.read()
|
||||||
|
|
||||||
|
for old, new in replacements:
|
||||||
|
content = content.replace(old, new)
|
||||||
|
|
||||||
|
with open(filepath, 'w', encoding='utf-8') as f:
|
||||||
|
f.write(content)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Validate
|
||||||
|
```bash
|
||||||
|
# After each file change:
|
||||||
|
python -m py_compile <modified_file.py> # Syntax check
|
||||||
|
pytest <related_tests> # Run tests
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Validation Checklist
|
||||||
|
|
||||||
|
### After EACH File Modification
|
||||||
|
- [ ] File compiles without syntax errors (`python -m py_compile file.py`)
|
||||||
|
- [ ] All imports still work
|
||||||
|
- [ ] Related unit tests pass
|
||||||
|
- [ ] Integration tests pass
|
||||||
|
- [ ] Manual smoke test if applicable
|
||||||
|
|
||||||
|
### After ALL Modifications
|
||||||
|
- [ ] Full test suite passes
|
||||||
|
- [ ] Application starts correctly
|
||||||
|
- [ ] Key functionality verified manually
|
||||||
|
- [ ] No new warnings in logs
|
||||||
|
- [ ] Compare output with baseline
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. Rollback Plan
|
||||||
|
|
||||||
|
### If Something Breaks
|
||||||
|
1. **Immediate**: Restore from `.backup` files
|
||||||
|
2. **Git**: `git checkout -- <file>` or `git stash pop`
|
||||||
|
3. **Full rollback**: Restore from pre-change backup
|
||||||
|
|
||||||
|
### Keep Until Verified
|
||||||
|
```bash
|
||||||
|
# Backup storage structure
|
||||||
|
backups/
|
||||||
|
├── pre_emoticon_removal/
|
||||||
|
│ ├── timestamp.tar.gz
|
||||||
|
│ └── git_commit_hash.txt
|
||||||
|
└── individual_files/
|
||||||
|
├── file1.py.backup
|
||||||
|
└── file2.py.backup
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Implementation Order
|
||||||
|
|
||||||
|
1. **Phase 1**: Comments only (LOWEST risk)
|
||||||
|
2. **Phase 2**: Docstrings (LOW risk)
|
||||||
|
3. **Phase 3**: Print/logging statements (LOW-MEDIUM risk)
|
||||||
|
4. **Phase 4**: Manual review items (HIGH risk) - one by one
|
||||||
|
5. **Phase 5**: NEVER touch CRITICAL items without full refactoring
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8. Example Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Create full backup
|
||||||
|
git stash && git checkout -b emoticon-removal
|
||||||
|
|
||||||
|
# 2. Run audit script
|
||||||
|
python emoticon_audit.py > audit_report.json
|
||||||
|
|
||||||
|
# 3. Review audit report
|
||||||
|
cat audit_report.json | jq '.do_not_touch' # Check critical items
|
||||||
|
|
||||||
|
# 4. Apply safe changes only
|
||||||
|
python apply_safe_changes.py --dry-run # Preview first!
|
||||||
|
python apply_safe_changes.py # Apply
|
||||||
|
|
||||||
|
# 5. Validate after each change
|
||||||
|
python -m pytest tests/
|
||||||
|
|
||||||
|
# 6. Commit incrementally
|
||||||
|
git add -p # Review each change
|
||||||
|
git commit -m "Remove emoticons from comments in module X"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9. DO NOT DO
|
||||||
|
|
||||||
|
❌ **Never** use global find-replace on emoticons
|
||||||
|
❌ **Never** remove emoticons from string comparisons without refactoring
|
||||||
|
❌ **Never** change multiple files without testing between changes
|
||||||
|
❌ **Never** assume an emoticon is decorative - verify context
|
||||||
|
❌ **Never** proceed if tests fail after a change
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10. Sign-Off Requirements
|
||||||
|
|
||||||
|
Before merging emoticon removal changes:
|
||||||
|
- [ ] All tests pass (100%)
|
||||||
|
- [ ] Code review by second developer
|
||||||
|
- [ ] Manual testing of affected features
|
||||||
|
- [ ] Documented all CRITICAL items left unchanged (with justification)
|
||||||
|
- [ ] Backup verified and accessible
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Author**: Generated Plan
|
||||||
|
**Date**: 2026-01-07
|
||||||
|
**Status**: PLAN ONLY - No code changes made
|
||||||
@@ -56,7 +56,7 @@ Download from [releases](https://git.uuxo.net/UUXO/dbbackup/releases):
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Linux x86_64
|
# Linux x86_64
|
||||||
wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.40.0/dbbackup-linux-amd64
|
wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.1/dbbackup-linux-amd64
|
||||||
chmod +x dbbackup-linux-amd64
|
chmod +x dbbackup-linux-amd64
|
||||||
sudo mv dbbackup-linux-amd64 /usr/local/bin/dbbackup
|
sudo mv dbbackup-linux-amd64 /usr/local/bin/dbbackup
|
||||||
```
|
```
|
||||||
@@ -143,7 +143,7 @@ Backup Execution
|
|||||||
|
|
||||||
Backup created: cluster_20251128_092928.tar.gz
|
Backup created: cluster_20251128_092928.tar.gz
|
||||||
Size: 22.5 GB (compressed)
|
Size: 22.5 GB (compressed)
|
||||||
Location: /u01/dba/dumps/
|
Location: /var/backups/postgres/
|
||||||
Databases: 7
|
Databases: 7
|
||||||
Checksum: SHA-256 verified
|
Checksum: SHA-256 verified
|
||||||
```
|
```
|
||||||
|
|||||||
108
RELEASE_NOTES.md
Normal file
108
RELEASE_NOTES.md
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
# v3.42.1 Release Notes
|
||||||
|
|
||||||
|
## What's New in v3.42.1
|
||||||
|
|
||||||
|
### Deduplication - Resistance is Futile
|
||||||
|
|
||||||
|
Content-defined chunking deduplication for space-efficient backups. Like restic/borgbackup but with **native database dump support**.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# First backup: 5MB stored
|
||||||
|
dbbackup dedup backup mydb.dump
|
||||||
|
|
||||||
|
# Second backup (modified): only 1.6KB new data stored!
|
||||||
|
# 100% deduplication ratio
|
||||||
|
dbbackup dedup backup mydb_modified.dump
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Features
|
||||||
|
- **Gear Hash CDC** - Content-defined chunking with 92%+ overlap on shifted data
|
||||||
|
- **SHA-256 Content-Addressed** - Chunks stored by hash, automatic deduplication
|
||||||
|
- **AES-256-GCM Encryption** - Optional per-chunk encryption
|
||||||
|
- **Gzip Compression** - Optional compression (enabled by default)
|
||||||
|
- **SQLite Index** - Fast chunk lookups and statistics
|
||||||
|
|
||||||
|
#### Commands
|
||||||
|
```bash
|
||||||
|
dbbackup dedup backup <file> # Create deduplicated backup
|
||||||
|
dbbackup dedup backup <file> --encrypt # With AES-256-GCM encryption
|
||||||
|
dbbackup dedup restore <id> <output> # Restore from manifest
|
||||||
|
dbbackup dedup list # List all backups
|
||||||
|
dbbackup dedup stats # Show deduplication statistics
|
||||||
|
dbbackup dedup delete <id> # Delete a backup
|
||||||
|
dbbackup dedup gc # Garbage collect unreferenced chunks
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Storage Structure
|
||||||
|
```
|
||||||
|
<backup-dir>/dedup/
|
||||||
|
chunks/ # Content-addressed chunk files
|
||||||
|
ab/cdef1234... # Sharded by first 2 chars of hash
|
||||||
|
manifests/ # JSON manifest per backup
|
||||||
|
chunks.db # SQLite index
|
||||||
|
```
|
||||||
|
|
||||||
|
### Also Included (from v3.41.x)
|
||||||
|
- **Systemd Integration** - One-command install with `dbbackup install`
|
||||||
|
- **Prometheus Metrics** - HTTP exporter on port 9399
|
||||||
|
- **Backup Catalog** - SQLite-based tracking of all backup operations
|
||||||
|
- **Prometheus Alerting Rules** - Added to SYSTEMD.md documentation
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
#### Quick Install (Recommended)
|
||||||
|
```bash
|
||||||
|
# Download for your platform
|
||||||
|
curl -LO https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.1/dbbackup-linux-amd64
|
||||||
|
|
||||||
|
# Install with systemd service
|
||||||
|
chmod +x dbbackup-linux-amd64
|
||||||
|
sudo ./dbbackup-linux-amd64 install --config /path/to/config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Available Binaries
|
||||||
|
| Platform | Architecture | Binary |
|
||||||
|
|----------|--------------|--------|
|
||||||
|
| Linux | amd64 | `dbbackup-linux-amd64` |
|
||||||
|
| Linux | arm64 | `dbbackup-linux-arm64` |
|
||||||
|
| macOS | Intel | `dbbackup-darwin-amd64` |
|
||||||
|
| macOS | Apple Silicon | `dbbackup-darwin-arm64` |
|
||||||
|
| FreeBSD | amd64 | `dbbackup-freebsd-amd64` |
|
||||||
|
|
||||||
|
### Systemd Commands
|
||||||
|
```bash
|
||||||
|
dbbackup install --config config.yaml # Install service + timer
|
||||||
|
dbbackup install --status # Check service status
|
||||||
|
dbbackup install --uninstall # Remove services
|
||||||
|
```
|
||||||
|
|
||||||
|
### Prometheus Metrics
|
||||||
|
Available at `http://localhost:9399/metrics`:
|
||||||
|
|
||||||
|
| Metric | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `dbbackup_last_backup_timestamp` | Unix timestamp of last backup |
|
||||||
|
| `dbbackup_last_backup_success` | 1 if successful, 0 if failed |
|
||||||
|
| `dbbackup_last_backup_duration_seconds` | Duration of last backup |
|
||||||
|
| `dbbackup_last_backup_size_bytes` | Size of last backup |
|
||||||
|
| `dbbackup_backup_total` | Total number of backups |
|
||||||
|
| `dbbackup_backup_errors_total` | Total number of failed backups |
|
||||||
|
|
||||||
|
### Security Features
|
||||||
|
- Hardened systemd service with `ProtectSystem=strict`
|
||||||
|
- `NoNewPrivileges=true` prevents privilege escalation
|
||||||
|
- Dedicated `dbbackup` system user (optional)
|
||||||
|
- Credential files with restricted permissions
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
- [SYSTEMD.md](SYSTEMD.md) - Complete systemd installation guide
|
||||||
|
- [README.md](README.md) - Full documentation
|
||||||
|
- [CHANGELOG.md](CHANGELOG.md) - Version history
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
- Fixed SQLite time parsing in dedup stats
|
||||||
|
- Fixed function name collision in cmd package
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Full Changelog**: https://git.uuxo.net/UUXO/dbbackup/compare/v3.41.1...v3.42.1
|
||||||
87
SYSTEMD.md
87
SYSTEMD.md
@@ -481,6 +481,93 @@ sudo ufw status
|
|||||||
sudo iptables -L -n | grep 9399
|
sudo iptables -L -n | grep 9399
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Prometheus Alerting Rules
|
||||||
|
|
||||||
|
Add these alert rules to your Prometheus configuration for backup monitoring:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# /etc/prometheus/rules/dbbackup.yml
|
||||||
|
groups:
|
||||||
|
- name: dbbackup
|
||||||
|
rules:
|
||||||
|
# Alert if no successful backup in 24 hours
|
||||||
|
- alert: DBBackupMissing
|
||||||
|
expr: time() - dbbackup_last_success_timestamp > 86400
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "No backup in 24 hours on {{ $labels.instance }}"
|
||||||
|
description: "Database {{ $labels.database }} has not had a successful backup in over 24 hours."
|
||||||
|
|
||||||
|
# Alert if backup verification failed
|
||||||
|
- alert: DBBackupVerificationFailed
|
||||||
|
expr: dbbackup_backup_verified == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Backup verification failed on {{ $labels.instance }}"
|
||||||
|
description: "Last backup for {{ $labels.database }} failed verification check."
|
||||||
|
|
||||||
|
# Alert if RPO exceeded (48 hours)
|
||||||
|
- alert: DBBackupRPOExceeded
|
||||||
|
expr: dbbackup_rpo_seconds > 172800
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "RPO exceeded on {{ $labels.instance }}"
|
||||||
|
description: "Recovery Point Objective exceeded 48 hours for {{ $labels.database }}."
|
||||||
|
|
||||||
|
# Alert if exporter is down
|
||||||
|
- alert: DBBackupExporterDown
|
||||||
|
expr: up{job="dbbackup"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "DBBackup exporter down on {{ $labels.instance }}"
|
||||||
|
description: "Cannot scrape metrics from dbbackup-exporter."
|
||||||
|
|
||||||
|
# Alert if backup size dropped significantly (possible truncation)
|
||||||
|
- alert: DBBackupSizeAnomaly
|
||||||
|
expr: dbbackup_last_backup_size_bytes < (dbbackup_last_backup_size_bytes offset 1d) * 0.5
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Backup size anomaly on {{ $labels.instance }}"
|
||||||
|
description: "Backup size for {{ $labels.database }} dropped by more than 50%."
|
||||||
|
```
|
||||||
|
|
||||||
|
### Loading Alert Rules
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test rules syntax
|
||||||
|
promtool check rules /etc/prometheus/rules/dbbackup.yml
|
||||||
|
|
||||||
|
# Reload Prometheus
|
||||||
|
sudo systemctl reload prometheus
|
||||||
|
# or via API:
|
||||||
|
curl -X POST http://localhost:9090/-/reload
|
||||||
|
```
|
||||||
|
|
||||||
|
## Catalog Sync for Existing Backups
|
||||||
|
|
||||||
|
If you have existing backups created before installing v3.41+, sync them to the catalog:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Sync existing backups to catalog
|
||||||
|
dbbackup catalog sync /path/to/backup/directory --allow-root
|
||||||
|
|
||||||
|
# Verify catalog contents
|
||||||
|
dbbackup catalog list --allow-root
|
||||||
|
|
||||||
|
# Show statistics
|
||||||
|
dbbackup catalog stats --allow-root
|
||||||
|
```
|
||||||
|
|
||||||
## Uninstallation
|
## Uninstallation
|
||||||
|
|
||||||
### Using Installer
|
### Using Installer
|
||||||
|
|||||||
@@ -1,22 +1,11 @@
|
|||||||
# DB Backup Tool - Pre-compiled Binaries
|
# DB Backup Tool - Pre-compiled Binaries
|
||||||
|
|
||||||
## Download
|
This directory contains pre-compiled binaries for the DB Backup Tool across multiple platforms and architectures.
|
||||||
|
|
||||||
**Binaries are distributed via GitHub Releases:**
|
|
||||||
|
|
||||||
📦 **https://github.com/PlusOne/dbbackup/releases**
|
|
||||||
|
|
||||||
Or build from source:
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/PlusOne/dbbackup.git
|
|
||||||
cd dbbackup
|
|
||||||
./build_all.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
## Build Information
|
## Build Information
|
||||||
- **Version**: 3.40.0
|
- **Version**: 3.42.1
|
||||||
- **Build Time**: 2026-01-07_10:55:47_UTC
|
- **Build Time**: 2026-01-08_05:03:53_UTC
|
||||||
- **Git Commit**: 495ee31
|
- **Git Commit**: 9c65821
|
||||||
|
|
||||||
## Recent Updates (v1.1.0)
|
## Recent Updates (v1.1.0)
|
||||||
- ✅ Fixed TUI progress display with line-by-line output
|
- ✅ Fixed TUI progress display with line-by-line output
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ echo "🔧 Using Go version: $GO_VERSION"
|
|||||||
|
|
||||||
# Configuration
|
# Configuration
|
||||||
APP_NAME="dbbackup"
|
APP_NAME="dbbackup"
|
||||||
VERSION="3.40.0"
|
VERSION=$(grep 'version.*=' main.go | head -1 | sed 's/.*"\(.*\)".*/\1/')
|
||||||
BUILD_TIME=$(date -u '+%Y-%m-%d_%H:%M:%S_UTC')
|
BUILD_TIME=$(date -u '+%Y-%m-%d_%H:%M:%S_UTC')
|
||||||
GIT_COMMIT=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
|
GIT_COMMIT=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
|
||||||
BIN_DIR="bin"
|
BIN_DIR="bin"
|
||||||
|
|||||||
116
cmd/catalog.go
116
cmd/catalog.go
@@ -252,8 +252,8 @@ func runCatalogSync(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
defer cat.Close()
|
defer cat.Close()
|
||||||
|
|
||||||
fmt.Printf("📁 Syncing backups from: %s\n", absDir)
|
fmt.Printf("[DIR] Syncing backups from: %s\n", absDir)
|
||||||
fmt.Printf("📊 Catalog database: %s\n\n", catalogDBPath)
|
fmt.Printf("[STATS] Catalog database: %s\n\n", catalogDBPath)
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
result, err := cat.SyncFromDirectory(ctx, absDir)
|
result, err := cat.SyncFromDirectory(ctx, absDir)
|
||||||
@@ -265,17 +265,17 @@ func runCatalogSync(cmd *cobra.Command, args []string) error {
|
|||||||
cat.SetLastSync(ctx)
|
cat.SetLastSync(ctx)
|
||||||
|
|
||||||
// Show results
|
// Show results
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
fmt.Printf(" Sync Results\n")
|
fmt.Printf(" Sync Results\n")
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
fmt.Printf(" ✅ Added: %d\n", result.Added)
|
fmt.Printf(" [OK] Added: %d\n", result.Added)
|
||||||
fmt.Printf(" 🔄 Updated: %d\n", result.Updated)
|
fmt.Printf(" [SYNC] Updated: %d\n", result.Updated)
|
||||||
fmt.Printf(" 🗑️ Removed: %d\n", result.Removed)
|
fmt.Printf(" [DEL] Removed: %d\n", result.Removed)
|
||||||
if result.Errors > 0 {
|
if result.Errors > 0 {
|
||||||
fmt.Printf(" ❌ Errors: %d\n", result.Errors)
|
fmt.Printf(" [FAIL] Errors: %d\n", result.Errors)
|
||||||
}
|
}
|
||||||
fmt.Printf(" ⏱️ Duration: %.2fs\n", result.Duration)
|
fmt.Printf(" [TIME] Duration: %.2fs\n", result.Duration)
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
|
|
||||||
// Show details if verbose
|
// Show details if verbose
|
||||||
if catalogVerbose && len(result.Details) > 0 {
|
if catalogVerbose && len(result.Details) > 0 {
|
||||||
@@ -323,7 +323,7 @@ func runCatalogList(cmd *cobra.Command, args []string) error {
|
|||||||
// Table format
|
// Table format
|
||||||
fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",
|
fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",
|
||||||
"DATABASE", "TYPE", "SIZE", "CREATED", "STATUS", "PATH")
|
"DATABASE", "TYPE", "SIZE", "CREATED", "STATUS", "PATH")
|
||||||
fmt.Println(strings.Repeat("─", 120))
|
fmt.Println(strings.Repeat("-", 120))
|
||||||
|
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
dbName := truncateString(entry.Database, 28)
|
dbName := truncateString(entry.Database, 28)
|
||||||
@@ -331,10 +331,10 @@ func runCatalogList(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
status := string(entry.Status)
|
status := string(entry.Status)
|
||||||
if entry.VerifyValid != nil && *entry.VerifyValid {
|
if entry.VerifyValid != nil && *entry.VerifyValid {
|
||||||
status = "✓ verified"
|
status = "[OK] verified"
|
||||||
}
|
}
|
||||||
if entry.DrillSuccess != nil && *entry.DrillSuccess {
|
if entry.DrillSuccess != nil && *entry.DrillSuccess {
|
||||||
status = "✓ tested"
|
status = "[OK] tested"
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",
|
fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",
|
||||||
@@ -377,20 +377,20 @@ func runCatalogStats(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Table format
|
// Table format
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
if catalogDatabase != "" {
|
if catalogDatabase != "" {
|
||||||
fmt.Printf(" Catalog Statistics: %s\n", catalogDatabase)
|
fmt.Printf(" Catalog Statistics: %s\n", catalogDatabase)
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" Catalog Statistics\n")
|
fmt.Printf(" Catalog Statistics\n")
|
||||||
}
|
}
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
|
fmt.Printf("=====================================================\n\n")
|
||||||
|
|
||||||
fmt.Printf("📊 Total Backups: %d\n", stats.TotalBackups)
|
fmt.Printf("[STATS] Total Backups: %d\n", stats.TotalBackups)
|
||||||
fmt.Printf("💾 Total Size: %s\n", stats.TotalSizeHuman)
|
fmt.Printf("[SAVE] Total Size: %s\n", stats.TotalSizeHuman)
|
||||||
fmt.Printf("📏 Average Size: %s\n", catalog.FormatSize(stats.AvgSize))
|
fmt.Printf("[SIZE] Average Size: %s\n", catalog.FormatSize(stats.AvgSize))
|
||||||
fmt.Printf("⏱️ Average Duration: %.1fs\n", stats.AvgDuration)
|
fmt.Printf("[TIME] Average Duration: %.1fs\n", stats.AvgDuration)
|
||||||
fmt.Printf("✅ Verified: %d\n", stats.VerifiedCount)
|
fmt.Printf("[OK] Verified: %d\n", stats.VerifiedCount)
|
||||||
fmt.Printf("🧪 Drill Tested: %d\n", stats.DrillTestedCount)
|
fmt.Printf("[TEST] Drill Tested: %d\n", stats.DrillTestedCount)
|
||||||
|
|
||||||
if stats.OldestBackup != nil {
|
if stats.OldestBackup != nil {
|
||||||
fmt.Printf("📅 Oldest Backup: %s\n", stats.OldestBackup.Format("2006-01-02 15:04"))
|
fmt.Printf("📅 Oldest Backup: %s\n", stats.OldestBackup.Format("2006-01-02 15:04"))
|
||||||
@@ -400,27 +400,27 @@ func runCatalogStats(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(stats.ByDatabase) > 0 && catalogDatabase == "" {
|
if len(stats.ByDatabase) > 0 && catalogDatabase == "" {
|
||||||
fmt.Printf("\n📁 By Database:\n")
|
fmt.Printf("\n[DIR] By Database:\n")
|
||||||
for db, count := range stats.ByDatabase {
|
for db, count := range stats.ByDatabase {
|
||||||
fmt.Printf(" %-30s %d\n", db, count)
|
fmt.Printf(" %-30s %d\n", db, count)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(stats.ByType) > 0 {
|
if len(stats.ByType) > 0 {
|
||||||
fmt.Printf("\n📦 By Type:\n")
|
fmt.Printf("\n[PKG] By Type:\n")
|
||||||
for t, count := range stats.ByType {
|
for t, count := range stats.ByType {
|
||||||
fmt.Printf(" %-15s %d\n", t, count)
|
fmt.Printf(" %-15s %d\n", t, count)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(stats.ByStatus) > 0 {
|
if len(stats.ByStatus) > 0 {
|
||||||
fmt.Printf("\n📋 By Status:\n")
|
fmt.Printf("\n[LOG] By Status:\n")
|
||||||
for s, count := range stats.ByStatus {
|
for s, count := range stats.ByStatus {
|
||||||
fmt.Printf(" %-15s %d\n", s, count)
|
fmt.Printf(" %-15s %d\n", s, count)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("\n=====================================================\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -488,26 +488,26 @@ func runCatalogGaps(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(allGaps) == 0 {
|
if len(allGaps) == 0 {
|
||||||
fmt.Printf("✅ No backup gaps detected (expected interval: %s)\n", interval)
|
fmt.Printf("[OK] No backup gaps detected (expected interval: %s)\n", interval)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
fmt.Printf(" Backup Gaps Detected (expected interval: %s)\n", interval)
|
fmt.Printf(" Backup Gaps Detected (expected interval: %s)\n", interval)
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
|
fmt.Printf("=====================================================\n\n")
|
||||||
|
|
||||||
totalGaps := 0
|
totalGaps := 0
|
||||||
criticalGaps := 0
|
criticalGaps := 0
|
||||||
|
|
||||||
for database, gaps := range allGaps {
|
for database, gaps := range allGaps {
|
||||||
fmt.Printf("📁 %s (%d gaps)\n", database, len(gaps))
|
fmt.Printf("[DIR] %s (%d gaps)\n", database, len(gaps))
|
||||||
|
|
||||||
for _, gap := range gaps {
|
for _, gap := range gaps {
|
||||||
totalGaps++
|
totalGaps++
|
||||||
icon := "ℹ️"
|
icon := "[INFO]"
|
||||||
switch gap.Severity {
|
switch gap.Severity {
|
||||||
case catalog.SeverityWarning:
|
case catalog.SeverityWarning:
|
||||||
icon = "⚠️"
|
icon = "[WARN]"
|
||||||
case catalog.SeverityCritical:
|
case catalog.SeverityCritical:
|
||||||
icon = "🚨"
|
icon = "🚨"
|
||||||
criticalGaps++
|
criticalGaps++
|
||||||
@@ -523,7 +523,7 @@ func runCatalogGaps(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
fmt.Printf("Total: %d gaps detected", totalGaps)
|
fmt.Printf("Total: %d gaps detected", totalGaps)
|
||||||
if criticalGaps > 0 {
|
if criticalGaps > 0 {
|
||||||
fmt.Printf(" (%d critical)", criticalGaps)
|
fmt.Printf(" (%d critical)", criticalGaps)
|
||||||
@@ -598,20 +598,20 @@ func runCatalogSearch(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Printf("Found %d matching backups:\n\n", len(entries))
|
fmt.Printf("Found %d matching backups:\n\n", len(entries))
|
||||||
|
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
fmt.Printf("📁 %s\n", entry.Database)
|
fmt.Printf("[DIR] %s\n", entry.Database)
|
||||||
fmt.Printf(" Path: %s\n", entry.BackupPath)
|
fmt.Printf(" Path: %s\n", entry.BackupPath)
|
||||||
fmt.Printf(" Type: %s | Size: %s | Created: %s\n",
|
fmt.Printf(" Type: %s | Size: %s | Created: %s\n",
|
||||||
entry.DatabaseType,
|
entry.DatabaseType,
|
||||||
catalog.FormatSize(entry.SizeBytes),
|
catalog.FormatSize(entry.SizeBytes),
|
||||||
entry.CreatedAt.Format("2006-01-02 15:04:05"))
|
entry.CreatedAt.Format("2006-01-02 15:04:05"))
|
||||||
if entry.Encrypted {
|
if entry.Encrypted {
|
||||||
fmt.Printf(" 🔒 Encrypted\n")
|
fmt.Printf(" [LOCK] Encrypted\n")
|
||||||
}
|
}
|
||||||
if entry.VerifyValid != nil && *entry.VerifyValid {
|
if entry.VerifyValid != nil && *entry.VerifyValid {
|
||||||
fmt.Printf(" ✅ Verified: %s\n", entry.VerifiedAt.Format("2006-01-02 15:04"))
|
fmt.Printf(" [OK] Verified: %s\n", entry.VerifiedAt.Format("2006-01-02 15:04"))
|
||||||
}
|
}
|
||||||
if entry.DrillSuccess != nil && *entry.DrillSuccess {
|
if entry.DrillSuccess != nil && *entry.DrillSuccess {
|
||||||
fmt.Printf(" 🧪 Drill Tested: %s\n", entry.DrillTestedAt.Format("2006-01-02 15:04"))
|
fmt.Printf(" [TEST] Drill Tested: %s\n", entry.DrillTestedAt.Format("2006-01-02 15:04"))
|
||||||
}
|
}
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
}
|
}
|
||||||
@@ -655,64 +655,64 @@ func runCatalogInfo(cmd *cobra.Command, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
fmt.Printf(" Backup Details\n")
|
fmt.Printf(" Backup Details\n")
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
|
fmt.Printf("=====================================================\n\n")
|
||||||
|
|
||||||
fmt.Printf("📁 Database: %s\n", entry.Database)
|
fmt.Printf("[DIR] Database: %s\n", entry.Database)
|
||||||
fmt.Printf("🔧 Type: %s\n", entry.DatabaseType)
|
fmt.Printf("🔧 Type: %s\n", entry.DatabaseType)
|
||||||
fmt.Printf("🖥️ Host: %s:%d\n", entry.Host, entry.Port)
|
fmt.Printf("[HOST] Host: %s:%d\n", entry.Host, entry.Port)
|
||||||
fmt.Printf("📂 Path: %s\n", entry.BackupPath)
|
fmt.Printf("📂 Path: %s\n", entry.BackupPath)
|
||||||
fmt.Printf("📦 Backup Type: %s\n", entry.BackupType)
|
fmt.Printf("[PKG] Backup Type: %s\n", entry.BackupType)
|
||||||
fmt.Printf("💾 Size: %s (%d bytes)\n", catalog.FormatSize(entry.SizeBytes), entry.SizeBytes)
|
fmt.Printf("[SAVE] Size: %s (%d bytes)\n", catalog.FormatSize(entry.SizeBytes), entry.SizeBytes)
|
||||||
fmt.Printf("🔐 SHA256: %s\n", entry.SHA256)
|
fmt.Printf("[HASH] SHA256: %s\n", entry.SHA256)
|
||||||
fmt.Printf("📅 Created: %s\n", entry.CreatedAt.Format("2006-01-02 15:04:05 MST"))
|
fmt.Printf("📅 Created: %s\n", entry.CreatedAt.Format("2006-01-02 15:04:05 MST"))
|
||||||
fmt.Printf("⏱️ Duration: %.2fs\n", entry.Duration)
|
fmt.Printf("[TIME] Duration: %.2fs\n", entry.Duration)
|
||||||
fmt.Printf("📋 Status: %s\n", entry.Status)
|
fmt.Printf("[LOG] Status: %s\n", entry.Status)
|
||||||
|
|
||||||
if entry.Compression != "" {
|
if entry.Compression != "" {
|
||||||
fmt.Printf("📦 Compression: %s\n", entry.Compression)
|
fmt.Printf("[PKG] Compression: %s\n", entry.Compression)
|
||||||
}
|
}
|
||||||
if entry.Encrypted {
|
if entry.Encrypted {
|
||||||
fmt.Printf("🔒 Encrypted: yes\n")
|
fmt.Printf("[LOCK] Encrypted: yes\n")
|
||||||
}
|
}
|
||||||
if entry.CloudLocation != "" {
|
if entry.CloudLocation != "" {
|
||||||
fmt.Printf("☁️ Cloud: %s\n", entry.CloudLocation)
|
fmt.Printf("[CLOUD] Cloud: %s\n", entry.CloudLocation)
|
||||||
}
|
}
|
||||||
if entry.RetentionPolicy != "" {
|
if entry.RetentionPolicy != "" {
|
||||||
fmt.Printf("📆 Retention: %s\n", entry.RetentionPolicy)
|
fmt.Printf("📆 Retention: %s\n", entry.RetentionPolicy)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n📊 Verification:\n")
|
fmt.Printf("\n[STATS] Verification:\n")
|
||||||
if entry.VerifiedAt != nil {
|
if entry.VerifiedAt != nil {
|
||||||
status := "❌ Failed"
|
status := "[FAIL] Failed"
|
||||||
if entry.VerifyValid != nil && *entry.VerifyValid {
|
if entry.VerifyValid != nil && *entry.VerifyValid {
|
||||||
status = "✅ Valid"
|
status = "[OK] Valid"
|
||||||
}
|
}
|
||||||
fmt.Printf(" Status: %s (checked %s)\n", status, entry.VerifiedAt.Format("2006-01-02 15:04"))
|
fmt.Printf(" Status: %s (checked %s)\n", status, entry.VerifiedAt.Format("2006-01-02 15:04"))
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" Status: ⏳ Not verified\n")
|
fmt.Printf(" Status: [WAIT] Not verified\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n🧪 DR Drill Test:\n")
|
fmt.Printf("\n[TEST] DR Drill Test:\n")
|
||||||
if entry.DrillTestedAt != nil {
|
if entry.DrillTestedAt != nil {
|
||||||
status := "❌ Failed"
|
status := "[FAIL] Failed"
|
||||||
if entry.DrillSuccess != nil && *entry.DrillSuccess {
|
if entry.DrillSuccess != nil && *entry.DrillSuccess {
|
||||||
status = "✅ Passed"
|
status = "[OK] Passed"
|
||||||
}
|
}
|
||||||
fmt.Printf(" Status: %s (tested %s)\n", status, entry.DrillTestedAt.Format("2006-01-02 15:04"))
|
fmt.Printf(" Status: %s (tested %s)\n", status, entry.DrillTestedAt.Format("2006-01-02 15:04"))
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" Status: ⏳ Not tested\n")
|
fmt.Printf(" Status: [WAIT] Not tested\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(entry.Metadata) > 0 {
|
if len(entry.Metadata) > 0 {
|
||||||
fmt.Printf("\n📝 Additional Metadata:\n")
|
fmt.Printf("\n[NOTE] Additional Metadata:\n")
|
||||||
for k, v := range entry.Metadata {
|
for k, v := range entry.Metadata {
|
||||||
fmt.Printf(" %s: %s\n", k, v)
|
fmt.Printf(" %s: %s\n", k, v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("\n=====================================================\n")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -115,7 +115,7 @@ func runCleanup(cmd *cobra.Command, args []string) error {
|
|||||||
DryRun: dryRun,
|
DryRun: dryRun,
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("🗑️ Cleanup Policy:\n")
|
fmt.Printf("[CLEANUP] Cleanup Policy:\n")
|
||||||
fmt.Printf(" Directory: %s\n", backupDir)
|
fmt.Printf(" Directory: %s\n", backupDir)
|
||||||
fmt.Printf(" Retention: %d days\n", policy.RetentionDays)
|
fmt.Printf(" Retention: %d days\n", policy.RetentionDays)
|
||||||
fmt.Printf(" Min backups: %d\n", policy.MinBackups)
|
fmt.Printf(" Min backups: %d\n", policy.MinBackups)
|
||||||
@@ -142,16 +142,16 @@ func runCleanup(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Display results
|
// Display results
|
||||||
fmt.Printf("📊 Results:\n")
|
fmt.Printf("[RESULTS] Results:\n")
|
||||||
fmt.Printf(" Total backups: %d\n", result.TotalBackups)
|
fmt.Printf(" Total backups: %d\n", result.TotalBackups)
|
||||||
fmt.Printf(" Eligible for deletion: %d\n", result.EligibleForDeletion)
|
fmt.Printf(" Eligible for deletion: %d\n", result.EligibleForDeletion)
|
||||||
|
|
||||||
if len(result.Deleted) > 0 {
|
if len(result.Deleted) > 0 {
|
||||||
fmt.Printf("\n")
|
fmt.Printf("\n")
|
||||||
if dryRun {
|
if dryRun {
|
||||||
fmt.Printf("🔍 Would delete %d backup(s):\n", len(result.Deleted))
|
fmt.Printf("[DRY-RUN] Would delete %d backup(s):\n", len(result.Deleted))
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("✅ Deleted %d backup(s):\n", len(result.Deleted))
|
fmt.Printf("[OK] Deleted %d backup(s):\n", len(result.Deleted))
|
||||||
}
|
}
|
||||||
for _, file := range result.Deleted {
|
for _, file := range result.Deleted {
|
||||||
fmt.Printf(" - %s\n", filepath.Base(file))
|
fmt.Printf(" - %s\n", filepath.Base(file))
|
||||||
@@ -159,33 +159,33 @@ func runCleanup(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(result.Kept) > 0 && len(result.Kept) <= 10 {
|
if len(result.Kept) > 0 && len(result.Kept) <= 10 {
|
||||||
fmt.Printf("\n📦 Kept %d backup(s):\n", len(result.Kept))
|
fmt.Printf("\n[KEPT] Kept %d backup(s):\n", len(result.Kept))
|
||||||
for _, file := range result.Kept {
|
for _, file := range result.Kept {
|
||||||
fmt.Printf(" - %s\n", filepath.Base(file))
|
fmt.Printf(" - %s\n", filepath.Base(file))
|
||||||
}
|
}
|
||||||
} else if len(result.Kept) > 10 {
|
} else if len(result.Kept) > 10 {
|
||||||
fmt.Printf("\n📦 Kept %d backup(s)\n", len(result.Kept))
|
fmt.Printf("\n[KEPT] Kept %d backup(s)\n", len(result.Kept))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !dryRun && result.SpaceFreed > 0 {
|
if !dryRun && result.SpaceFreed > 0 {
|
||||||
fmt.Printf("\n💾 Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
|
fmt.Printf("\n[FREED] Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(result.Errors) > 0 {
|
if len(result.Errors) > 0 {
|
||||||
fmt.Printf("\n⚠️ Errors:\n")
|
fmt.Printf("\n[WARN] Errors:\n")
|
||||||
for _, err := range result.Errors {
|
for _, err := range result.Errors {
|
||||||
fmt.Printf(" - %v\n", err)
|
fmt.Printf(" - %v\n", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(strings.Repeat("─", 50))
|
fmt.Println(strings.Repeat("-", 50))
|
||||||
|
|
||||||
if dryRun {
|
if dryRun {
|
||||||
fmt.Println("✅ Dry run completed (no files were deleted)")
|
fmt.Println("[OK] Dry run completed (no files were deleted)")
|
||||||
} else if len(result.Deleted) > 0 {
|
} else if len(result.Deleted) > 0 {
|
||||||
fmt.Println("✅ Cleanup completed successfully")
|
fmt.Println("[OK] Cleanup completed successfully")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("ℹ️ No backups eligible for deletion")
|
fmt.Println("[INFO] No backups eligible for deletion")
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -212,7 +212,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
|
|||||||
return fmt.Errorf("invalid cloud URI: %w", err)
|
return fmt.Errorf("invalid cloud URI: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("☁️ Cloud Cleanup Policy:\n")
|
fmt.Printf("[CLOUD] Cloud Cleanup Policy:\n")
|
||||||
fmt.Printf(" URI: %s\n", uri)
|
fmt.Printf(" URI: %s\n", uri)
|
||||||
fmt.Printf(" Provider: %s\n", cloudURI.Provider)
|
fmt.Printf(" Provider: %s\n", cloudURI.Provider)
|
||||||
fmt.Printf(" Bucket: %s\n", cloudURI.Bucket)
|
fmt.Printf(" Bucket: %s\n", cloudURI.Bucket)
|
||||||
@@ -295,7 +295,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Display results
|
// Display results
|
||||||
fmt.Printf("📊 Results:\n")
|
fmt.Printf("[RESULTS] Results:\n")
|
||||||
fmt.Printf(" Total backups: %d\n", totalBackups)
|
fmt.Printf(" Total backups: %d\n", totalBackups)
|
||||||
fmt.Printf(" Eligible for deletion: %d\n", len(toDelete))
|
fmt.Printf(" Eligible for deletion: %d\n", len(toDelete))
|
||||||
fmt.Printf(" Will keep: %d\n", len(toKeep))
|
fmt.Printf(" Will keep: %d\n", len(toKeep))
|
||||||
@@ -303,9 +303,9 @@ func runCloudCleanup(ctx context.Context, uri string) error {
|
|||||||
|
|
||||||
if len(toDelete) > 0 {
|
if len(toDelete) > 0 {
|
||||||
if dryRun {
|
if dryRun {
|
||||||
fmt.Printf("🔍 Would delete %d backup(s):\n", len(toDelete))
|
fmt.Printf("[DRY-RUN] Would delete %d backup(s):\n", len(toDelete))
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("🗑️ Deleting %d backup(s):\n", len(toDelete))
|
fmt.Printf("[DELETE] Deleting %d backup(s):\n", len(toDelete))
|
||||||
}
|
}
|
||||||
|
|
||||||
var totalSize int64
|
var totalSize int64
|
||||||
@@ -321,7 +321,7 @@ func runCloudCleanup(ctx context.Context, uri string) error {
|
|||||||
|
|
||||||
if !dryRun {
|
if !dryRun {
|
||||||
if err := backend.Delete(ctx, backup.Key); err != nil {
|
if err := backend.Delete(ctx, backup.Key); err != nil {
|
||||||
fmt.Printf(" ❌ Error: %v\n", err)
|
fmt.Printf(" [FAIL] Error: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
deletedCount++
|
deletedCount++
|
||||||
// Also try to delete metadata
|
// Also try to delete metadata
|
||||||
@@ -330,12 +330,12 @@ func runCloudCleanup(ctx context.Context, uri string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n💾 Space %s: %s\n",
|
fmt.Printf("\n[FREED] Space %s: %s\n",
|
||||||
map[bool]string{true: "would be freed", false: "freed"}[dryRun],
|
map[bool]string{true: "would be freed", false: "freed"}[dryRun],
|
||||||
cloud.FormatSize(totalSize))
|
cloud.FormatSize(totalSize))
|
||||||
|
|
||||||
if !dryRun && deletedCount > 0 {
|
if !dryRun && deletedCount > 0 {
|
||||||
fmt.Printf("✅ Successfully deleted %d backup(s)\n", deletedCount)
|
fmt.Printf("[OK] Successfully deleted %d backup(s)\n", deletedCount)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("No backups eligible for deletion")
|
fmt.Println("No backups eligible for deletion")
|
||||||
@@ -405,7 +405,7 @@ func runGFSCleanup(backupDir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Display tier breakdown
|
// Display tier breakdown
|
||||||
fmt.Printf("📊 Backup Classification:\n")
|
fmt.Printf("[STATS] Backup Classification:\n")
|
||||||
fmt.Printf(" Yearly: %d\n", result.YearlyKept)
|
fmt.Printf(" Yearly: %d\n", result.YearlyKept)
|
||||||
fmt.Printf(" Monthly: %d\n", result.MonthlyKept)
|
fmt.Printf(" Monthly: %d\n", result.MonthlyKept)
|
||||||
fmt.Printf(" Weekly: %d\n", result.WeeklyKept)
|
fmt.Printf(" Weekly: %d\n", result.WeeklyKept)
|
||||||
@@ -416,9 +416,9 @@ func runGFSCleanup(backupDir string) error {
|
|||||||
// Display deletions
|
// Display deletions
|
||||||
if len(result.Deleted) > 0 {
|
if len(result.Deleted) > 0 {
|
||||||
if dryRun {
|
if dryRun {
|
||||||
fmt.Printf("🔍 Would delete %d backup(s):\n", len(result.Deleted))
|
fmt.Printf("[SEARCH] Would delete %d backup(s):\n", len(result.Deleted))
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("✅ Deleted %d backup(s):\n", len(result.Deleted))
|
fmt.Printf("[OK] Deleted %d backup(s):\n", len(result.Deleted))
|
||||||
}
|
}
|
||||||
for _, file := range result.Deleted {
|
for _, file := range result.Deleted {
|
||||||
fmt.Printf(" - %s\n", filepath.Base(file))
|
fmt.Printf(" - %s\n", filepath.Base(file))
|
||||||
@@ -427,7 +427,7 @@ func runGFSCleanup(backupDir string) error {
|
|||||||
|
|
||||||
// Display kept backups (limited display)
|
// Display kept backups (limited display)
|
||||||
if len(result.Kept) > 0 && len(result.Kept) <= 15 {
|
if len(result.Kept) > 0 && len(result.Kept) <= 15 {
|
||||||
fmt.Printf("\n📦 Kept %d backup(s):\n", len(result.Kept))
|
fmt.Printf("\n[PKG] Kept %d backup(s):\n", len(result.Kept))
|
||||||
for _, file := range result.Kept {
|
for _, file := range result.Kept {
|
||||||
// Show tier classification
|
// Show tier classification
|
||||||
info, _ := os.Stat(file)
|
info, _ := os.Stat(file)
|
||||||
@@ -440,28 +440,28 @@ func runGFSCleanup(backupDir string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if len(result.Kept) > 15 {
|
} else if len(result.Kept) > 15 {
|
||||||
fmt.Printf("\n📦 Kept %d backup(s)\n", len(result.Kept))
|
fmt.Printf("\n[PKG] Kept %d backup(s)\n", len(result.Kept))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !dryRun && result.SpaceFreed > 0 {
|
if !dryRun && result.SpaceFreed > 0 {
|
||||||
fmt.Printf("\n💾 Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
|
fmt.Printf("\n[SAVE] Space freed: %s\n", metadata.FormatSize(result.SpaceFreed))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(result.Errors) > 0 {
|
if len(result.Errors) > 0 {
|
||||||
fmt.Printf("\n⚠️ Errors:\n")
|
fmt.Printf("\n[WARN] Errors:\n")
|
||||||
for _, err := range result.Errors {
|
for _, err := range result.Errors {
|
||||||
fmt.Printf(" - %v\n", err)
|
fmt.Printf(" - %v\n", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(strings.Repeat("─", 50))
|
fmt.Println(strings.Repeat("-", 50))
|
||||||
|
|
||||||
if dryRun {
|
if dryRun {
|
||||||
fmt.Println("✅ GFS dry run completed (no files were deleted)")
|
fmt.Println("[OK] GFS dry run completed (no files were deleted)")
|
||||||
} else if len(result.Deleted) > 0 {
|
} else if len(result.Deleted) > 0 {
|
||||||
fmt.Println("✅ GFS cleanup completed successfully")
|
fmt.Println("[OK] GFS cleanup completed successfully")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("ℹ️ No backups eligible for deletion under GFS policy")
|
fmt.Println("[INFO] No backups eligible for deletion under GFS policy")
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
34
cmd/cloud.go
34
cmd/cloud.go
@@ -189,12 +189,12 @@ func runCloudUpload(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("☁️ Uploading %d file(s) to %s...\n\n", len(files), backend.Name())
|
fmt.Printf("[CLOUD] Uploading %d file(s) to %s...\n\n", len(files), backend.Name())
|
||||||
|
|
||||||
successCount := 0
|
successCount := 0
|
||||||
for _, localPath := range files {
|
for _, localPath := range files {
|
||||||
filename := filepath.Base(localPath)
|
filename := filepath.Base(localPath)
|
||||||
fmt.Printf("📤 %s\n", filename)
|
fmt.Printf("[UPLOAD] %s\n", filename)
|
||||||
|
|
||||||
// Progress callback
|
// Progress callback
|
||||||
var lastPercent int
|
var lastPercent int
|
||||||
@@ -214,21 +214,21 @@ func runCloudUpload(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
err := backend.Upload(ctx, localPath, filename, progress)
|
err := backend.Upload(ctx, localPath, filename, progress)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf(" ❌ Failed: %v\n\n", err)
|
fmt.Printf(" [FAIL] Failed: %v\n\n", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get file size
|
// Get file size
|
||||||
if info, err := os.Stat(localPath); err == nil {
|
if info, err := os.Stat(localPath); err == nil {
|
||||||
fmt.Printf(" ✅ Uploaded (%s)\n\n", cloud.FormatSize(info.Size()))
|
fmt.Printf(" [OK] Uploaded (%s)\n\n", cloud.FormatSize(info.Size()))
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" ✅ Uploaded\n\n")
|
fmt.Printf(" [OK] Uploaded\n\n")
|
||||||
}
|
}
|
||||||
successCount++
|
successCount++
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(strings.Repeat("─", 50))
|
fmt.Println(strings.Repeat("-", 50))
|
||||||
fmt.Printf("✅ Successfully uploaded %d/%d file(s)\n", successCount, len(files))
|
fmt.Printf("[OK] Successfully uploaded %d/%d file(s)\n", successCount, len(files))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -248,8 +248,8 @@ func runCloudDownload(cmd *cobra.Command, args []string) error {
|
|||||||
localPath = filepath.Join(localPath, filepath.Base(remotePath))
|
localPath = filepath.Join(localPath, filepath.Base(remotePath))
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("☁️ Downloading from %s...\n\n", backend.Name())
|
fmt.Printf("[CLOUD] Downloading from %s...\n\n", backend.Name())
|
||||||
fmt.Printf("📥 %s → %s\n", remotePath, localPath)
|
fmt.Printf("[DOWNLOAD] %s -> %s\n", remotePath, localPath)
|
||||||
|
|
||||||
// Progress callback
|
// Progress callback
|
||||||
var lastPercent int
|
var lastPercent int
|
||||||
@@ -274,9 +274,9 @@ func runCloudDownload(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
// Get file size
|
// Get file size
|
||||||
if info, err := os.Stat(localPath); err == nil {
|
if info, err := os.Stat(localPath); err == nil {
|
||||||
fmt.Printf(" ✅ Downloaded (%s)\n", cloud.FormatSize(info.Size()))
|
fmt.Printf(" [OK] Downloaded (%s)\n", cloud.FormatSize(info.Size()))
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" ✅ Downloaded\n")
|
fmt.Printf(" [OK] Downloaded\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -294,7 +294,7 @@ func runCloudList(cmd *cobra.Command, args []string) error {
|
|||||||
prefix = args[0]
|
prefix = args[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("☁️ Listing backups in %s/%s...\n\n", backend.Name(), cloudBucket)
|
fmt.Printf("[CLOUD] Listing backups in %s/%s...\n\n", backend.Name(), cloudBucket)
|
||||||
|
|
||||||
backups, err := backend.List(ctx, prefix)
|
backups, err := backend.List(ctx, prefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -311,7 +311,7 @@ func runCloudList(cmd *cobra.Command, args []string) error {
|
|||||||
totalSize += backup.Size
|
totalSize += backup.Size
|
||||||
|
|
||||||
if cloudVerbose {
|
if cloudVerbose {
|
||||||
fmt.Printf("📦 %s\n", backup.Name)
|
fmt.Printf("[FILE] %s\n", backup.Name)
|
||||||
fmt.Printf(" Size: %s\n", cloud.FormatSize(backup.Size))
|
fmt.Printf(" Size: %s\n", cloud.FormatSize(backup.Size))
|
||||||
fmt.Printf(" Modified: %s\n", backup.LastModified.Format(time.RFC3339))
|
fmt.Printf(" Modified: %s\n", backup.LastModified.Format(time.RFC3339))
|
||||||
if backup.StorageClass != "" {
|
if backup.StorageClass != "" {
|
||||||
@@ -328,7 +328,7 @@ func runCloudList(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(strings.Repeat("─", 50))
|
fmt.Println(strings.Repeat("-", 50))
|
||||||
fmt.Printf("Total: %d backup(s), %s\n", len(backups), cloud.FormatSize(totalSize))
|
fmt.Printf("Total: %d backup(s), %s\n", len(backups), cloud.FormatSize(totalSize))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -360,7 +360,7 @@ func runCloudDelete(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
// Confirmation prompt
|
// Confirmation prompt
|
||||||
if !cloudConfirm {
|
if !cloudConfirm {
|
||||||
fmt.Printf("⚠️ Delete %s (%s) from cloud storage?\n", remotePath, cloud.FormatSize(size))
|
fmt.Printf("[WARN] Delete %s (%s) from cloud storage?\n", remotePath, cloud.FormatSize(size))
|
||||||
fmt.Print("Type 'yes' to confirm: ")
|
fmt.Print("Type 'yes' to confirm: ")
|
||||||
var response string
|
var response string
|
||||||
fmt.Scanln(&response)
|
fmt.Scanln(&response)
|
||||||
@@ -370,14 +370,14 @@ func runCloudDelete(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("🗑️ Deleting %s...\n", remotePath)
|
fmt.Printf("[DELETE] Deleting %s...\n", remotePath)
|
||||||
|
|
||||||
err = backend.Delete(ctx, remotePath)
|
err = backend.Delete(ctx, remotePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("delete failed: %w", err)
|
return fmt.Errorf("delete failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("✅ Deleted %s (%s)\n", remotePath, cloud.FormatSize(size))
|
fmt.Printf("[OK] Deleted %s (%s)\n", remotePath, cloud.FormatSize(size))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -61,10 +61,10 @@ func runCPUInfo(ctx context.Context) error {
|
|||||||
|
|
||||||
// Show current vs optimal
|
// Show current vs optimal
|
||||||
if cfg.AutoDetectCores {
|
if cfg.AutoDetectCores {
|
||||||
fmt.Println("\n✅ CPU optimization is enabled")
|
fmt.Println("\n[OK] CPU optimization is enabled")
|
||||||
fmt.Println("Job counts are automatically optimized based on detected hardware")
|
fmt.Println("Job counts are automatically optimized based on detected hardware")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("\n⚠️ CPU optimization is disabled")
|
fmt.Println("\n[WARN] CPU optimization is disabled")
|
||||||
fmt.Println("Consider enabling --auto-detect-cores for better performance")
|
fmt.Println("Consider enabling --auto-detect-cores for better performance")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
579
cmd/dedup.go
Normal file
579
cmd/dedup.go
Normal file
@@ -0,0 +1,579 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dbbackup/internal/dedup"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var dedupCmd = &cobra.Command{
|
||||||
|
Use: "dedup",
|
||||||
|
Short: "Deduplicated backup operations",
|
||||||
|
Long: `Content-defined chunking deduplication for space-efficient backups.
|
||||||
|
|
||||||
|
Similar to restic/borgbackup but with native database dump support.
|
||||||
|
|
||||||
|
Features:
|
||||||
|
- Content-defined chunking (CDC) with Buzhash rolling hash
|
||||||
|
- SHA-256 content-addressed storage
|
||||||
|
- AES-256-GCM encryption (optional)
|
||||||
|
- Gzip compression (optional)
|
||||||
|
- SQLite index for fast lookups
|
||||||
|
|
||||||
|
Storage Structure:
|
||||||
|
<dedup-dir>/
|
||||||
|
chunks/ # Content-addressed chunk files
|
||||||
|
ab/cdef... # Sharded by first 2 chars of hash
|
||||||
|
manifests/ # JSON manifest per backup
|
||||||
|
chunks.db # SQLite index`,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupBackupCmd = &cobra.Command{
|
||||||
|
Use: "backup <file>",
|
||||||
|
Short: "Create a deduplicated backup of a file",
|
||||||
|
Long: `Chunk a file using content-defined chunking and store deduplicated chunks.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
dbbackup dedup backup /path/to/database.dump
|
||||||
|
dbbackup dedup backup mydb.sql --compress --encrypt`,
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: runDedupBackup,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupRestoreCmd = &cobra.Command{
|
||||||
|
Use: "restore <manifest-id> <output-file>",
|
||||||
|
Short: "Restore a backup from its manifest",
|
||||||
|
Long: `Reconstruct a file from its deduplicated chunks.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
dbbackup dedup restore 2026-01-07_120000_mydb /tmp/restored.dump
|
||||||
|
dbbackup dedup list # to see available manifests`,
|
||||||
|
Args: cobra.ExactArgs(2),
|
||||||
|
RunE: runDedupRestore,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupListCmd = &cobra.Command{
|
||||||
|
Use: "list",
|
||||||
|
Short: "List all deduplicated backups",
|
||||||
|
RunE: runDedupList,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupStatsCmd = &cobra.Command{
|
||||||
|
Use: "stats",
|
||||||
|
Short: "Show deduplication statistics",
|
||||||
|
RunE: runDedupStats,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupGCCmd = &cobra.Command{
|
||||||
|
Use: "gc",
|
||||||
|
Short: "Garbage collect unreferenced chunks",
|
||||||
|
Long: `Remove chunks that are no longer referenced by any manifest.
|
||||||
|
|
||||||
|
Run after deleting old backups to reclaim space.`,
|
||||||
|
RunE: runDedupGC,
|
||||||
|
}
|
||||||
|
|
||||||
|
var dedupDeleteCmd = &cobra.Command{
|
||||||
|
Use: "delete <manifest-id>",
|
||||||
|
Short: "Delete a backup manifest (chunks cleaned by gc)",
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
RunE: runDedupDelete,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flags
|
||||||
|
var (
|
||||||
|
dedupDir string
|
||||||
|
dedupCompress bool
|
||||||
|
dedupEncrypt bool
|
||||||
|
dedupKey string
|
||||||
|
dedupName string
|
||||||
|
dedupDBType string
|
||||||
|
dedupDBName string
|
||||||
|
dedupDBHost string
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(dedupCmd)
|
||||||
|
dedupCmd.AddCommand(dedupBackupCmd)
|
||||||
|
dedupCmd.AddCommand(dedupRestoreCmd)
|
||||||
|
dedupCmd.AddCommand(dedupListCmd)
|
||||||
|
dedupCmd.AddCommand(dedupStatsCmd)
|
||||||
|
dedupCmd.AddCommand(dedupGCCmd)
|
||||||
|
dedupCmd.AddCommand(dedupDeleteCmd)
|
||||||
|
|
||||||
|
// Global dedup flags
|
||||||
|
dedupCmd.PersistentFlags().StringVar(&dedupDir, "dedup-dir", "", "Dedup storage directory (default: $BACKUP_DIR/dedup)")
|
||||||
|
dedupCmd.PersistentFlags().BoolVar(&dedupCompress, "compress", true, "Compress chunks with gzip")
|
||||||
|
dedupCmd.PersistentFlags().BoolVar(&dedupEncrypt, "encrypt", false, "Encrypt chunks with AES-256-GCM")
|
||||||
|
dedupCmd.PersistentFlags().StringVar(&dedupKey, "key", "", "Encryption key (hex) or use DBBACKUP_DEDUP_KEY env")
|
||||||
|
|
||||||
|
// Backup-specific flags
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupName, "name", "", "Optional backup name")
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupDBType, "db-type", "", "Database type (postgres/mysql)")
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupDBName, "db-name", "", "Database name")
|
||||||
|
dedupBackupCmd.Flags().StringVar(&dedupDBHost, "db-host", "", "Database host")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDedupDir() string {
|
||||||
|
if dedupDir != "" {
|
||||||
|
return dedupDir
|
||||||
|
}
|
||||||
|
if cfg != nil && cfg.BackupDir != "" {
|
||||||
|
return filepath.Join(cfg.BackupDir, "dedup")
|
||||||
|
}
|
||||||
|
return filepath.Join(os.Getenv("HOME"), "db_backups", "dedup")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getEncryptionKey() string {
|
||||||
|
if dedupKey != "" {
|
||||||
|
return dedupKey
|
||||||
|
}
|
||||||
|
return os.Getenv("DBBACKUP_DEDUP_KEY")
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupBackup(cmd *cobra.Command, args []string) error {
|
||||||
|
inputPath := args[0]
|
||||||
|
|
||||||
|
// Open input file
|
||||||
|
file, err := os.Open(inputPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open input file: %w", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
info, err := file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to stat input file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup dedup storage
|
||||||
|
basePath := getDedupDir()
|
||||||
|
encKey := ""
|
||||||
|
if dedupEncrypt {
|
||||||
|
encKey = getEncryptionKey()
|
||||||
|
if encKey == "" {
|
||||||
|
return fmt.Errorf("encryption enabled but no key provided (use --key or DBBACKUP_DEDUP_KEY)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := dedup.NewChunkStore(dedup.StoreConfig{
|
||||||
|
BasePath: basePath,
|
||||||
|
Compress: dedupCompress,
|
||||||
|
EncryptionKey: encKey,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifestStore, err := dedup.NewManifestStore(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open manifest store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
index, err := dedup.NewChunkIndex(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk index: %w", err)
|
||||||
|
}
|
||||||
|
defer index.Close()
|
||||||
|
|
||||||
|
// Generate manifest ID
|
||||||
|
now := time.Now()
|
||||||
|
manifestID := now.Format("2006-01-02_150405")
|
||||||
|
if dedupDBName != "" {
|
||||||
|
manifestID += "_" + dedupDBName
|
||||||
|
} else {
|
||||||
|
base := filepath.Base(inputPath)
|
||||||
|
ext := filepath.Ext(base)
|
||||||
|
manifestID += "_" + strings.TrimSuffix(base, ext)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Creating deduplicated backup: %s\n", manifestID)
|
||||||
|
fmt.Printf("Input: %s (%s)\n", inputPath, formatBytes(info.Size()))
|
||||||
|
fmt.Printf("Store: %s\n", basePath)
|
||||||
|
|
||||||
|
// Hash the entire file for verification
|
||||||
|
file.Seek(0, 0)
|
||||||
|
h := sha256.New()
|
||||||
|
io.Copy(h, file)
|
||||||
|
fileHash := hex.EncodeToString(h.Sum(nil))
|
||||||
|
file.Seek(0, 0)
|
||||||
|
|
||||||
|
// Chunk the file
|
||||||
|
chunker := dedup.NewChunker(file, dedup.DefaultChunkerConfig())
|
||||||
|
var chunks []dedup.ChunkRef
|
||||||
|
var totalSize, storedSize int64
|
||||||
|
var chunkCount, newChunks int
|
||||||
|
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
for {
|
||||||
|
chunk, err := chunker.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("chunking failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkCount++
|
||||||
|
totalSize += int64(chunk.Length)
|
||||||
|
|
||||||
|
// Store chunk (deduplication happens here)
|
||||||
|
isNew, err := store.Put(chunk)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to store chunk: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if isNew {
|
||||||
|
newChunks++
|
||||||
|
storedSize += int64(chunk.Length)
|
||||||
|
// Record in index
|
||||||
|
index.AddChunk(chunk.Hash, chunk.Length, chunk.Length)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunks = append(chunks, dedup.ChunkRef{
|
||||||
|
Hash: chunk.Hash,
|
||||||
|
Offset: chunk.Offset,
|
||||||
|
Length: chunk.Length,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Progress
|
||||||
|
if chunkCount%1000 == 0 {
|
||||||
|
fmt.Printf("\r Processed %d chunks, %d new...", chunkCount, newChunks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := time.Since(startTime)
|
||||||
|
|
||||||
|
// Calculate dedup ratio
|
||||||
|
dedupRatio := 0.0
|
||||||
|
if totalSize > 0 {
|
||||||
|
dedupRatio = 1.0 - float64(storedSize)/float64(totalSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create manifest
|
||||||
|
manifest := &dedup.Manifest{
|
||||||
|
ID: manifestID,
|
||||||
|
Name: dedupName,
|
||||||
|
CreatedAt: now,
|
||||||
|
DatabaseType: dedupDBType,
|
||||||
|
DatabaseName: dedupDBName,
|
||||||
|
DatabaseHost: dedupDBHost,
|
||||||
|
Chunks: chunks,
|
||||||
|
OriginalSize: totalSize,
|
||||||
|
StoredSize: storedSize,
|
||||||
|
ChunkCount: chunkCount,
|
||||||
|
NewChunks: newChunks,
|
||||||
|
DedupRatio: dedupRatio,
|
||||||
|
Encrypted: dedupEncrypt,
|
||||||
|
Compressed: dedupCompress,
|
||||||
|
SHA256: fileHash,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := manifestStore.Save(manifest); err != nil {
|
||||||
|
return fmt.Errorf("failed to save manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := index.AddManifest(manifest); err != nil {
|
||||||
|
log.Warn("Failed to index manifest", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\r \r")
|
||||||
|
fmt.Printf("\nBackup complete!\n")
|
||||||
|
fmt.Printf(" Manifest: %s\n", manifestID)
|
||||||
|
fmt.Printf(" Chunks: %d total, %d new\n", chunkCount, newChunks)
|
||||||
|
fmt.Printf(" Original: %s\n", formatBytes(totalSize))
|
||||||
|
fmt.Printf(" Stored: %s (new data)\n", formatBytes(storedSize))
|
||||||
|
fmt.Printf(" Dedup ratio: %.1f%%\n", dedupRatio*100)
|
||||||
|
fmt.Printf(" Duration: %s\n", duration.Round(time.Millisecond))
|
||||||
|
fmt.Printf(" Throughput: %s/s\n", formatBytes(int64(float64(totalSize)/duration.Seconds())))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupRestore(cmd *cobra.Command, args []string) error {
|
||||||
|
manifestID := args[0]
|
||||||
|
outputPath := args[1]
|
||||||
|
|
||||||
|
basePath := getDedupDir()
|
||||||
|
encKey := ""
|
||||||
|
if dedupEncrypt {
|
||||||
|
encKey = getEncryptionKey()
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := dedup.NewChunkStore(dedup.StoreConfig{
|
||||||
|
BasePath: basePath,
|
||||||
|
Compress: dedupCompress,
|
||||||
|
EncryptionKey: encKey,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifestStore, err := dedup.NewManifestStore(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open manifest store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifest, err := manifestStore.Load(manifestID)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to load manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Restoring backup: %s\n", manifestID)
|
||||||
|
fmt.Printf(" Created: %s\n", manifest.CreatedAt.Format(time.RFC3339))
|
||||||
|
fmt.Printf(" Size: %s\n", formatBytes(manifest.OriginalSize))
|
||||||
|
fmt.Printf(" Chunks: %d\n", manifest.ChunkCount)
|
||||||
|
|
||||||
|
// Create output file
|
||||||
|
outFile, err := os.Create(outputPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create output file: %w", err)
|
||||||
|
}
|
||||||
|
defer outFile.Close()
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
writer := io.MultiWriter(outFile, h)
|
||||||
|
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
for i, ref := range manifest.Chunks {
|
||||||
|
chunk, err := store.Get(ref.Hash)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read chunk %d (%s): %w", i, ref.Hash[:8], err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := writer.Write(chunk.Data); err != nil {
|
||||||
|
return fmt.Errorf("failed to write chunk %d: %w", i, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i+1)%1000 == 0 {
|
||||||
|
fmt.Printf("\r Restored %d/%d chunks...", i+1, manifest.ChunkCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := time.Since(startTime)
|
||||||
|
restoredHash := hex.EncodeToString(h.Sum(nil))
|
||||||
|
|
||||||
|
fmt.Printf("\r \r")
|
||||||
|
fmt.Printf("\nRestore complete!\n")
|
||||||
|
fmt.Printf(" Output: %s\n", outputPath)
|
||||||
|
fmt.Printf(" Duration: %s\n", duration.Round(time.Millisecond))
|
||||||
|
|
||||||
|
// Verify hash
|
||||||
|
if manifest.SHA256 != "" {
|
||||||
|
if restoredHash == manifest.SHA256 {
|
||||||
|
fmt.Printf(" Verification: [OK] SHA-256 matches\n")
|
||||||
|
} else {
|
||||||
|
fmt.Printf(" Verification: [FAIL] SHA-256 MISMATCH!\n")
|
||||||
|
fmt.Printf(" Expected: %s\n", manifest.SHA256)
|
||||||
|
fmt.Printf(" Got: %s\n", restoredHash)
|
||||||
|
return fmt.Errorf("integrity verification failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupList(cmd *cobra.Command, args []string) error {
|
||||||
|
basePath := getDedupDir()
|
||||||
|
|
||||||
|
manifestStore, err := dedup.NewManifestStore(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open manifest store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manifests, err := manifestStore.ListAll()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to list manifests: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(manifests) == 0 {
|
||||||
|
fmt.Println("No deduplicated backups found.")
|
||||||
|
fmt.Printf("Store: %s\n", basePath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Deduplicated Backups (%s)\n\n", basePath)
|
||||||
|
fmt.Printf("%-30s %-12s %-10s %-10s %s\n", "ID", "SIZE", "DEDUP", "CHUNKS", "CREATED")
|
||||||
|
fmt.Println(strings.Repeat("-", 80))
|
||||||
|
|
||||||
|
for _, m := range manifests {
|
||||||
|
fmt.Printf("%-30s %-12s %-10.1f%% %-10d %s\n",
|
||||||
|
truncateStr(m.ID, 30),
|
||||||
|
formatBytes(m.OriginalSize),
|
||||||
|
m.DedupRatio*100,
|
||||||
|
m.ChunkCount,
|
||||||
|
m.CreatedAt.Format("2006-01-02 15:04"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupStats(cmd *cobra.Command, args []string) error {
|
||||||
|
basePath := getDedupDir()
|
||||||
|
|
||||||
|
index, err := dedup.NewChunkIndex(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk index: %w", err)
|
||||||
|
}
|
||||||
|
defer index.Close()
|
||||||
|
|
||||||
|
stats, err := index.Stats()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get stats: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := dedup.NewChunkStore(dedup.StoreConfig{BasePath: basePath})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
storeStats, err := store.Stats()
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("Failed to get store stats", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Deduplication Statistics\n")
|
||||||
|
fmt.Printf("========================\n\n")
|
||||||
|
fmt.Printf("Store: %s\n", basePath)
|
||||||
|
fmt.Printf("Manifests: %d\n", stats.TotalManifests)
|
||||||
|
fmt.Printf("Unique chunks: %d\n", stats.TotalChunks)
|
||||||
|
fmt.Printf("Total raw size: %s\n", formatBytes(stats.TotalSizeRaw))
|
||||||
|
fmt.Printf("Stored size: %s\n", formatBytes(stats.TotalSizeStored))
|
||||||
|
fmt.Printf("Dedup ratio: %.1f%%\n", stats.DedupRatio*100)
|
||||||
|
fmt.Printf("Space saved: %s\n", formatBytes(stats.TotalSizeRaw-stats.TotalSizeStored))
|
||||||
|
|
||||||
|
if storeStats != nil {
|
||||||
|
fmt.Printf("Disk usage: %s\n", formatBytes(storeStats.TotalSize))
|
||||||
|
fmt.Printf("Directories: %d\n", storeStats.Directories)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupGC(cmd *cobra.Command, args []string) error {
|
||||||
|
basePath := getDedupDir()
|
||||||
|
|
||||||
|
index, err := dedup.NewChunkIndex(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk index: %w", err)
|
||||||
|
}
|
||||||
|
defer index.Close()
|
||||||
|
|
||||||
|
store, err := dedup.NewChunkStore(dedup.StoreConfig{
|
||||||
|
BasePath: basePath,
|
||||||
|
Compress: dedupCompress,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find orphaned chunks
|
||||||
|
orphans, err := index.ListOrphanedChunks()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to find orphaned chunks: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(orphans) == 0 {
|
||||||
|
fmt.Println("No orphaned chunks to clean up.")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Found %d orphaned chunks\n", len(orphans))
|
||||||
|
|
||||||
|
var freed int64
|
||||||
|
for _, hash := range orphans {
|
||||||
|
if meta, _ := index.GetChunk(hash); meta != nil {
|
||||||
|
freed += meta.SizeStored
|
||||||
|
}
|
||||||
|
if err := store.Delete(hash); err != nil {
|
||||||
|
log.Warn("Failed to delete chunk", "hash", hash[:8], "error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := index.RemoveChunk(hash); err != nil {
|
||||||
|
log.Warn("Failed to remove chunk from index", "hash", hash[:8], "error", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Deleted %d chunks, freed %s\n", len(orphans), formatBytes(freed))
|
||||||
|
|
||||||
|
// Vacuum the index
|
||||||
|
if err := index.Vacuum(); err != nil {
|
||||||
|
log.Warn("Failed to vacuum index", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runDedupDelete(cmd *cobra.Command, args []string) error {
|
||||||
|
manifestID := args[0]
|
||||||
|
basePath := getDedupDir()
|
||||||
|
|
||||||
|
manifestStore, err := dedup.NewManifestStore(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open manifest store: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
index, err := dedup.NewChunkIndex(basePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open chunk index: %w", err)
|
||||||
|
}
|
||||||
|
defer index.Close()
|
||||||
|
|
||||||
|
// Load manifest to decrement chunk refs
|
||||||
|
manifest, err := manifestStore.Load(manifestID)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to load manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrement reference counts
|
||||||
|
for _, ref := range manifest.Chunks {
|
||||||
|
index.DecrementRef(ref.Hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete manifest
|
||||||
|
if err := manifestStore.Delete(manifestID); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := index.RemoveManifest(manifestID); err != nil {
|
||||||
|
log.Warn("Failed to remove manifest from index", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Deleted backup: %s\n", manifestID)
|
||||||
|
fmt.Println("Run 'dbbackup dedup gc' to reclaim space from unreferenced chunks.")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper functions
|
||||||
|
func formatBytes(b int64) string {
|
||||||
|
const unit = 1024
|
||||||
|
if b < unit {
|
||||||
|
return fmt.Sprintf("%d B", b)
|
||||||
|
}
|
||||||
|
div, exp := int64(unit), 0
|
||||||
|
for n := b / unit; n >= unit; n /= unit {
|
||||||
|
div *= unit
|
||||||
|
exp++
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp])
|
||||||
|
}
|
||||||
|
|
||||||
|
func truncateStr(s string, max int) string {
|
||||||
|
if len(s) <= max {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return s[:max-3] + "..."
|
||||||
|
}
|
||||||
58
cmd/drill.go
58
cmd/drill.go
@@ -318,7 +318,7 @@ func runDrillList(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("%-15s %-40s %-20s %s\n", "ID", "NAME", "IMAGE", "STATUS")
|
fmt.Printf("%-15s %-40s %-20s %s\n", "ID", "NAME", "IMAGE", "STATUS")
|
||||||
fmt.Println(strings.Repeat("─", 100))
|
fmt.Println(strings.Repeat("-", 100))
|
||||||
|
|
||||||
for _, c := range containers {
|
for _, c := range containers {
|
||||||
fmt.Printf("%-15s %-40s %-20s %s\n",
|
fmt.Printf("%-15s %-40s %-20s %s\n",
|
||||||
@@ -345,7 +345,7 @@ func runDrillCleanup(cmd *cobra.Command, args []string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("✅ Cleanup completed")
|
fmt.Println("[OK] Cleanup completed")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -369,32 +369,32 @@ func runDrillReport(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
func printDrillResult(result *drill.DrillResult) {
|
func printDrillResult(result *drill.DrillResult) {
|
||||||
fmt.Printf("\n")
|
fmt.Printf("\n")
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
fmt.Printf(" DR Drill Report: %s\n", result.DrillID)
|
fmt.Printf(" DR Drill Report: %s\n", result.DrillID)
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
|
fmt.Printf("=====================================================\n\n")
|
||||||
|
|
||||||
status := "✅ PASSED"
|
status := "[OK] PASSED"
|
||||||
if !result.Success {
|
if !result.Success {
|
||||||
status = "❌ FAILED"
|
status = "[FAIL] FAILED"
|
||||||
} else if result.Status == drill.StatusPartial {
|
} else if result.Status == drill.StatusPartial {
|
||||||
status = "⚠️ PARTIAL"
|
status = "[WARN] PARTIAL"
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("📋 Status: %s\n", status)
|
fmt.Printf("[LOG] Status: %s\n", status)
|
||||||
fmt.Printf("💾 Backup: %s\n", filepath.Base(result.BackupPath))
|
fmt.Printf("[SAVE] Backup: %s\n", filepath.Base(result.BackupPath))
|
||||||
fmt.Printf("🗄️ Database: %s (%s)\n", result.DatabaseName, result.DatabaseType)
|
fmt.Printf("[DB] Database: %s (%s)\n", result.DatabaseName, result.DatabaseType)
|
||||||
fmt.Printf("⏱️ Duration: %.2fs\n", result.Duration)
|
fmt.Printf("[TIME] Duration: %.2fs\n", result.Duration)
|
||||||
fmt.Printf("📅 Started: %s\n", result.StartTime.Format(time.RFC3339))
|
fmt.Printf("📅 Started: %s\n", result.StartTime.Format(time.RFC3339))
|
||||||
fmt.Printf("\n")
|
fmt.Printf("\n")
|
||||||
|
|
||||||
// Phases
|
// Phases
|
||||||
fmt.Printf("📊 Phases:\n")
|
fmt.Printf("[STATS] Phases:\n")
|
||||||
for _, phase := range result.Phases {
|
for _, phase := range result.Phases {
|
||||||
icon := "✅"
|
icon := "[OK]"
|
||||||
if phase.Status == "failed" {
|
if phase.Status == "failed" {
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
} else if phase.Status == "running" {
|
} else if phase.Status == "running" {
|
||||||
icon = "🔄"
|
icon = "[SYNC]"
|
||||||
}
|
}
|
||||||
fmt.Printf(" %s %-20s (%.2fs) %s\n", icon, phase.Name, phase.Duration, phase.Message)
|
fmt.Printf(" %s %-20s (%.2fs) %s\n", icon, phase.Name, phase.Duration, phase.Message)
|
||||||
}
|
}
|
||||||
@@ -412,10 +412,10 @@ func printDrillResult(result *drill.DrillResult) {
|
|||||||
fmt.Printf("\n")
|
fmt.Printf("\n")
|
||||||
|
|
||||||
// RTO
|
// RTO
|
||||||
fmt.Printf("⏱️ RTO Analysis:\n")
|
fmt.Printf("[TIME] RTO Analysis:\n")
|
||||||
rtoIcon := "✅"
|
rtoIcon := "[OK]"
|
||||||
if !result.RTOMet {
|
if !result.RTOMet {
|
||||||
rtoIcon = "❌"
|
rtoIcon = "[FAIL]"
|
||||||
}
|
}
|
||||||
fmt.Printf(" Actual RTO: %.2fs\n", result.ActualRTO)
|
fmt.Printf(" Actual RTO: %.2fs\n", result.ActualRTO)
|
||||||
fmt.Printf(" Target RTO: %.0fs\n", result.TargetRTO)
|
fmt.Printf(" Target RTO: %.0fs\n", result.TargetRTO)
|
||||||
@@ -424,11 +424,11 @@ func printDrillResult(result *drill.DrillResult) {
|
|||||||
|
|
||||||
// Validation results
|
// Validation results
|
||||||
if len(result.ValidationResults) > 0 {
|
if len(result.ValidationResults) > 0 {
|
||||||
fmt.Printf("🔍 Validation Queries:\n")
|
fmt.Printf("[SEARCH] Validation Queries:\n")
|
||||||
for _, vr := range result.ValidationResults {
|
for _, vr := range result.ValidationResults {
|
||||||
icon := "✅"
|
icon := "[OK]"
|
||||||
if !vr.Success {
|
if !vr.Success {
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
}
|
}
|
||||||
fmt.Printf(" %s %s: %s\n", icon, vr.Name, vr.Result)
|
fmt.Printf(" %s %s: %s\n", icon, vr.Name, vr.Result)
|
||||||
if vr.Error != "" {
|
if vr.Error != "" {
|
||||||
@@ -440,11 +440,11 @@ func printDrillResult(result *drill.DrillResult) {
|
|||||||
|
|
||||||
// Check results
|
// Check results
|
||||||
if len(result.CheckResults) > 0 {
|
if len(result.CheckResults) > 0 {
|
||||||
fmt.Printf("✓ Checks:\n")
|
fmt.Printf("[OK] Checks:\n")
|
||||||
for _, cr := range result.CheckResults {
|
for _, cr := range result.CheckResults {
|
||||||
icon := "✅"
|
icon := "[OK]"
|
||||||
if !cr.Success {
|
if !cr.Success {
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
}
|
}
|
||||||
fmt.Printf(" %s %s\n", icon, cr.Message)
|
fmt.Printf(" %s %s\n", icon, cr.Message)
|
||||||
}
|
}
|
||||||
@@ -453,7 +453,7 @@ func printDrillResult(result *drill.DrillResult) {
|
|||||||
|
|
||||||
// Errors and warnings
|
// Errors and warnings
|
||||||
if len(result.Errors) > 0 {
|
if len(result.Errors) > 0 {
|
||||||
fmt.Printf("❌ Errors:\n")
|
fmt.Printf("[FAIL] Errors:\n")
|
||||||
for _, e := range result.Errors {
|
for _, e := range result.Errors {
|
||||||
fmt.Printf(" • %s\n", e)
|
fmt.Printf(" • %s\n", e)
|
||||||
}
|
}
|
||||||
@@ -461,7 +461,7 @@ func printDrillResult(result *drill.DrillResult) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(result.Warnings) > 0 {
|
if len(result.Warnings) > 0 {
|
||||||
fmt.Printf("⚠️ Warnings:\n")
|
fmt.Printf("[WARN] Warnings:\n")
|
||||||
for _, w := range result.Warnings {
|
for _, w := range result.Warnings {
|
||||||
fmt.Printf(" • %s\n", w)
|
fmt.Printf(" • %s\n", w)
|
||||||
}
|
}
|
||||||
@@ -470,14 +470,14 @@ func printDrillResult(result *drill.DrillResult) {
|
|||||||
|
|
||||||
// Container info
|
// Container info
|
||||||
if result.ContainerKept {
|
if result.ContainerKept {
|
||||||
fmt.Printf("📦 Container kept: %s\n", result.ContainerID[:12])
|
fmt.Printf("[PKG] Container kept: %s\n", result.ContainerID[:12])
|
||||||
fmt.Printf(" Connect with: docker exec -it %s bash\n", result.ContainerID[:12])
|
fmt.Printf(" Connect with: docker exec -it %s bash\n", result.ContainerID[:12])
|
||||||
fmt.Printf("\n")
|
fmt.Printf("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
fmt.Printf(" %s\n", result.Message)
|
fmt.Printf(" %s\n", result.Message)
|
||||||
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
|
fmt.Printf("=====================================================\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateCatalogWithDrillResult(ctx context.Context, backupPath string, result *drill.DrillResult) {
|
func updateCatalogWithDrillResult(ctx context.Context, backupPath string, result *drill.DrillResult) {
|
||||||
|
|||||||
@@ -63,9 +63,9 @@ func runEngineList(cmd *cobra.Command, args []string) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
status := "✓ Available"
|
status := "[Y] Available"
|
||||||
if !avail.Available {
|
if !avail.Available {
|
||||||
status = "✗ Not available"
|
status = "[N] Not available"
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n%s (%s)\n", info.Name, info.Description)
|
fmt.Printf("\n%s (%s)\n", info.Name, info.Description)
|
||||||
|
|||||||
@@ -176,12 +176,12 @@ func runInstallStatus(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("📦 DBBackup Installation Status")
|
fmt.Println("[STATUS] DBBackup Installation Status")
|
||||||
fmt.Println(strings.Repeat("═", 50))
|
fmt.Println(strings.Repeat("=", 50))
|
||||||
|
|
||||||
if clusterStatus.Installed {
|
if clusterStatus.Installed {
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("🔹 Cluster Backup:")
|
fmt.Println(" * Cluster Backup:")
|
||||||
fmt.Printf(" Service: %s\n", formatStatus(clusterStatus.Installed, clusterStatus.Active))
|
fmt.Printf(" Service: %s\n", formatStatus(clusterStatus.Installed, clusterStatus.Active))
|
||||||
fmt.Printf(" Timer: %s\n", formatStatus(clusterStatus.TimerEnabled, clusterStatus.TimerActive))
|
fmt.Printf(" Timer: %s\n", formatStatus(clusterStatus.TimerEnabled, clusterStatus.TimerActive))
|
||||||
if clusterStatus.NextRun != "" {
|
if clusterStatus.NextRun != "" {
|
||||||
@@ -192,7 +192,7 @@ func runInstallStatus(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("❌ No systemd services installed")
|
fmt.Println("[NONE] No systemd services installed")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("Run 'sudo dbbackup install' to install as a systemd service")
|
fmt.Println("Run 'sudo dbbackup install' to install as a systemd service")
|
||||||
}
|
}
|
||||||
@@ -200,13 +200,13 @@ func runInstallStatus(ctx context.Context) error {
|
|||||||
// Check for exporter
|
// Check for exporter
|
||||||
if _, err := os.Stat("/etc/systemd/system/dbbackup-exporter.service"); err == nil {
|
if _, err := os.Stat("/etc/systemd/system/dbbackup-exporter.service"); err == nil {
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("🔹 Metrics Exporter:")
|
fmt.Println(" * Metrics Exporter:")
|
||||||
// Check if exporter is active using systemctl
|
// Check if exporter is active using systemctl
|
||||||
cmd := exec.CommandContext(ctx, "systemctl", "is-active", "dbbackup-exporter")
|
cmd := exec.CommandContext(ctx, "systemctl", "is-active", "dbbackup-exporter")
|
||||||
if err := cmd.Run(); err == nil {
|
if err := cmd.Run(); err == nil {
|
||||||
fmt.Printf(" Service: ✅ active\n")
|
fmt.Printf(" Service: [OK] active\n")
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" Service: ⚪ inactive\n")
|
fmt.Printf(" Service: [-] inactive\n")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -219,9 +219,9 @@ func formatStatus(installed, active bool) string {
|
|||||||
return "not installed"
|
return "not installed"
|
||||||
}
|
}
|
||||||
if active {
|
if active {
|
||||||
return "✅ active"
|
return "[OK] active"
|
||||||
}
|
}
|
||||||
return "⚪ inactive"
|
return "[-] inactive"
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandSchedule(schedule string) string {
|
func expandSchedule(schedule string) string {
|
||||||
|
|||||||
@@ -203,9 +203,17 @@ func runMigrateCluster(cmd *cobra.Command, args []string) error {
|
|||||||
migrateTargetUser = migrateSourceUser
|
migrateTargetUser = migrateSourceUser
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Create source config first to get WorkDir
|
||||||
|
sourceCfg := config.New()
|
||||||
|
sourceCfg.Host = migrateSourceHost
|
||||||
|
sourceCfg.Port = migrateSourcePort
|
||||||
|
sourceCfg.User = migrateSourceUser
|
||||||
|
sourceCfg.Password = migrateSourcePassword
|
||||||
|
|
||||||
workdir := migrateWorkdir
|
workdir := migrateWorkdir
|
||||||
if workdir == "" {
|
if workdir == "" {
|
||||||
workdir = filepath.Join(os.TempDir(), "dbbackup-migrate")
|
// Use WorkDir from config if available
|
||||||
|
workdir = filepath.Join(sourceCfg.GetEffectiveWorkDir(), "dbbackup-migrate")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create working directory
|
// Create working directory
|
||||||
@@ -213,12 +221,7 @@ func runMigrateCluster(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("failed to create working directory: %w", err)
|
return fmt.Errorf("failed to create working directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create source config
|
// Update source config with remaining settings
|
||||||
sourceCfg := config.New()
|
|
||||||
sourceCfg.Host = migrateSourceHost
|
|
||||||
sourceCfg.Port = migrateSourcePort
|
|
||||||
sourceCfg.User = migrateSourceUser
|
|
||||||
sourceCfg.Password = migrateSourcePassword
|
|
||||||
sourceCfg.SSLMode = migrateSourceSSLMode
|
sourceCfg.SSLMode = migrateSourceSSLMode
|
||||||
sourceCfg.Database = "postgres" // Default connection database
|
sourceCfg.Database = "postgres" // Default connection database
|
||||||
sourceCfg.DatabaseType = cfg.DatabaseType
|
sourceCfg.DatabaseType = cfg.DatabaseType
|
||||||
@@ -342,7 +345,8 @@ func runMigrateSingle(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
workdir := migrateWorkdir
|
workdir := migrateWorkdir
|
||||||
if workdir == "" {
|
if workdir == "" {
|
||||||
workdir = filepath.Join(os.TempDir(), "dbbackup-migrate")
|
tempCfg := config.New()
|
||||||
|
workdir = filepath.Join(tempCfg.GetEffectiveWorkDir(), "dbbackup-migrate")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create working directory
|
// Create working directory
|
||||||
|
|||||||
76
cmd/pitr.go
76
cmd/pitr.go
@@ -436,7 +436,7 @@ func runPITREnable(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("failed to enable PITR: %w", err)
|
return fmt.Errorf("failed to enable PITR: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("✅ PITR enabled successfully!")
|
log.Info("[OK] PITR enabled successfully!")
|
||||||
log.Info("")
|
log.Info("")
|
||||||
log.Info("Next steps:")
|
log.Info("Next steps:")
|
||||||
log.Info("1. Restart PostgreSQL: sudo systemctl restart postgresql")
|
log.Info("1. Restart PostgreSQL: sudo systemctl restart postgresql")
|
||||||
@@ -463,7 +463,7 @@ func runPITRDisable(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("failed to disable PITR: %w", err)
|
return fmt.Errorf("failed to disable PITR: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("✅ PITR disabled successfully!")
|
log.Info("[OK] PITR disabled successfully!")
|
||||||
log.Info("PostgreSQL restart required: sudo systemctl restart postgresql")
|
log.Info("PostgreSQL restart required: sudo systemctl restart postgresql")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -483,15 +483,15 @@ func runPITRStatus(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Display PITR configuration
|
// Display PITR configuration
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("======================================================")
|
||||||
fmt.Println(" Point-in-Time Recovery (PITR) Status")
|
fmt.Println(" Point-in-Time Recovery (PITR) Status")
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("======================================================")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
if config.Enabled {
|
if config.Enabled {
|
||||||
fmt.Println("Status: ✅ ENABLED")
|
fmt.Println("Status: [OK] ENABLED")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("Status: ❌ DISABLED")
|
fmt.Println("Status: [FAIL] DISABLED")
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("WAL Level: %s\n", config.WALLevel)
|
fmt.Printf("WAL Level: %s\n", config.WALLevel)
|
||||||
@@ -510,7 +510,7 @@ func runPITRStatus(cmd *cobra.Command, args []string) error {
|
|||||||
// Extract archive dir from command (simple parsing)
|
// Extract archive dir from command (simple parsing)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("WAL Archive Statistics:")
|
fmt.Println("WAL Archive Statistics:")
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("======================================================")
|
||||||
// TODO: Parse archive dir and show stats
|
// TODO: Parse archive dir and show stats
|
||||||
fmt.Println(" (Use 'dbbackup wal list --archive-dir <dir>' to view archives)")
|
fmt.Println(" (Use 'dbbackup wal list --archive-dir <dir>' to view archives)")
|
||||||
}
|
}
|
||||||
@@ -574,13 +574,13 @@ func runWALList(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Display archives
|
// Display archives
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("======================================================")
|
||||||
fmt.Printf(" WAL Archives (%d files)\n", len(archives))
|
fmt.Printf(" WAL Archives (%d files)\n", len(archives))
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("======================================================")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
fmt.Printf("%-28s %10s %10s %8s %s\n", "WAL Filename", "Timeline", "Segment", "Size", "Archived At")
|
fmt.Printf("%-28s %10s %10s %8s %s\n", "WAL Filename", "Timeline", "Segment", "Size", "Archived At")
|
||||||
fmt.Println("────────────────────────────────────────────────────────────────────────────────")
|
fmt.Println("--------------------------------------------------------------------------------")
|
||||||
|
|
||||||
for _, archive := range archives {
|
for _, archive := range archives {
|
||||||
size := formatWALSize(archive.ArchivedSize)
|
size := formatWALSize(archive.ArchivedSize)
|
||||||
@@ -644,7 +644,7 @@ func runWALCleanup(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("WAL cleanup failed: %w", err)
|
return fmt.Errorf("WAL cleanup failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("✅ WAL cleanup completed", "deleted", deleted, "retention_days", archiveConfig.RetentionDays)
|
log.Info("[OK] WAL cleanup completed", "deleted", deleted, "retention_days", archiveConfig.RetentionDays)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -671,7 +671,7 @@ func runWALTimeline(cmd *cobra.Command, args []string) error {
|
|||||||
// Display timeline details
|
// Display timeline details
|
||||||
if len(history.Timelines) > 0 {
|
if len(history.Timelines) > 0 {
|
||||||
fmt.Println("\nTimeline Details:")
|
fmt.Println("\nTimeline Details:")
|
||||||
fmt.Println("═════════════════")
|
fmt.Println("=================")
|
||||||
for _, tl := range history.Timelines {
|
for _, tl := range history.Timelines {
|
||||||
fmt.Printf("\nTimeline %d:\n", tl.TimelineID)
|
fmt.Printf("\nTimeline %d:\n", tl.TimelineID)
|
||||||
if tl.ParentTimeline > 0 {
|
if tl.ParentTimeline > 0 {
|
||||||
@@ -690,7 +690,7 @@ func runWALTimeline(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Printf(" Created: %s\n", tl.CreatedAt.Format("2006-01-02 15:04:05"))
|
fmt.Printf(" Created: %s\n", tl.CreatedAt.Format("2006-01-02 15:04:05"))
|
||||||
}
|
}
|
||||||
if tl.TimelineID == history.CurrentTimeline {
|
if tl.TimelineID == history.CurrentTimeline {
|
||||||
fmt.Printf(" Status: ⚡ CURRENT\n")
|
fmt.Printf(" Status: [CURR] CURRENT\n")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -759,15 +759,15 @@ func runBinlogList(cmd *cobra.Command, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Printf(" Binary Log Files (%s)\n", bm.ServerType())
|
fmt.Printf(" Binary Log Files (%s)\n", bm.ServerType())
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
if len(binlogs) > 0 {
|
if len(binlogs) > 0 {
|
||||||
fmt.Println("Source Directory:")
|
fmt.Println("Source Directory:")
|
||||||
fmt.Printf("%-24s %10s %-19s %-19s %s\n", "Filename", "Size", "Start Time", "End Time", "Format")
|
fmt.Printf("%-24s %10s %-19s %-19s %s\n", "Filename", "Size", "Start Time", "End Time", "Format")
|
||||||
fmt.Println("────────────────────────────────────────────────────────────────────────────────")
|
fmt.Println("--------------------------------------------------------------------------------")
|
||||||
|
|
||||||
var totalSize int64
|
var totalSize int64
|
||||||
for _, b := range binlogs {
|
for _, b := range binlogs {
|
||||||
@@ -797,7 +797,7 @@ func runBinlogList(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("Archived Binlogs:")
|
fmt.Println("Archived Binlogs:")
|
||||||
fmt.Printf("%-24s %10s %-19s %s\n", "Original", "Size", "Archived At", "Flags")
|
fmt.Printf("%-24s %10s %-19s %s\n", "Original", "Size", "Archived At", "Flags")
|
||||||
fmt.Println("────────────────────────────────────────────────────────────────────────────────")
|
fmt.Println("--------------------------------------------------------------------------------")
|
||||||
|
|
||||||
var totalSize int64
|
var totalSize int64
|
||||||
for _, a := range archived {
|
for _, a := range archived {
|
||||||
@@ -914,7 +914,7 @@ func runBinlogArchive(cmd *cobra.Command, args []string) error {
|
|||||||
bm.SaveArchiveMetadata(allArchived)
|
bm.SaveArchiveMetadata(allArchived)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("✅ Binlog archiving completed", "archived", len(newArchives))
|
log.Info("[OK] Binlog archiving completed", "archived", len(newArchives))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1014,15 +1014,15 @@ func runBinlogValidate(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("validating binlog chain: %w", err)
|
return fmt.Errorf("validating binlog chain: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Println(" Binlog Chain Validation")
|
fmt.Println(" Binlog Chain Validation")
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
if validation.Valid {
|
if validation.Valid {
|
||||||
fmt.Println("Status: ✅ VALID - Binlog chain is complete")
|
fmt.Println("Status: [OK] VALID - Binlog chain is complete")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("Status: ❌ INVALID - Binlog chain has gaps")
|
fmt.Println("Status: [FAIL] INVALID - Binlog chain has gaps")
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("Files: %d binlog files\n", validation.LogCount)
|
fmt.Printf("Files: %d binlog files\n", validation.LogCount)
|
||||||
@@ -1055,7 +1055,7 @@ func runBinlogValidate(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("Errors:")
|
fmt.Println("Errors:")
|
||||||
for _, e := range validation.Errors {
|
for _, e := range validation.Errors {
|
||||||
fmt.Printf(" ✗ %s\n", e)
|
fmt.Printf(" [FAIL] %s\n", e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1094,9 +1094,9 @@ func runBinlogPosition(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Println(" Current Binary Log Position")
|
fmt.Println(" Current Binary Log Position")
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
if rows.Next() {
|
if rows.Next() {
|
||||||
@@ -1178,24 +1178,24 @@ func runMySQLPITRStatus(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("getting PITR status: %w", err)
|
return fmt.Errorf("getting PITR status: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Printf(" MySQL/MariaDB PITR Status (%s)\n", status.DatabaseType)
|
fmt.Printf(" MySQL/MariaDB PITR Status (%s)\n", status.DatabaseType)
|
||||||
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
fmt.Println("=============================================================")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
if status.Enabled {
|
if status.Enabled {
|
||||||
fmt.Println("PITR Status: ✅ ENABLED")
|
fmt.Println("PITR Status: [OK] ENABLED")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("PITR Status: ❌ NOT CONFIGURED")
|
fmt.Println("PITR Status: [FAIL] NOT CONFIGURED")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get binary logging status
|
// Get binary logging status
|
||||||
var logBin string
|
var logBin string
|
||||||
db.QueryRowContext(ctx, "SELECT @@log_bin").Scan(&logBin)
|
db.QueryRowContext(ctx, "SELECT @@log_bin").Scan(&logBin)
|
||||||
if logBin == "1" || logBin == "ON" {
|
if logBin == "1" || logBin == "ON" {
|
||||||
fmt.Println("Binary Logging: ✅ ENABLED")
|
fmt.Println("Binary Logging: [OK] ENABLED")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("Binary Logging: ❌ DISABLED")
|
fmt.Println("Binary Logging: [FAIL] DISABLED")
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("Binlog Format: %s\n", status.LogLevel)
|
fmt.Printf("Binlog Format: %s\n", status.LogLevel)
|
||||||
@@ -1205,14 +1205,14 @@ func runMySQLPITRStatus(cmd *cobra.Command, args []string) error {
|
|||||||
if status.DatabaseType == pitr.DatabaseMariaDB {
|
if status.DatabaseType == pitr.DatabaseMariaDB {
|
||||||
db.QueryRowContext(ctx, "SELECT @@gtid_current_pos").Scan(>idMode)
|
db.QueryRowContext(ctx, "SELECT @@gtid_current_pos").Scan(>idMode)
|
||||||
if gtidMode != "" {
|
if gtidMode != "" {
|
||||||
fmt.Println("GTID Mode: ✅ ENABLED")
|
fmt.Println("GTID Mode: [OK] ENABLED")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("GTID Mode: ❌ DISABLED")
|
fmt.Println("GTID Mode: [FAIL] DISABLED")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
db.QueryRowContext(ctx, "SELECT @@gtid_mode").Scan(>idMode)
|
db.QueryRowContext(ctx, "SELECT @@gtid_mode").Scan(>idMode)
|
||||||
if gtidMode == "ON" {
|
if gtidMode == "ON" {
|
||||||
fmt.Println("GTID Mode: ✅ ENABLED")
|
fmt.Println("GTID Mode: [OK] ENABLED")
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("GTID Mode: %s\n", gtidMode)
|
fmt.Printf("GTID Mode: %s\n", gtidMode)
|
||||||
}
|
}
|
||||||
@@ -1237,12 +1237,12 @@ func runMySQLPITRStatus(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("PITR Requirements:")
|
fmt.Println("PITR Requirements:")
|
||||||
if logBin == "1" || logBin == "ON" {
|
if logBin == "1" || logBin == "ON" {
|
||||||
fmt.Println(" ✅ Binary logging enabled")
|
fmt.Println(" [OK] Binary logging enabled")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println(" ❌ Binary logging must be enabled (log_bin = mysql-bin)")
|
fmt.Println(" [FAIL] Binary logging must be enabled (log_bin = mysql-bin)")
|
||||||
}
|
}
|
||||||
if status.LogLevel == "ROW" {
|
if status.LogLevel == "ROW" {
|
||||||
fmt.Println(" ✅ Row-based logging (recommended)")
|
fmt.Println(" [OK] Row-based logging (recommended)")
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" ⚠ binlog_format = %s (ROW recommended for PITR)\n", status.LogLevel)
|
fmt.Printf(" ⚠ binlog_format = %s (ROW recommended for PITR)\n", status.LogLevel)
|
||||||
}
|
}
|
||||||
@@ -1299,7 +1299,7 @@ func runMySQLPITREnable(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("enabling PITR: %w", err)
|
return fmt.Errorf("enabling PITR: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("✅ MySQL PITR enabled successfully!")
|
log.Info("[OK] MySQL PITR enabled successfully!")
|
||||||
log.Info("")
|
log.Info("")
|
||||||
log.Info("Next steps:")
|
log.Info("Next steps:")
|
||||||
log.Info("1. Start binlog archiving: dbbackup binlog watch --archive-dir " + mysqlArchiveDir)
|
log.Info("1. Start binlog archiving: dbbackup binlog watch --archive-dir " + mysqlArchiveDir)
|
||||||
|
|||||||
@@ -141,7 +141,7 @@ func runList(ctx context.Context) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("📦 %s\n", file.Name)
|
fmt.Printf("[FILE] %s\n", file.Name)
|
||||||
fmt.Printf(" Size: %s\n", formatFileSize(stat.Size()))
|
fmt.Printf(" Size: %s\n", formatFileSize(stat.Size()))
|
||||||
fmt.Printf(" Modified: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
|
fmt.Printf(" Modified: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
|
||||||
fmt.Printf(" Type: %s\n", getBackupType(file.Name))
|
fmt.Printf(" Type: %s\n", getBackupType(file.Name))
|
||||||
@@ -237,56 +237,56 @@ func runPreflight(ctx context.Context) error {
|
|||||||
totalChecks := 6
|
totalChecks := 6
|
||||||
|
|
||||||
// 1. Database connectivity check
|
// 1. Database connectivity check
|
||||||
fmt.Print("🔗 Database connectivity... ")
|
fmt.Print("[1] Database connectivity... ")
|
||||||
if err := testDatabaseConnection(); err != nil {
|
if err := testDatabaseConnection(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. Required tools check
|
// 2. Required tools check
|
||||||
fmt.Print("🛠️ Required tools (pg_dump/pg_restore)... ")
|
fmt.Print("[2] Required tools (pg_dump/pg_restore)... ")
|
||||||
if err := checkRequiredTools(); err != nil {
|
if err := checkRequiredTools(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. Backup directory check
|
// 3. Backup directory check
|
||||||
fmt.Print("📁 Backup directory access... ")
|
fmt.Print("[3] Backup directory access... ")
|
||||||
if err := checkBackupDirectory(); err != nil {
|
if err := checkBackupDirectory(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4. Disk space check
|
// 4. Disk space check
|
||||||
fmt.Print("💾 Available disk space... ")
|
fmt.Print("[4] Available disk space... ")
|
||||||
if err := checkDiskSpace(); err != nil {
|
if err := checkDiskSpace(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 5. Permissions check
|
// 5. Permissions check
|
||||||
fmt.Print("🔐 File permissions... ")
|
fmt.Print("[5] File permissions... ")
|
||||||
if err := checkPermissions(); err != nil {
|
if err := checkPermissions(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
// 6. CPU/Memory resources check
|
// 6. CPU/Memory resources check
|
||||||
fmt.Print("🖥️ System resources... ")
|
fmt.Print("[6] System resources... ")
|
||||||
if err := checkSystemResources(); err != nil {
|
if err := checkSystemResources(); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -294,10 +294,10 @@ func runPreflight(ctx context.Context) error {
|
|||||||
fmt.Printf("Results: %d/%d checks passed\n", checksPassed, totalChecks)
|
fmt.Printf("Results: %d/%d checks passed\n", checksPassed, totalChecks)
|
||||||
|
|
||||||
if checksPassed == totalChecks {
|
if checksPassed == totalChecks {
|
||||||
fmt.Println("🎉 All preflight checks passed! System is ready for backup operations.")
|
fmt.Println("[SUCCESS] All preflight checks passed! System is ready for backup operations.")
|
||||||
return nil
|
return nil
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("⚠️ %d check(s) failed. Please address the issues before running backups.\n", totalChecks-checksPassed)
|
fmt.Printf("[WARN] %d check(s) failed. Please address the issues before running backups.\n", totalChecks-checksPassed)
|
||||||
return fmt.Errorf("preflight checks failed: %d/%d passed", checksPassed, totalChecks)
|
return fmt.Errorf("preflight checks failed: %d/%d passed", checksPassed, totalChecks)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -414,44 +414,44 @@ func runRestore(ctx context.Context, archiveName string) error {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
// Show warning
|
// Show warning
|
||||||
fmt.Println("⚠️ WARNING: This will restore data to the target database.")
|
fmt.Println("[WARN] WARNING: This will restore data to the target database.")
|
||||||
fmt.Println(" Existing data may be overwritten or merged depending on the restore method.")
|
fmt.Println(" Existing data may be overwritten or merged depending on the restore method.")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
// For safety, show what would be done without actually doing it
|
// For safety, show what would be done without actually doing it
|
||||||
switch archiveType {
|
switch archiveType {
|
||||||
case "Single Database (.dump)":
|
case "Single Database (.dump)":
|
||||||
fmt.Println("🔄 Would execute: pg_restore to restore single database")
|
fmt.Println("[EXEC] Would execute: pg_restore to restore single database")
|
||||||
fmt.Printf(" Command: pg_restore -h %s -p %d -U %s -d %s --verbose %s\n",
|
fmt.Printf(" Command: pg_restore -h %s -p %d -U %s -d %s --verbose %s\n",
|
||||||
cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
|
cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
|
||||||
case "Single Database (.dump.gz)":
|
case "Single Database (.dump.gz)":
|
||||||
fmt.Println("🔄 Would execute: gunzip and pg_restore to restore single database")
|
fmt.Println("[EXEC] Would execute: gunzip and pg_restore to restore single database")
|
||||||
fmt.Printf(" Command: gunzip -c %s | pg_restore -h %s -p %d -U %s -d %s --verbose\n",
|
fmt.Printf(" Command: gunzip -c %s | pg_restore -h %s -p %d -U %s -d %s --verbose\n",
|
||||||
archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
|
archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
|
||||||
case "SQL Script (.sql)":
|
case "SQL Script (.sql)":
|
||||||
if cfg.IsPostgreSQL() {
|
if cfg.IsPostgreSQL() {
|
||||||
fmt.Println("🔄 Would execute: psql to run SQL script")
|
fmt.Println("[EXEC] Would execute: psql to run SQL script")
|
||||||
fmt.Printf(" Command: psql -h %s -p %d -U %s -d %s -f %s\n",
|
fmt.Printf(" Command: psql -h %s -p %d -U %s -d %s -f %s\n",
|
||||||
cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
|
cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
|
||||||
} else if cfg.IsMySQL() {
|
} else if cfg.IsMySQL() {
|
||||||
fmt.Println("🔄 Would execute: mysql to run SQL script")
|
fmt.Println("[EXEC] Would execute: mysql to run SQL script")
|
||||||
fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, false))
|
fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, false))
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("🔄 Would execute: SQL client to run script (database type unknown)")
|
fmt.Println("[EXEC] Would execute: SQL client to run script (database type unknown)")
|
||||||
}
|
}
|
||||||
case "SQL Script (.sql.gz)":
|
case "SQL Script (.sql.gz)":
|
||||||
if cfg.IsPostgreSQL() {
|
if cfg.IsPostgreSQL() {
|
||||||
fmt.Println("🔄 Would execute: gunzip and psql to run SQL script")
|
fmt.Println("[EXEC] Would execute: gunzip and psql to run SQL script")
|
||||||
fmt.Printf(" Command: gunzip -c %s | psql -h %s -p %d -U %s -d %s\n",
|
fmt.Printf(" Command: gunzip -c %s | psql -h %s -p %d -U %s -d %s\n",
|
||||||
archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
|
archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
|
||||||
} else if cfg.IsMySQL() {
|
} else if cfg.IsMySQL() {
|
||||||
fmt.Println("🔄 Would execute: gunzip and mysql to run SQL script")
|
fmt.Println("[EXEC] Would execute: gunzip and mysql to run SQL script")
|
||||||
fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, true))
|
fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, true))
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("🔄 Would execute: gunzip and SQL client to run script (database type unknown)")
|
fmt.Println("[EXEC] Would execute: gunzip and SQL client to run script (database type unknown)")
|
||||||
}
|
}
|
||||||
case "Cluster Backup (.tar.gz)":
|
case "Cluster Backup (.tar.gz)":
|
||||||
fmt.Println("🔄 Would execute: Extract and restore cluster backup")
|
fmt.Println("[EXEC] Would execute: Extract and restore cluster backup")
|
||||||
fmt.Println(" Steps:")
|
fmt.Println(" Steps:")
|
||||||
fmt.Println(" 1. Extract tar.gz archive")
|
fmt.Println(" 1. Extract tar.gz archive")
|
||||||
fmt.Println(" 2. Restore global objects (roles, tablespaces)")
|
fmt.Println(" 2. Restore global objects (roles, tablespaces)")
|
||||||
@@ -461,7 +461,7 @@ func runRestore(ctx context.Context, archiveName string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("🛡️ SAFETY MODE: Restore command is in preview mode.")
|
fmt.Println("[SAFETY] SAFETY MODE: Restore command is in preview mode.")
|
||||||
fmt.Println(" This shows what would be executed without making changes.")
|
fmt.Println(" This shows what would be executed without making changes.")
|
||||||
fmt.Println(" To enable actual restore, add --confirm flag (not yet implemented).")
|
fmt.Println(" To enable actual restore, add --confirm flag (not yet implemented).")
|
||||||
|
|
||||||
@@ -520,25 +520,25 @@ func runVerify(ctx context.Context, archiveName string) error {
|
|||||||
checksPassed := 0
|
checksPassed := 0
|
||||||
|
|
||||||
// Basic file existence and readability
|
// Basic file existence and readability
|
||||||
fmt.Print("📁 File accessibility... ")
|
fmt.Print("[CHK] File accessibility... ")
|
||||||
if file, err := os.Open(archivePath); err != nil {
|
if file, err := os.Open(archivePath); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
file.Close()
|
file.Close()
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
|
|
||||||
// File size sanity check
|
// File size sanity check
|
||||||
fmt.Print("📏 File size check... ")
|
fmt.Print("[CHK] File size check... ")
|
||||||
if stat.Size() == 0 {
|
if stat.Size() == 0 {
|
||||||
fmt.Println("❌ FAILED: File is empty")
|
fmt.Println("[FAIL] FAILED: File is empty")
|
||||||
} else if stat.Size() < 100 {
|
} else if stat.Size() < 100 {
|
||||||
fmt.Println("⚠️ WARNING: File is very small (< 100 bytes)")
|
fmt.Println("[WARN] WARNING: File is very small (< 100 bytes)")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
@@ -546,51 +546,51 @@ func runVerify(ctx context.Context, archiveName string) error {
|
|||||||
// Type-specific verification
|
// Type-specific verification
|
||||||
switch archiveType {
|
switch archiveType {
|
||||||
case "Single Database (.dump)":
|
case "Single Database (.dump)":
|
||||||
fmt.Print("🔍 PostgreSQL dump format check... ")
|
fmt.Print("[CHK] PostgreSQL dump format check... ")
|
||||||
if err := verifyPgDump(archivePath); err != nil {
|
if err := verifyPgDump(archivePath); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
|
|
||||||
case "Single Database (.dump.gz)":
|
case "Single Database (.dump.gz)":
|
||||||
fmt.Print("🔍 PostgreSQL dump format check (gzip)... ")
|
fmt.Print("[CHK] PostgreSQL dump format check (gzip)... ")
|
||||||
if err := verifyPgDumpGzip(archivePath); err != nil {
|
if err := verifyPgDumpGzip(archivePath); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
|
|
||||||
case "SQL Script (.sql)":
|
case "SQL Script (.sql)":
|
||||||
fmt.Print("📜 SQL script validation... ")
|
fmt.Print("[CHK] SQL script validation... ")
|
||||||
if err := verifySqlScript(archivePath); err != nil {
|
if err := verifySqlScript(archivePath); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
|
|
||||||
case "SQL Script (.sql.gz)":
|
case "SQL Script (.sql.gz)":
|
||||||
fmt.Print("📜 SQL script validation (gzip)... ")
|
fmt.Print("[CHK] SQL script validation (gzip)... ")
|
||||||
if err := verifyGzipSqlScript(archivePath); err != nil {
|
if err := verifyGzipSqlScript(archivePath); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
|
|
||||||
case "Cluster Backup (.tar.gz)":
|
case "Cluster Backup (.tar.gz)":
|
||||||
fmt.Print("📦 Archive extraction test... ")
|
fmt.Print("[CHK] Archive extraction test... ")
|
||||||
if err := verifyTarGz(archivePath); err != nil {
|
if err := verifyTarGz(archivePath); err != nil {
|
||||||
fmt.Printf("❌ FAILED: %v\n", err)
|
fmt.Printf("[FAIL] FAILED: %v\n", err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
@@ -598,11 +598,11 @@ func runVerify(ctx context.Context, archiveName string) error {
|
|||||||
|
|
||||||
// Check for metadata file
|
// Check for metadata file
|
||||||
metadataPath := archivePath + ".info"
|
metadataPath := archivePath + ".info"
|
||||||
fmt.Print("📋 Metadata file check... ")
|
fmt.Print("[CHK] Metadata file check... ")
|
||||||
if _, err := os.Stat(metadataPath); os.IsNotExist(err) {
|
if _, err := os.Stat(metadataPath); os.IsNotExist(err) {
|
||||||
fmt.Println("⚠️ WARNING: No metadata file found")
|
fmt.Println("[WARN] WARNING: No metadata file found")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("✅ PASSED")
|
fmt.Println("[OK] PASSED")
|
||||||
checksPassed++
|
checksPassed++
|
||||||
}
|
}
|
||||||
checksRun++
|
checksRun++
|
||||||
@@ -611,13 +611,13 @@ func runVerify(ctx context.Context, archiveName string) error {
|
|||||||
fmt.Printf("Verification Results: %d/%d checks passed\n", checksPassed, checksRun)
|
fmt.Printf("Verification Results: %d/%d checks passed\n", checksPassed, checksRun)
|
||||||
|
|
||||||
if checksPassed == checksRun {
|
if checksPassed == checksRun {
|
||||||
fmt.Println("🎉 Archive verification completed successfully!")
|
fmt.Println("[SUCCESS] Archive verification completed successfully!")
|
||||||
return nil
|
return nil
|
||||||
} else if float64(checksPassed)/float64(checksRun) >= 0.8 {
|
} else if float64(checksPassed)/float64(checksRun) >= 0.8 {
|
||||||
fmt.Println("⚠️ Archive verification completed with warnings.")
|
fmt.Println("[WARN] Archive verification completed with warnings.")
|
||||||
return nil
|
return nil
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("❌ Archive verification failed. Archive may be corrupted.")
|
fmt.Println("[FAIL] Archive verification failed. Archive may be corrupted.")
|
||||||
return fmt.Errorf("verification failed: %d/%d checks passed", checksPassed, checksRun)
|
return fmt.Errorf("verification failed: %d/%d checks passed", checksPassed, checksRun)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -342,7 +342,7 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("archive not found: %s", archivePath)
|
return fmt.Errorf("archive not found: %s", archivePath)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("🔍 Diagnosing backup file", "path", archivePath)
|
log.Info("[DIAG] Diagnosing backup file", "path", archivePath)
|
||||||
|
|
||||||
diagnoser := restore.NewDiagnoser(log, restoreVerbose)
|
diagnoser := restore.NewDiagnoser(log, restoreVerbose)
|
||||||
|
|
||||||
@@ -350,10 +350,11 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
|
|||||||
format := restore.DetectArchiveFormat(archivePath)
|
format := restore.DetectArchiveFormat(archivePath)
|
||||||
|
|
||||||
if format.IsClusterBackup() && diagnoseDeep {
|
if format.IsClusterBackup() && diagnoseDeep {
|
||||||
// Create temp directory for extraction
|
// Create temp directory for extraction in configured WorkDir
|
||||||
tempDir, err := os.MkdirTemp("", "dbbackup-diagnose-*")
|
workDir := cfg.GetEffectiveWorkDir()
|
||||||
|
tempDir, err := os.MkdirTemp(workDir, "dbbackup-diagnose-*")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to create temp directory: %w", err)
|
return fmt.Errorf("failed to create temp directory in %s: %w", workDir, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !diagnoseKeepTemp {
|
if !diagnoseKeepTemp {
|
||||||
@@ -386,7 +387,7 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
|
|||||||
// Summary
|
// Summary
|
||||||
if !diagnoseJSON {
|
if !diagnoseJSON {
|
||||||
fmt.Println("\n" + strings.Repeat("=", 70))
|
fmt.Println("\n" + strings.Repeat("=", 70))
|
||||||
fmt.Printf("📊 CLUSTER SUMMARY: %d databases analyzed\n", len(results))
|
fmt.Printf("[SUMMARY] CLUSTER SUMMARY: %d databases analyzed\n", len(results))
|
||||||
|
|
||||||
validCount := 0
|
validCount := 0
|
||||||
for _, r := range results {
|
for _, r := range results {
|
||||||
@@ -396,9 +397,9 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if validCount == len(results) {
|
if validCount == len(results) {
|
||||||
fmt.Println("✅ All dumps are valid")
|
fmt.Println("[OK] All dumps are valid")
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("❌ %d/%d dumps have issues\n", len(results)-validCount, len(results))
|
fmt.Printf("[FAIL] %d/%d dumps have issues\n", len(results)-validCount, len(results))
|
||||||
}
|
}
|
||||||
fmt.Println(strings.Repeat("=", 70))
|
fmt.Println(strings.Repeat("=", 70))
|
||||||
}
|
}
|
||||||
@@ -425,7 +426,7 @@ func runRestoreDiagnose(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("backup file has validation errors")
|
return fmt.Errorf("backup file has validation errors")
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("✅ Backup file appears valid")
|
log.Info("[OK] Backup file appears valid")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -544,7 +545,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
|
|||||||
isDryRun := restoreDryRun || !restoreConfirm
|
isDryRun := restoreDryRun || !restoreConfirm
|
||||||
|
|
||||||
if isDryRun {
|
if isDryRun {
|
||||||
fmt.Println("\n🔍 DRY-RUN MODE - No changes will be made")
|
fmt.Println("\n[DRY-RUN] DRY-RUN MODE - No changes will be made")
|
||||||
fmt.Printf("\nWould restore:\n")
|
fmt.Printf("\nWould restore:\n")
|
||||||
fmt.Printf(" Archive: %s\n", archivePath)
|
fmt.Printf(" Archive: %s\n", archivePath)
|
||||||
fmt.Printf(" Format: %s\n", format.String())
|
fmt.Printf(" Format: %s\n", format.String())
|
||||||
@@ -587,7 +588,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
// Run pre-restore diagnosis if requested
|
// Run pre-restore diagnosis if requested
|
||||||
if restoreDiagnose {
|
if restoreDiagnose {
|
||||||
log.Info("🔍 Running pre-restore diagnosis...")
|
log.Info("[DIAG] Running pre-restore diagnosis...")
|
||||||
|
|
||||||
diagnoser := restore.NewDiagnoser(log, restoreVerbose)
|
diagnoser := restore.NewDiagnoser(log, restoreVerbose)
|
||||||
result, err := diagnoser.DiagnoseFile(archivePath)
|
result, err := diagnoser.DiagnoseFile(archivePath)
|
||||||
@@ -598,7 +599,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
|
|||||||
diagnoser.PrintDiagnosis(result)
|
diagnoser.PrintDiagnosis(result)
|
||||||
|
|
||||||
if !result.IsValid {
|
if !result.IsValid {
|
||||||
log.Error("❌ Pre-restore diagnosis found issues")
|
log.Error("[FAIL] Pre-restore diagnosis found issues")
|
||||||
if result.IsTruncated {
|
if result.IsTruncated {
|
||||||
log.Error(" The backup file appears to be TRUNCATED")
|
log.Error(" The backup file appears to be TRUNCATED")
|
||||||
}
|
}
|
||||||
@@ -612,7 +613,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
log.Warn("Continuing despite diagnosis errors (--force enabled)")
|
log.Warn("Continuing despite diagnosis errors (--force enabled)")
|
||||||
} else {
|
} else {
|
||||||
log.Info("✅ Backup file passed diagnosis")
|
log.Info("[OK] Backup file passed diagnosis")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -632,7 +633,7 @@ func runRestoreSingle(cmd *cobra.Command, args []string) error {
|
|||||||
// Audit log: restore success
|
// Audit log: restore success
|
||||||
auditLogger.LogRestoreComplete(user, targetDB, time.Since(startTime))
|
auditLogger.LogRestoreComplete(user, targetDB, time.Since(startTime))
|
||||||
|
|
||||||
log.Info("✅ Restore completed successfully", "database", targetDB)
|
log.Info("[OK] Restore completed successfully", "database", targetDB)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -700,7 +701,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Warn("⚠️ Using alternative working directory for extraction")
|
log.Warn("[WARN] Using alternative working directory for extraction")
|
||||||
log.Warn(" This is recommended when system disk space is limited")
|
log.Warn(" This is recommended when system disk space is limited")
|
||||||
log.Warn(" Location: " + restoreWorkdir)
|
log.Warn(" Location: " + restoreWorkdir)
|
||||||
}
|
}
|
||||||
@@ -753,7 +754,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
isDryRun := restoreDryRun || !restoreConfirm
|
isDryRun := restoreDryRun || !restoreConfirm
|
||||||
|
|
||||||
if isDryRun {
|
if isDryRun {
|
||||||
fmt.Println("\n🔍 DRY-RUN MODE - No changes will be made")
|
fmt.Println("\n[DRY-RUN] DRY-RUN MODE - No changes will be made")
|
||||||
fmt.Printf("\nWould restore cluster:\n")
|
fmt.Printf("\nWould restore cluster:\n")
|
||||||
fmt.Printf(" Archive: %s\n", archivePath)
|
fmt.Printf(" Archive: %s\n", archivePath)
|
||||||
fmt.Printf(" Parallel Jobs: %d (0 = auto)\n", restoreJobs)
|
fmt.Printf(" Parallel Jobs: %d (0 = auto)\n", restoreJobs)
|
||||||
@@ -763,7 +764,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
if restoreCleanCluster {
|
if restoreCleanCluster {
|
||||||
fmt.Printf(" Clean Cluster: true (will drop %d existing database(s))\n", len(existingDBs))
|
fmt.Printf(" Clean Cluster: true (will drop %d existing database(s))\n", len(existingDBs))
|
||||||
if len(existingDBs) > 0 {
|
if len(existingDBs) > 0 {
|
||||||
fmt.Printf("\n⚠️ Databases to be dropped:\n")
|
fmt.Printf("\n[WARN] Databases to be dropped:\n")
|
||||||
for _, dbName := range existingDBs {
|
for _, dbName := range existingDBs {
|
||||||
fmt.Printf(" - %s\n", dbName)
|
fmt.Printf(" - %s\n", dbName)
|
||||||
}
|
}
|
||||||
@@ -775,7 +776,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
// Warning for clean-cluster
|
// Warning for clean-cluster
|
||||||
if restoreCleanCluster && len(existingDBs) > 0 {
|
if restoreCleanCluster && len(existingDBs) > 0 {
|
||||||
log.Warn("🔥 Clean cluster mode enabled")
|
log.Warn("[!!] Clean cluster mode enabled")
|
||||||
log.Warn(fmt.Sprintf(" %d existing database(s) will be DROPPED before restore!", len(existingDBs)))
|
log.Warn(fmt.Sprintf(" %d existing database(s) will be DROPPED before restore!", len(existingDBs)))
|
||||||
for _, dbName := range existingDBs {
|
for _, dbName := range existingDBs {
|
||||||
log.Warn(" - " + dbName)
|
log.Warn(" - " + dbName)
|
||||||
@@ -828,12 +829,13 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
// Run pre-restore diagnosis if requested
|
// Run pre-restore diagnosis if requested
|
||||||
if restoreDiagnose {
|
if restoreDiagnose {
|
||||||
log.Info("🔍 Running pre-restore diagnosis...")
|
log.Info("[DIAG] Running pre-restore diagnosis...")
|
||||||
|
|
||||||
// Create temp directory for extraction
|
// Create temp directory for extraction in configured WorkDir
|
||||||
diagTempDir, err := os.MkdirTemp("", "dbbackup-diagnose-*")
|
workDir := cfg.GetEffectiveWorkDir()
|
||||||
|
diagTempDir, err := os.MkdirTemp(workDir, "dbbackup-diagnose-*")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to create temp directory for diagnosis: %w", err)
|
return fmt.Errorf("failed to create temp directory for diagnosis in %s: %w", workDir, err)
|
||||||
}
|
}
|
||||||
defer os.RemoveAll(diagTempDir)
|
defer os.RemoveAll(diagTempDir)
|
||||||
|
|
||||||
@@ -853,10 +855,10 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(invalidDumps) > 0 {
|
if len(invalidDumps) > 0 {
|
||||||
log.Error("❌ Pre-restore diagnosis found issues",
|
log.Error("[FAIL] Pre-restore diagnosis found issues",
|
||||||
"invalid_dumps", len(invalidDumps),
|
"invalid_dumps", len(invalidDumps),
|
||||||
"total_dumps", len(results))
|
"total_dumps", len(results))
|
||||||
fmt.Println("\n⚠️ The following dumps have issues and will likely fail during restore:")
|
fmt.Println("\n[WARN] The following dumps have issues and will likely fail during restore:")
|
||||||
for _, name := range invalidDumps {
|
for _, name := range invalidDumps {
|
||||||
fmt.Printf(" - %s\n", name)
|
fmt.Printf(" - %s\n", name)
|
||||||
}
|
}
|
||||||
@@ -868,7 +870,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
log.Warn("Continuing despite diagnosis errors (--force enabled)")
|
log.Warn("Continuing despite diagnosis errors (--force enabled)")
|
||||||
} else {
|
} else {
|
||||||
log.Info("✅ All dumps passed diagnosis", "count", len(results))
|
log.Info("[OK] All dumps passed diagnosis", "count", len(results))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -888,7 +890,7 @@ func runRestoreCluster(cmd *cobra.Command, args []string) error {
|
|||||||
// Audit log: restore success
|
// Audit log: restore success
|
||||||
auditLogger.LogRestoreComplete(user, "all_databases", time.Since(startTime))
|
auditLogger.LogRestoreComplete(user, "all_databases", time.Since(startTime))
|
||||||
|
|
||||||
log.Info("✅ Cluster restore completed successfully")
|
log.Info("[OK] Cluster restore completed successfully")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -937,7 +939,7 @@ func runRestoreList(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Print header
|
// Print header
|
||||||
fmt.Printf("\n📦 Available backup archives in %s\n\n", backupDir)
|
fmt.Printf("\n[LIST] Available backup archives in %s\n\n", backupDir)
|
||||||
fmt.Printf("%-40s %-25s %-12s %-20s %s\n",
|
fmt.Printf("%-40s %-25s %-12s %-20s %s\n",
|
||||||
"FILENAME", "FORMAT", "SIZE", "MODIFIED", "DATABASE")
|
"FILENAME", "FORMAT", "SIZE", "MODIFIED", "DATABASE")
|
||||||
fmt.Println(strings.Repeat("-", 120))
|
fmt.Println(strings.Repeat("-", 120))
|
||||||
@@ -1054,9 +1056,9 @@ func runRestorePITR(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Display recovery target info
|
// Display recovery target info
|
||||||
log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
log.Info("=====================================================")
|
||||||
log.Info(" Point-in-Time Recovery (PITR)")
|
log.Info(" Point-in-Time Recovery (PITR)")
|
||||||
log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
log.Info("=====================================================")
|
||||||
log.Info("")
|
log.Info("")
|
||||||
log.Info(target.String())
|
log.Info(target.String())
|
||||||
log.Info("")
|
log.Info("")
|
||||||
@@ -1080,6 +1082,6 @@ func runRestorePITR(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("PITR restore failed: %w", err)
|
return fmt.Errorf("PITR restore failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("✅ PITR restore completed successfully")
|
log.Info("[OK] PITR restore completed successfully")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
56
cmd/rto.go
56
cmd/rto.go
@@ -181,13 +181,13 @@ func runRTOStatus(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
// Display status
|
// Display status
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("╔═══════════════════════════════════════════════════════════╗")
|
fmt.Println("+-----------------------------------------------------------+")
|
||||||
fmt.Println("║ RTO/RPO STATUS SUMMARY ║")
|
fmt.Println("| RTO/RPO STATUS SUMMARY |")
|
||||||
fmt.Println("╠═══════════════════════════════════════════════════════════╣")
|
fmt.Println("+-----------------------------------------------------------+")
|
||||||
fmt.Printf("║ Target RTO: %-15s Target RPO: %-15s ║\n",
|
fmt.Printf("| Target RTO: %-15s Target RPO: %-15s |\n",
|
||||||
formatDuration(config.TargetRTO),
|
formatDuration(config.TargetRTO),
|
||||||
formatDuration(config.TargetRPO))
|
formatDuration(config.TargetRPO))
|
||||||
fmt.Println("╠═══════════════════════════════════════════════════════════╣")
|
fmt.Println("+-----------------------------------------------------------+")
|
||||||
|
|
||||||
// Compliance status
|
// Compliance status
|
||||||
rpoRate := 0.0
|
rpoRate := 0.0
|
||||||
@@ -199,31 +199,31 @@ func runRTOStatus(cmd *cobra.Command, args []string) error {
|
|||||||
fullRate = float64(summary.FullyCompliant) / float64(summary.TotalDatabases) * 100
|
fullRate = float64(summary.FullyCompliant) / float64(summary.TotalDatabases) * 100
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("║ Databases: %-5d ║\n", summary.TotalDatabases)
|
fmt.Printf("| Databases: %-5d |\n", summary.TotalDatabases)
|
||||||
fmt.Printf("║ RPO Compliant: %-5d (%.0f%%) ║\n", summary.RPOCompliant, rpoRate)
|
fmt.Printf("| RPO Compliant: %-5d (%.0f%%) |\n", summary.RPOCompliant, rpoRate)
|
||||||
fmt.Printf("║ RTO Compliant: %-5d (%.0f%%) ║\n", summary.RTOCompliant, rtoRate)
|
fmt.Printf("| RTO Compliant: %-5d (%.0f%%) |\n", summary.RTOCompliant, rtoRate)
|
||||||
fmt.Printf("║ Fully Compliant: %-3d (%.0f%%) ║\n", summary.FullyCompliant, fullRate)
|
fmt.Printf("| Fully Compliant: %-3d (%.0f%%) |\n", summary.FullyCompliant, fullRate)
|
||||||
|
|
||||||
if summary.CriticalIssues > 0 {
|
if summary.CriticalIssues > 0 {
|
||||||
fmt.Printf("║ ⚠️ Critical Issues: %-3d ║\n", summary.CriticalIssues)
|
fmt.Printf("| [WARN] Critical Issues: %-3d |\n", summary.CriticalIssues)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("╠═══════════════════════════════════════════════════════════╣")
|
fmt.Println("+-----------------------------------------------------------+")
|
||||||
fmt.Printf("║ Average RPO: %-15s Worst: %-15s ║\n",
|
fmt.Printf("| Average RPO: %-15s Worst: %-15s |\n",
|
||||||
formatDuration(summary.AverageRPO),
|
formatDuration(summary.AverageRPO),
|
||||||
formatDuration(summary.WorstRPO))
|
formatDuration(summary.WorstRPO))
|
||||||
fmt.Printf("║ Average RTO: %-15s Worst: %-15s ║\n",
|
fmt.Printf("| Average RTO: %-15s Worst: %-15s |\n",
|
||||||
formatDuration(summary.AverageRTO),
|
formatDuration(summary.AverageRTO),
|
||||||
formatDuration(summary.WorstRTO))
|
formatDuration(summary.WorstRTO))
|
||||||
|
|
||||||
if summary.WorstRPODatabase != "" {
|
if summary.WorstRPODatabase != "" {
|
||||||
fmt.Printf("║ Worst RPO Database: %-38s║\n", summary.WorstRPODatabase)
|
fmt.Printf("| Worst RPO Database: %-38s|\n", summary.WorstRPODatabase)
|
||||||
}
|
}
|
||||||
if summary.WorstRTODatabase != "" {
|
if summary.WorstRTODatabase != "" {
|
||||||
fmt.Printf("║ Worst RTO Database: %-38s║\n", summary.WorstRTODatabase)
|
fmt.Printf("| Worst RTO Database: %-38s|\n", summary.WorstRTODatabase)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("╚═══════════════════════════════════════════════════════════╝")
|
fmt.Println("+-----------------------------------------------------------+")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
// Per-database status
|
// Per-database status
|
||||||
@@ -234,19 +234,19 @@ func runRTOStatus(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Println(strings.Repeat("-", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
for _, a := range analyses {
|
for _, a := range analyses {
|
||||||
status := "✅"
|
status := "[OK]"
|
||||||
if !a.RPOCompliant || !a.RTOCompliant {
|
if !a.RPOCompliant || !a.RTOCompliant {
|
||||||
status = "❌"
|
status = "[FAIL]"
|
||||||
}
|
}
|
||||||
|
|
||||||
rpoStr := formatDuration(a.CurrentRPO)
|
rpoStr := formatDuration(a.CurrentRPO)
|
||||||
rtoStr := formatDuration(a.CurrentRTO)
|
rtoStr := formatDuration(a.CurrentRTO)
|
||||||
|
|
||||||
if !a.RPOCompliant {
|
if !a.RPOCompliant {
|
||||||
rpoStr = "⚠️ " + rpoStr
|
rpoStr = "[WARN] " + rpoStr
|
||||||
}
|
}
|
||||||
if !a.RTOCompliant {
|
if !a.RTOCompliant {
|
||||||
rtoStr = "⚠️ " + rtoStr
|
rtoStr = "[WARN] " + rtoStr
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("%-25s %-12s %-12s %s\n",
|
fmt.Printf("%-25s %-12s %-12s %s\n",
|
||||||
@@ -306,21 +306,21 @@ func runRTOCheck(cmd *cobra.Command, args []string) error {
|
|||||||
exitCode := 0
|
exitCode := 0
|
||||||
for _, a := range analyses {
|
for _, a := range analyses {
|
||||||
if !a.RPOCompliant {
|
if !a.RPOCompliant {
|
||||||
fmt.Printf("❌ %s: RPO violation - current %s exceeds target %s\n",
|
fmt.Printf("[FAIL] %s: RPO violation - current %s exceeds target %s\n",
|
||||||
a.Database,
|
a.Database,
|
||||||
formatDuration(a.CurrentRPO),
|
formatDuration(a.CurrentRPO),
|
||||||
formatDuration(config.TargetRPO))
|
formatDuration(config.TargetRPO))
|
||||||
exitCode = 1
|
exitCode = 1
|
||||||
}
|
}
|
||||||
if !a.RTOCompliant {
|
if !a.RTOCompliant {
|
||||||
fmt.Printf("❌ %s: RTO violation - estimated %s exceeds target %s\n",
|
fmt.Printf("[FAIL] %s: RTO violation - estimated %s exceeds target %s\n",
|
||||||
a.Database,
|
a.Database,
|
||||||
formatDuration(a.CurrentRTO),
|
formatDuration(a.CurrentRTO),
|
||||||
formatDuration(config.TargetRTO))
|
formatDuration(config.TargetRTO))
|
||||||
exitCode = 1
|
exitCode = 1
|
||||||
}
|
}
|
||||||
if a.RPOCompliant && a.RTOCompliant {
|
if a.RPOCompliant && a.RTOCompliant {
|
||||||
fmt.Printf("✅ %s: Compliant (RPO: %s, RTO: %s)\n",
|
fmt.Printf("[OK] %s: Compliant (RPO: %s, RTO: %s)\n",
|
||||||
a.Database,
|
a.Database,
|
||||||
formatDuration(a.CurrentRPO),
|
formatDuration(a.CurrentRPO),
|
||||||
formatDuration(a.CurrentRTO))
|
formatDuration(a.CurrentRTO))
|
||||||
@@ -371,13 +371,13 @@ func outputAnalysisText(analyses []*rto.Analysis) error {
|
|||||||
fmt.Println(strings.Repeat("=", 60))
|
fmt.Println(strings.Repeat("=", 60))
|
||||||
|
|
||||||
// Status
|
// Status
|
||||||
rpoStatus := "✅ Compliant"
|
rpoStatus := "[OK] Compliant"
|
||||||
if !a.RPOCompliant {
|
if !a.RPOCompliant {
|
||||||
rpoStatus = "❌ Violation"
|
rpoStatus = "[FAIL] Violation"
|
||||||
}
|
}
|
||||||
rtoStatus := "✅ Compliant"
|
rtoStatus := "[OK] Compliant"
|
||||||
if !a.RTOCompliant {
|
if !a.RTOCompliant {
|
||||||
rtoStatus = "❌ Violation"
|
rtoStatus = "[FAIL] Violation"
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
@@ -420,7 +420,7 @@ func outputAnalysisText(analyses []*rto.Analysis) error {
|
|||||||
fmt.Println(" Recommendations:")
|
fmt.Println(" Recommendations:")
|
||||||
fmt.Println(strings.Repeat("-", 50))
|
fmt.Println(strings.Repeat("-", 50))
|
||||||
for _, r := range a.Recommendations {
|
for _, r := range a.Recommendations {
|
||||||
icon := "💡"
|
icon := "[TIP]"
|
||||||
switch r.Priority {
|
switch r.Priority {
|
||||||
case rto.PriorityCritical:
|
case rto.PriorityCritical:
|
||||||
icon = "🔴"
|
icon = "🔴"
|
||||||
|
|||||||
@@ -141,7 +141,7 @@ func testConnection(ctx context.Context) error {
|
|||||||
|
|
||||||
// Display results
|
// Display results
|
||||||
fmt.Println("Connection Test Results:")
|
fmt.Println("Connection Test Results:")
|
||||||
fmt.Printf(" Status: Connected ✅\n")
|
fmt.Printf(" Status: Connected [OK]\n")
|
||||||
fmt.Printf(" Version: %s\n", version)
|
fmt.Printf(" Version: %s\n", version)
|
||||||
fmt.Printf(" Databases: %d found\n", len(databases))
|
fmt.Printf(" Databases: %d found\n", len(databases))
|
||||||
|
|
||||||
@@ -167,7 +167,7 @@ func testConnection(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("✅ Status check completed successfully!")
|
fmt.Println("[OK] Status check completed successfully!")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -96,17 +96,17 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("📁 %s\n", filepath.Base(backupFile))
|
fmt.Printf("[FILE] %s\n", filepath.Base(backupFile))
|
||||||
|
|
||||||
if quickVerify {
|
if quickVerify {
|
||||||
// Quick check: size only
|
// Quick check: size only
|
||||||
err := verification.QuickCheck(backupFile)
|
err := verification.QuickCheck(backupFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf(" ❌ FAILED: %v\n\n", err)
|
fmt.Printf(" [FAIL] FAILED: %v\n\n", err)
|
||||||
failureCount++
|
failureCount++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Printf(" ✅ VALID (quick check)\n\n")
|
fmt.Printf(" [OK] VALID (quick check)\n\n")
|
||||||
successCount++
|
successCount++
|
||||||
} else {
|
} else {
|
||||||
// Full verification with SHA-256
|
// Full verification with SHA-256
|
||||||
@@ -116,7 +116,7 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if result.Valid {
|
if result.Valid {
|
||||||
fmt.Printf(" ✅ VALID\n")
|
fmt.Printf(" [OK] VALID\n")
|
||||||
if verboseVerify {
|
if verboseVerify {
|
||||||
meta, _ := metadata.Load(backupFile)
|
meta, _ := metadata.Load(backupFile)
|
||||||
fmt.Printf(" Size: %s\n", metadata.FormatSize(meta.SizeBytes))
|
fmt.Printf(" Size: %s\n", metadata.FormatSize(meta.SizeBytes))
|
||||||
@@ -127,7 +127,7 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
successCount++
|
successCount++
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(" ❌ FAILED: %v\n", result.Error)
|
fmt.Printf(" [FAIL] FAILED: %v\n", result.Error)
|
||||||
if verboseVerify {
|
if verboseVerify {
|
||||||
if !result.FileExists {
|
if !result.FileExists {
|
||||||
fmt.Printf(" File does not exist\n")
|
fmt.Printf(" File does not exist\n")
|
||||||
@@ -147,11 +147,11 @@ func runVerifyBackup(cmd *cobra.Command, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Summary
|
// Summary
|
||||||
fmt.Println(strings.Repeat("─", 50))
|
fmt.Println(strings.Repeat("-", 50))
|
||||||
fmt.Printf("Total: %d backups\n", len(backupFiles))
|
fmt.Printf("Total: %d backups\n", len(backupFiles))
|
||||||
fmt.Printf("✅ Valid: %d\n", successCount)
|
fmt.Printf("[OK] Valid: %d\n", successCount)
|
||||||
if failureCount > 0 {
|
if failureCount > 0 {
|
||||||
fmt.Printf("❌ Failed: %d\n", failureCount)
|
fmt.Printf("[FAIL] Failed: %d\n", failureCount)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -195,16 +195,16 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
|
|||||||
|
|
||||||
for _, uri := range args {
|
for _, uri := range args {
|
||||||
if !isCloudURI(uri) {
|
if !isCloudURI(uri) {
|
||||||
fmt.Printf("⚠️ Skipping non-cloud URI: %s\n", uri)
|
fmt.Printf("[WARN] Skipping non-cloud URI: %s\n", uri)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("☁️ %s\n", uri)
|
fmt.Printf("[CLOUD] %s\n", uri)
|
||||||
|
|
||||||
// Download and verify
|
// Download and verify
|
||||||
result, err := verifyCloudBackup(cmd.Context(), uri, quickVerify, verboseVerify)
|
result, err := verifyCloudBackup(cmd.Context(), uri, quickVerify, verboseVerify)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf(" ❌ FAILED: %v\n\n", err)
|
fmt.Printf(" [FAIL] FAILED: %v\n\n", err)
|
||||||
failureCount++
|
failureCount++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -212,7 +212,7 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
|
|||||||
// Cleanup temp file
|
// Cleanup temp file
|
||||||
defer result.Cleanup()
|
defer result.Cleanup()
|
||||||
|
|
||||||
fmt.Printf(" ✅ VALID\n")
|
fmt.Printf(" [OK] VALID\n")
|
||||||
if verboseVerify && result.MetadataPath != "" {
|
if verboseVerify && result.MetadataPath != "" {
|
||||||
meta, _ := metadata.Load(result.MetadataPath)
|
meta, _ := metadata.Load(result.MetadataPath)
|
||||||
if meta != nil {
|
if meta != nil {
|
||||||
@@ -226,7 +226,7 @@ func runVerifyCloudBackup(cmd *cobra.Command, args []string) error {
|
|||||||
successCount++
|
successCount++
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n✅ Summary: %d valid, %d failed\n", successCount, failureCount)
|
fmt.Printf("\n[OK] Summary: %d valid, %d failed\n", successCount, failureCount)
|
||||||
|
|
||||||
if failureCount > 0 {
|
if failureCount > 0 {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
|
|||||||
@@ -2,12 +2,14 @@ package auth
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"dbbackup/internal/config"
|
"dbbackup/internal/config"
|
||||||
)
|
)
|
||||||
@@ -69,7 +71,10 @@ func checkPgHbaConf(user string) AuthMethod {
|
|||||||
|
|
||||||
// findHbaFileViaPostgres asks PostgreSQL for the hba_file location
|
// findHbaFileViaPostgres asks PostgreSQL for the hba_file location
|
||||||
func findHbaFileViaPostgres() string {
|
func findHbaFileViaPostgres() string {
|
||||||
cmd := exec.Command("psql", "-U", "postgres", "-t", "-c", "SHOW hba_file;")
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, "psql", "-U", "postgres", "-t", "-c", "SHOW hba_file;")
|
||||||
output, err := cmd.Output()
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ""
|
return ""
|
||||||
@@ -82,8 +87,11 @@ func parsePgHbaConf(path string, user string) AuthMethod {
|
|||||||
// Try with sudo if we can't read directly
|
// Try with sudo if we can't read directly
|
||||||
file, err := os.Open(path)
|
file, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Try with sudo
|
// Try with sudo (with timeout)
|
||||||
cmd := exec.Command("sudo", "cat", path)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, "sudo", "cat", path)
|
||||||
output, err := cmd.Output()
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return AuthUnknown
|
return AuthUnknown
|
||||||
@@ -196,13 +204,13 @@ func CheckAuthenticationMismatch(cfg *config.Config) (bool, string) {
|
|||||||
func buildAuthMismatchMessage(osUser, dbUser string, method AuthMethod) string {
|
func buildAuthMismatchMessage(osUser, dbUser string, method AuthMethod) string {
|
||||||
var msg strings.Builder
|
var msg strings.Builder
|
||||||
|
|
||||||
msg.WriteString("\n⚠️ Authentication Mismatch Detected\n")
|
msg.WriteString("\n[WARN] Authentication Mismatch Detected\n")
|
||||||
msg.WriteString(strings.Repeat("=", 60) + "\n\n")
|
msg.WriteString(strings.Repeat("=", 60) + "\n\n")
|
||||||
|
|
||||||
msg.WriteString(fmt.Sprintf(" PostgreSQL is using '%s' authentication\n", method))
|
msg.WriteString(fmt.Sprintf(" PostgreSQL is using '%s' authentication\n", method))
|
||||||
msg.WriteString(fmt.Sprintf(" OS user '%s' cannot authenticate as DB user '%s'\n\n", osUser, dbUser))
|
msg.WriteString(fmt.Sprintf(" OS user '%s' cannot authenticate as DB user '%s'\n\n", osUser, dbUser))
|
||||||
|
|
||||||
msg.WriteString("💡 Solutions (choose one):\n\n")
|
msg.WriteString("[TIP] Solutions (choose one):\n\n")
|
||||||
|
|
||||||
msg.WriteString(fmt.Sprintf(" 1. Run as matching user:\n"))
|
msg.WriteString(fmt.Sprintf(" 1. Run as matching user:\n"))
|
||||||
msg.WriteString(fmt.Sprintf(" sudo -u %s %s\n\n", dbUser, getCommandLine()))
|
msg.WriteString(fmt.Sprintf(" sudo -u %s %s\n\n", dbUser, getCommandLine()))
|
||||||
@@ -218,7 +226,7 @@ func buildAuthMismatchMessage(osUser, dbUser string, method AuthMethod) string {
|
|||||||
msg.WriteString(" 4. Provide password via flag:\n")
|
msg.WriteString(" 4. Provide password via flag:\n")
|
||||||
msg.WriteString(fmt.Sprintf(" %s --password your_password\n\n", getCommandLine()))
|
msg.WriteString(fmt.Sprintf(" %s --password your_password\n\n", getCommandLine()))
|
||||||
|
|
||||||
msg.WriteString("📝 Note: For production use, ~/.pgpass or PGPASSWORD are recommended\n")
|
msg.WriteString("[NOTE] Note: For production use, ~/.pgpass or PGPASSWORD are recommended\n")
|
||||||
msg.WriteString(" to avoid exposing passwords in command history.\n\n")
|
msg.WriteString(" to avoid exposing passwords in command history.\n\n")
|
||||||
|
|
||||||
msg.WriteString(strings.Repeat("=", 60) + "\n")
|
msg.WriteString(strings.Repeat("=", 60) + "\n")
|
||||||
|
|||||||
@@ -87,20 +87,46 @@ func IsBackupEncrypted(backupPath string) bool {
|
|||||||
return meta.Encrypted
|
return meta.Encrypted
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback: check if file starts with encryption nonce
|
// No metadata found - check file format to determine if encrypted
|
||||||
|
// Known unencrypted formats have specific magic bytes:
|
||||||
|
// - Gzip: 1f 8b
|
||||||
|
// - PGDMP (PostgreSQL custom): 50 47 44 4d 50 (PGDMP)
|
||||||
|
// - Plain SQL: starts with text (-- or SET or CREATE)
|
||||||
|
// - Tar: 75 73 74 61 72 (ustar) at offset 257
|
||||||
|
//
|
||||||
|
// If file doesn't match any known format, it MIGHT be encrypted,
|
||||||
|
// but we return false to avoid false positives. User must provide
|
||||||
|
// metadata file or use --encrypt flag explicitly.
|
||||||
file, err := os.Open(backupPath)
|
file, err := os.Open(backupPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
// Try to read nonce - if it succeeds, likely encrypted
|
header := make([]byte, 6)
|
||||||
nonce := make([]byte, crypto.NonceSize)
|
if n, err := file.Read(header); err != nil || n < 2 {
|
||||||
if n, err := file.Read(nonce); err != nil || n != crypto.NonceSize {
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
// Check for known unencrypted formats
|
||||||
|
// Gzip magic: 1f 8b
|
||||||
|
if header[0] == 0x1f && header[1] == 0x8b {
|
||||||
|
return false // Gzip compressed - not encrypted
|
||||||
|
}
|
||||||
|
|
||||||
|
// PGDMP magic (PostgreSQL custom format)
|
||||||
|
if len(header) >= 5 && string(header[:5]) == "PGDMP" {
|
||||||
|
return false // PostgreSQL custom dump - not encrypted
|
||||||
|
}
|
||||||
|
|
||||||
|
// Plain text SQL (starts with --, SET, CREATE, etc.)
|
||||||
|
if header[0] == '-' || header[0] == 'S' || header[0] == 'C' || header[0] == '/' {
|
||||||
|
return false // Plain text SQL - not encrypted
|
||||||
|
}
|
||||||
|
|
||||||
|
// Without metadata, we cannot reliably determine encryption status
|
||||||
|
// Return false to avoid blocking restores with false positives
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// DecryptBackupFile decrypts an encrypted backup file
|
// DecryptBackupFile decrypts an encrypted backup file
|
||||||
|
|||||||
@@ -443,6 +443,14 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
|
|||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
defer func() { <-semaphore }() // Release
|
defer func() { <-semaphore }() // Release
|
||||||
|
|
||||||
|
// Panic recovery - prevent one database failure from crashing entire cluster backup
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
e.log.Error("Panic in database backup goroutine", "database", name, "panic", r)
|
||||||
|
atomic.AddInt32(&failCount, 1)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// Check for cancellation at start of goroutine
|
// Check for cancellation at start of goroutine
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
@@ -465,7 +473,7 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
|
|||||||
mu.Lock()
|
mu.Lock()
|
||||||
e.printf(" Database size: %s\n", sizeStr)
|
e.printf(" Database size: %s\n", sizeStr)
|
||||||
if size > 10*1024*1024*1024 { // > 10GB
|
if size > 10*1024*1024*1024 { // > 10GB
|
||||||
e.printf(" ⚠️ Large database detected - this may take a while\n")
|
e.printf(" [WARN] Large database detected - this may take a while\n")
|
||||||
}
|
}
|
||||||
mu.Unlock()
|
mu.Unlock()
|
||||||
}
|
}
|
||||||
@@ -502,40 +510,24 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
|
|||||||
|
|
||||||
cmd := e.db.BuildBackupCommand(name, dumpFile, options)
|
cmd := e.db.BuildBackupCommand(name, dumpFile, options)
|
||||||
|
|
||||||
// Calculate timeout based on database size:
|
// NO TIMEOUT for individual database backups
|
||||||
// - Minimum 2 hours for small databases
|
// Large databases with large objects can take many hours
|
||||||
// - Add 1 hour per 20GB for large databases
|
// The parent context handles cancellation if needed
|
||||||
// - This allows ~69GB database to take up to 5+ hours
|
err := e.executeCommand(ctx, cmd, dumpFile)
|
||||||
timeout := 2 * time.Hour
|
|
||||||
if size, err := e.db.GetDatabaseSize(ctx, name); err == nil {
|
|
||||||
sizeGB := size / (1024 * 1024 * 1024)
|
|
||||||
if sizeGB > 20 {
|
|
||||||
extraHours := (sizeGB / 20) + 1
|
|
||||||
timeout = time.Duration(2+extraHours) * time.Hour
|
|
||||||
mu.Lock()
|
|
||||||
e.printf(" Extended timeout: %v (for %dGB database)\n", timeout, sizeGB)
|
|
||||||
mu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dbCtx, cancel := context.WithTimeout(ctx, timeout)
|
|
||||||
defer cancel()
|
|
||||||
err := e.executeCommand(dbCtx, cmd, dumpFile)
|
|
||||||
cancel()
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.log.Warn("Failed to backup database", "database", name, "error", err)
|
e.log.Warn("Failed to backup database", "database", name, "error", err)
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
e.printf(" ⚠️ WARNING: Failed to backup %s: %v\n", name, err)
|
e.printf(" [WARN] WARNING: Failed to backup %s: %v\n", name, err)
|
||||||
mu.Unlock()
|
mu.Unlock()
|
||||||
atomic.AddInt32(&failCount, 1)
|
atomic.AddInt32(&failCount, 1)
|
||||||
} else {
|
} else {
|
||||||
compressedCandidate := strings.TrimSuffix(dumpFile, ".dump") + ".sql.gz"
|
compressedCandidate := strings.TrimSuffix(dumpFile, ".dump") + ".sql.gz"
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
if info, err := os.Stat(compressedCandidate); err == nil {
|
if info, err := os.Stat(compressedCandidate); err == nil {
|
||||||
e.printf(" ✅ Completed %s (%s)\n", name, formatBytes(info.Size()))
|
e.printf(" [OK] Completed %s (%s)\n", name, formatBytes(info.Size()))
|
||||||
} else if info, err := os.Stat(dumpFile); err == nil {
|
} else if info, err := os.Stat(dumpFile); err == nil {
|
||||||
e.printf(" ✅ Completed %s (%s)\n", name, formatBytes(info.Size()))
|
e.printf(" [OK] Completed %s (%s)\n", name, formatBytes(info.Size()))
|
||||||
}
|
}
|
||||||
mu.Unlock()
|
mu.Unlock()
|
||||||
atomic.AddInt32(&successCount, 1)
|
atomic.AddInt32(&successCount, 1)
|
||||||
@@ -614,12 +606,36 @@ func (e *Engine) executeCommandWithProgress(ctx context.Context, cmdArgs []strin
|
|||||||
return fmt.Errorf("failed to start command: %w", err)
|
return fmt.Errorf("failed to start command: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Monitor progress via stderr
|
// Monitor progress via stderr in goroutine
|
||||||
go e.monitorCommandProgress(stderr, tracker)
|
stderrDone := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
defer close(stderrDone)
|
||||||
|
e.monitorCommandProgress(stderr, tracker)
|
||||||
|
}()
|
||||||
|
|
||||||
// Wait for command to complete
|
// Wait for command to complete with proper context handling
|
||||||
if err := cmd.Wait(); err != nil {
|
cmdDone := make(chan error, 1)
|
||||||
return fmt.Errorf("backup command failed: %w", err)
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed (success or failure)
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Context cancelled - kill process to unblock
|
||||||
|
e.log.Warn("Backup cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone // Wait for goroutine to finish
|
||||||
|
cmdErr = ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for stderr reader to finish
|
||||||
|
<-stderrDone
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
|
return fmt.Errorf("backup command failed: %w", cmdErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -696,8 +712,12 @@ func (e *Engine) executeMySQLWithProgressAndCompression(ctx context.Context, cmd
|
|||||||
return fmt.Errorf("failed to get stderr pipe: %w", err)
|
return fmt.Errorf("failed to get stderr pipe: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start monitoring progress
|
// Start monitoring progress in goroutine
|
||||||
go e.monitorCommandProgress(stderr, tracker)
|
stderrDone := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
defer close(stderrDone)
|
||||||
|
e.monitorCommandProgress(stderr, tracker)
|
||||||
|
}()
|
||||||
|
|
||||||
// Start both commands
|
// Start both commands
|
||||||
if err := gzipCmd.Start(); err != nil {
|
if err := gzipCmd.Start(); err != nil {
|
||||||
@@ -705,20 +725,41 @@ func (e *Engine) executeMySQLWithProgressAndCompression(ctx context.Context, cmd
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := dumpCmd.Start(); err != nil {
|
if err := dumpCmd.Start(); err != nil {
|
||||||
|
gzipCmd.Process.Kill()
|
||||||
return fmt.Errorf("failed to start mysqldump: %w", err)
|
return fmt.Errorf("failed to start mysqldump: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for mysqldump to complete
|
// Wait for mysqldump with context handling
|
||||||
if err := dumpCmd.Wait(); err != nil {
|
dumpDone := make(chan error, 1)
|
||||||
return fmt.Errorf("mysqldump failed: %w", err)
|
go func() {
|
||||||
|
dumpDone <- dumpCmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var dumpErr error
|
||||||
|
select {
|
||||||
|
case dumpErr = <-dumpDone:
|
||||||
|
// mysqldump completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("Backup cancelled - killing mysqldump")
|
||||||
|
dumpCmd.Process.Kill()
|
||||||
|
gzipCmd.Process.Kill()
|
||||||
|
<-dumpDone
|
||||||
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wait for stderr reader
|
||||||
|
<-stderrDone
|
||||||
|
|
||||||
// Close pipe and wait for gzip
|
// Close pipe and wait for gzip
|
||||||
pipe.Close()
|
pipe.Close()
|
||||||
if err := gzipCmd.Wait(); err != nil {
|
if err := gzipCmd.Wait(); err != nil {
|
||||||
return fmt.Errorf("gzip failed: %w", err)
|
return fmt.Errorf("gzip failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if dumpErr != nil {
|
||||||
|
return fmt.Errorf("mysqldump failed: %w", dumpErr)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -749,19 +790,45 @@ func (e *Engine) executeMySQLWithCompression(ctx context.Context, cmdArgs []stri
|
|||||||
gzipCmd.Stdin = stdin
|
gzipCmd.Stdin = stdin
|
||||||
gzipCmd.Stdout = outFile
|
gzipCmd.Stdout = outFile
|
||||||
|
|
||||||
// Start both commands
|
// Start gzip first
|
||||||
if err := gzipCmd.Start(); err != nil {
|
if err := gzipCmd.Start(); err != nil {
|
||||||
return fmt.Errorf("failed to start gzip: %w", err)
|
return fmt.Errorf("failed to start gzip: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := dumpCmd.Run(); err != nil {
|
// Start mysqldump
|
||||||
return fmt.Errorf("mysqldump failed: %w", err)
|
if err := dumpCmd.Start(); err != nil {
|
||||||
|
gzipCmd.Process.Kill()
|
||||||
|
return fmt.Errorf("failed to start mysqldump: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wait for mysqldump with context handling
|
||||||
|
dumpDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
dumpDone <- dumpCmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var dumpErr error
|
||||||
|
select {
|
||||||
|
case dumpErr = <-dumpDone:
|
||||||
|
// mysqldump completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("Backup cancelled - killing mysqldump")
|
||||||
|
dumpCmd.Process.Kill()
|
||||||
|
gzipCmd.Process.Kill()
|
||||||
|
<-dumpDone
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close pipe and wait for gzip
|
||||||
|
stdin.Close()
|
||||||
if err := gzipCmd.Wait(); err != nil {
|
if err := gzipCmd.Wait(); err != nil {
|
||||||
return fmt.Errorf("gzip failed: %w", err)
|
return fmt.Errorf("gzip failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if dumpErr != nil {
|
||||||
|
return fmt.Errorf("mysqldump failed: %w", dumpErr)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -898,15 +965,46 @@ func (e *Engine) createArchive(ctx context.Context, sourceDir, outputFile string
|
|||||||
goto regularTar
|
goto regularTar
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for tar to finish
|
// Wait for tar with proper context handling
|
||||||
if err := cmd.Wait(); err != nil {
|
tarDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
tarDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var tarErr error
|
||||||
|
select {
|
||||||
|
case tarErr = <-tarDone:
|
||||||
|
// tar completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("Archive creation cancelled - killing processes")
|
||||||
|
cmd.Process.Kill()
|
||||||
pigzCmd.Process.Kill()
|
pigzCmd.Process.Kill()
|
||||||
return fmt.Errorf("tar failed: %w", err)
|
<-tarDone
|
||||||
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for pigz to finish
|
if tarErr != nil {
|
||||||
if err := pigzCmd.Wait(); err != nil {
|
pigzCmd.Process.Kill()
|
||||||
return fmt.Errorf("pigz compression failed: %w", err)
|
return fmt.Errorf("tar failed: %w", tarErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for pigz with proper context handling
|
||||||
|
pigzDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
pigzDone <- pigzCmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var pigzErr error
|
||||||
|
select {
|
||||||
|
case pigzErr = <-pigzDone:
|
||||||
|
case <-ctx.Done():
|
||||||
|
pigzCmd.Process.Kill()
|
||||||
|
<-pigzDone
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if pigzErr != nil {
|
||||||
|
return fmt.Errorf("pigz compression failed: %w", pigzErr)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -1251,8 +1349,10 @@ func (e *Engine) executeCommand(ctx context.Context, cmdArgs []string, outputFil
|
|||||||
return fmt.Errorf("failed to start backup command: %w", err)
|
return fmt.Errorf("failed to start backup command: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stream stderr output (don't buffer it all in memory)
|
// Stream stderr output in goroutine (don't buffer it all in memory)
|
||||||
|
stderrDone := make(chan struct{})
|
||||||
go func() {
|
go func() {
|
||||||
|
defer close(stderrDone)
|
||||||
scanner := bufio.NewScanner(stderr)
|
scanner := bufio.NewScanner(stderr)
|
||||||
scanner.Buffer(make([]byte, 64*1024), 1024*1024) // 1MB max line size
|
scanner.Buffer(make([]byte, 64*1024), 1024*1024) // 1MB max line size
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
@@ -1263,10 +1363,30 @@ func (e *Engine) executeCommand(ctx context.Context, cmdArgs []string, outputFil
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Wait for command to complete
|
// Wait for command to complete with proper context handling
|
||||||
if err := cmd.Wait(); err != nil {
|
cmdDone := make(chan error, 1)
|
||||||
e.log.Error("Backup command failed", "error", err, "database", filepath.Base(outputFile))
|
go func() {
|
||||||
return fmt.Errorf("backup command failed: %w", err)
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed (success or failure)
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Context cancelled - kill process to unblock
|
||||||
|
e.log.Warn("Backup cancelled - killing pg_dump process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone // Wait for goroutine to finish
|
||||||
|
cmdErr = ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for stderr reader to finish
|
||||||
|
<-stderrDone
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
|
e.log.Error("Backup command failed", "error", cmdErr, "database", filepath.Base(outputFile))
|
||||||
|
return fmt.Errorf("backup command failed: %w", cmdErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -242,7 +242,7 @@ func TestIncrementalBackupRestore(t *testing.T) {
|
|||||||
t.Errorf("Unchanged file base/12345/1235 not found in restore: %v", err)
|
t.Errorf("Unchanged file base/12345/1235 not found in restore: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Log("✅ Incremental backup and restore test completed successfully")
|
t.Log("[OK] Incremental backup and restore test completed successfully")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestIncrementalBackupErrors tests error handling
|
// TestIncrementalBackupErrors tests error handling
|
||||||
|
|||||||
@@ -75,16 +75,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
status = "CRITICAL"
|
status = "CRITICAL"
|
||||||
icon = "❌"
|
icon = "[X]"
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
status = "WARNING"
|
status = "WARNING"
|
||||||
icon = "⚠️ "
|
icon = "[!]"
|
||||||
} else {
|
} else {
|
||||||
status = "OK"
|
status = "OK"
|
||||||
icon = "✓"
|
icon = "[+]"
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := fmt.Sprintf(`📊 Disk Space Check (%s):
|
msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
|
||||||
Path: %s
|
Path: %s
|
||||||
Total: %s
|
Total: %s
|
||||||
Available: %s (%.1f%% used)
|
Available: %s (%.1f%% used)
|
||||||
@@ -98,13 +98,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
status)
|
status)
|
||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
msg += "\n \n ⚠️ CRITICAL: Insufficient disk space!"
|
msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
|
||||||
msg += "\n Operation blocked. Free up space before continuing."
|
msg += "\n Operation blocked. Free up space before continuing."
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
msg += "\n \n ⚠️ WARNING: Low disk space!"
|
msg += "\n \n [!] WARNING: Low disk space!"
|
||||||
msg += "\n Backup may fail if database is larger than estimated."
|
msg += "\n Backup may fail if database is larger than estimated."
|
||||||
} else {
|
} else {
|
||||||
msg += "\n \n ✓ Sufficient space available"
|
msg += "\n \n [+] Sufficient space available"
|
||||||
}
|
}
|
||||||
|
|
||||||
return msg
|
return msg
|
||||||
|
|||||||
@@ -75,16 +75,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
status = "CRITICAL"
|
status = "CRITICAL"
|
||||||
icon = "❌"
|
icon = "[X]"
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
status = "WARNING"
|
status = "WARNING"
|
||||||
icon = "⚠️ "
|
icon = "[!]"
|
||||||
} else {
|
} else {
|
||||||
status = "OK"
|
status = "OK"
|
||||||
icon = "✓"
|
icon = "[+]"
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := fmt.Sprintf(`📊 Disk Space Check (%s):
|
msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
|
||||||
Path: %s
|
Path: %s
|
||||||
Total: %s
|
Total: %s
|
||||||
Available: %s (%.1f%% used)
|
Available: %s (%.1f%% used)
|
||||||
@@ -98,13 +98,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
status)
|
status)
|
||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
msg += "\n \n ⚠️ CRITICAL: Insufficient disk space!"
|
msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
|
||||||
msg += "\n Operation blocked. Free up space before continuing."
|
msg += "\n Operation blocked. Free up space before continuing."
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
msg += "\n \n ⚠️ WARNING: Low disk space!"
|
msg += "\n \n [!] WARNING: Low disk space!"
|
||||||
msg += "\n Backup may fail if database is larger than estimated."
|
msg += "\n Backup may fail if database is larger than estimated."
|
||||||
} else {
|
} else {
|
||||||
msg += "\n \n ✓ Sufficient space available"
|
msg += "\n \n [+] Sufficient space available"
|
||||||
}
|
}
|
||||||
|
|
||||||
return msg
|
return msg
|
||||||
|
|||||||
@@ -58,16 +58,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
status = "CRITICAL"
|
status = "CRITICAL"
|
||||||
icon = "❌"
|
icon = "[X]"
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
status = "WARNING"
|
status = "WARNING"
|
||||||
icon = "⚠️ "
|
icon = "[!]"
|
||||||
} else {
|
} else {
|
||||||
status = "OK"
|
status = "OK"
|
||||||
icon = "✓"
|
icon = "[+]"
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := fmt.Sprintf(`📊 Disk Space Check (%s):
|
msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
|
||||||
Path: %s
|
Path: %s
|
||||||
Total: %s
|
Total: %s
|
||||||
Available: %s (%.1f%% used)
|
Available: %s (%.1f%% used)
|
||||||
@@ -81,13 +81,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
status)
|
status)
|
||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
msg += "\n \n ⚠️ CRITICAL: Insufficient disk space!"
|
msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
|
||||||
msg += "\n Operation blocked. Free up space before continuing."
|
msg += "\n Operation blocked. Free up space before continuing."
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
msg += "\n \n ⚠️ WARNING: Low disk space!"
|
msg += "\n \n [!] WARNING: Low disk space!"
|
||||||
msg += "\n Backup may fail if database is larger than estimated."
|
msg += "\n Backup may fail if database is larger than estimated."
|
||||||
} else {
|
} else {
|
||||||
msg += "\n \n ✓ Sufficient space available"
|
msg += "\n \n [+] Sufficient space available"
|
||||||
}
|
}
|
||||||
|
|
||||||
return msg
|
return msg
|
||||||
|
|||||||
@@ -94,16 +94,16 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
status = "CRITICAL"
|
status = "CRITICAL"
|
||||||
icon = "❌"
|
icon = "[X]"
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
status = "WARNING"
|
status = "WARNING"
|
||||||
icon = "⚠️ "
|
icon = "[!]"
|
||||||
} else {
|
} else {
|
||||||
status = "OK"
|
status = "OK"
|
||||||
icon = "✓"
|
icon = "[+]"
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := fmt.Sprintf(`📊 Disk Space Check (%s):
|
msg := fmt.Sprintf(`[DISK] Disk Space Check (%s):
|
||||||
Path: %s
|
Path: %s
|
||||||
Total: %s
|
Total: %s
|
||||||
Available: %s (%.1f%% used)
|
Available: %s (%.1f%% used)
|
||||||
@@ -117,13 +117,13 @@ func FormatDiskSpaceMessage(check *DiskSpaceCheck) string {
|
|||||||
status)
|
status)
|
||||||
|
|
||||||
if check.Critical {
|
if check.Critical {
|
||||||
msg += "\n \n ⚠️ CRITICAL: Insufficient disk space!"
|
msg += "\n \n [!!] CRITICAL: Insufficient disk space!"
|
||||||
msg += "\n Operation blocked. Free up space before continuing."
|
msg += "\n Operation blocked. Free up space before continuing."
|
||||||
} else if check.Warning {
|
} else if check.Warning {
|
||||||
msg += "\n \n ⚠️ WARNING: Low disk space!"
|
msg += "\n \n [!] WARNING: Low disk space!"
|
||||||
msg += "\n Backup may fail if database is larger than estimated."
|
msg += "\n Backup may fail if database is larger than estimated."
|
||||||
} else {
|
} else {
|
||||||
msg += "\n \n ✓ Sufficient space available"
|
msg += "\n \n [+] Sufficient space available"
|
||||||
}
|
}
|
||||||
|
|
||||||
return msg
|
return msg
|
||||||
|
|||||||
@@ -234,22 +234,22 @@ func FormatErrorWithHint(errorMsg string) string {
|
|||||||
var icon string
|
var icon string
|
||||||
switch classification.Type {
|
switch classification.Type {
|
||||||
case "ignorable":
|
case "ignorable":
|
||||||
icon = "ℹ️ "
|
icon = "[i]"
|
||||||
case "warning":
|
case "warning":
|
||||||
icon = "⚠️ "
|
icon = "[!]"
|
||||||
case "critical":
|
case "critical":
|
||||||
icon = "❌"
|
icon = "[X]"
|
||||||
case "fatal":
|
case "fatal":
|
||||||
icon = "🛑"
|
icon = "[!!]"
|
||||||
default:
|
default:
|
||||||
icon = "⚠️ "
|
icon = "[!]"
|
||||||
}
|
}
|
||||||
|
|
||||||
output := fmt.Sprintf("%s %s Error\n\n", icon, strings.ToUpper(classification.Type))
|
output := fmt.Sprintf("%s %s Error\n\n", icon, strings.ToUpper(classification.Type))
|
||||||
output += fmt.Sprintf("Category: %s\n", classification.Category)
|
output += fmt.Sprintf("Category: %s\n", classification.Category)
|
||||||
output += fmt.Sprintf("Message: %s\n\n", classification.Message)
|
output += fmt.Sprintf("Message: %s\n\n", classification.Message)
|
||||||
output += fmt.Sprintf("💡 Hint: %s\n\n", classification.Hint)
|
output += fmt.Sprintf("[HINT] Hint: %s\n\n", classification.Hint)
|
||||||
output += fmt.Sprintf("🔧 Action: %s\n", classification.Action)
|
output += fmt.Sprintf("[ACTION] Action: %s\n", classification.Action)
|
||||||
|
|
||||||
return output
|
return output
|
||||||
}
|
}
|
||||||
@@ -257,7 +257,7 @@ func FormatErrorWithHint(errorMsg string) string {
|
|||||||
// FormatMultipleErrors formats multiple errors with classification
|
// FormatMultipleErrors formats multiple errors with classification
|
||||||
func FormatMultipleErrors(errors []string) string {
|
func FormatMultipleErrors(errors []string) string {
|
||||||
if len(errors) == 0 {
|
if len(errors) == 0 {
|
||||||
return "✓ No errors"
|
return "[+] No errors"
|
||||||
}
|
}
|
||||||
|
|
||||||
ignorable := 0
|
ignorable := 0
|
||||||
@@ -285,22 +285,22 @@ func FormatMultipleErrors(errors []string) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
output := "📊 Error Summary:\n\n"
|
output := "[SUMMARY] Error Summary:\n\n"
|
||||||
if ignorable > 0 {
|
if ignorable > 0 {
|
||||||
output += fmt.Sprintf(" ℹ️ %d ignorable (objects already exist)\n", ignorable)
|
output += fmt.Sprintf(" [i] %d ignorable (objects already exist)\n", ignorable)
|
||||||
}
|
}
|
||||||
if warnings > 0 {
|
if warnings > 0 {
|
||||||
output += fmt.Sprintf(" ⚠️ %d warnings\n", warnings)
|
output += fmt.Sprintf(" [!] %d warnings\n", warnings)
|
||||||
}
|
}
|
||||||
if critical > 0 {
|
if critical > 0 {
|
||||||
output += fmt.Sprintf(" ❌ %d critical errors\n", critical)
|
output += fmt.Sprintf(" [X] %d critical errors\n", critical)
|
||||||
}
|
}
|
||||||
if fatal > 0 {
|
if fatal > 0 {
|
||||||
output += fmt.Sprintf(" 🛑 %d fatal errors\n", fatal)
|
output += fmt.Sprintf(" [!!] %d fatal errors\n", fatal)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(criticalErrors) > 0 {
|
if len(criticalErrors) > 0 {
|
||||||
output += "\n📝 Critical Issues:\n\n"
|
output += "\n[CRITICAL] Critical Issues:\n\n"
|
||||||
for i, err := range criticalErrors {
|
for i, err := range criticalErrors {
|
||||||
class := ClassifyError(err)
|
class := ClassifyError(err)
|
||||||
output += fmt.Sprintf("%d. %s\n", i+1, class.Hint)
|
output += fmt.Sprintf("%d. %s\n", i+1, class.Hint)
|
||||||
|
|||||||
@@ -49,15 +49,15 @@ func (s CheckStatus) String() string {
|
|||||||
func (s CheckStatus) Icon() string {
|
func (s CheckStatus) Icon() string {
|
||||||
switch s {
|
switch s {
|
||||||
case StatusPassed:
|
case StatusPassed:
|
||||||
return "✓"
|
return "[+]"
|
||||||
case StatusWarning:
|
case StatusWarning:
|
||||||
return "⚠"
|
return "[!]"
|
||||||
case StatusFailed:
|
case StatusFailed:
|
||||||
return "✗"
|
return "[-]"
|
||||||
case StatusSkipped:
|
case StatusSkipped:
|
||||||
return "○"
|
return "[ ]"
|
||||||
default:
|
default:
|
||||||
return "?"
|
return "[?]"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -11,9 +11,9 @@ func FormatPreflightReport(result *PreflightResult, dbName string, verbose bool)
|
|||||||
var sb strings.Builder
|
var sb strings.Builder
|
||||||
|
|
||||||
sb.WriteString("\n")
|
sb.WriteString("\n")
|
||||||
sb.WriteString("╔══════════════════════════════════════════════════════════════╗\n")
|
sb.WriteString("+==============================================================+\n")
|
||||||
sb.WriteString("║ [DRY RUN] Preflight Check Results ║\n")
|
sb.WriteString("| [DRY RUN] Preflight Check Results |\n")
|
||||||
sb.WriteString("╚══════════════════════════════════════════════════════════════╝\n")
|
sb.WriteString("+==============================================================+\n")
|
||||||
sb.WriteString("\n")
|
sb.WriteString("\n")
|
||||||
|
|
||||||
// Database info
|
// Database info
|
||||||
@@ -29,7 +29,7 @@ func FormatPreflightReport(result *PreflightResult, dbName string, verbose bool)
|
|||||||
|
|
||||||
// Check results
|
// Check results
|
||||||
sb.WriteString(" Checks:\n")
|
sb.WriteString(" Checks:\n")
|
||||||
sb.WriteString(" ─────────────────────────────────────────────────────────────\n")
|
sb.WriteString(" --------------------------------------------------------------\n")
|
||||||
|
|
||||||
for _, check := range result.Checks {
|
for _, check := range result.Checks {
|
||||||
icon := check.Status.Icon()
|
icon := check.Status.Icon()
|
||||||
@@ -40,26 +40,26 @@ func FormatPreflightReport(result *PreflightResult, dbName string, verbose bool)
|
|||||||
color, icon, reset, check.Name+":", check.Message))
|
color, icon, reset, check.Name+":", check.Message))
|
||||||
|
|
||||||
if verbose && check.Details != "" {
|
if verbose && check.Details != "" {
|
||||||
sb.WriteString(fmt.Sprintf(" └─ %s\n", check.Details))
|
sb.WriteString(fmt.Sprintf(" +- %s\n", check.Details))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sb.WriteString(" ─────────────────────────────────────────────────────────────\n")
|
sb.WriteString(" --------------------------------------------------------------\n")
|
||||||
sb.WriteString("\n")
|
sb.WriteString("\n")
|
||||||
|
|
||||||
// Summary
|
// Summary
|
||||||
if result.AllPassed {
|
if result.AllPassed {
|
||||||
if result.HasWarnings {
|
if result.HasWarnings {
|
||||||
sb.WriteString(" ⚠️ All checks passed with warnings\n")
|
sb.WriteString(" [!] All checks passed with warnings\n")
|
||||||
sb.WriteString("\n")
|
sb.WriteString("\n")
|
||||||
sb.WriteString(" Ready to backup. Remove --dry-run to execute.\n")
|
sb.WriteString(" Ready to backup. Remove --dry-run to execute.\n")
|
||||||
} else {
|
} else {
|
||||||
sb.WriteString(" ✅ All checks passed\n")
|
sb.WriteString(" [OK] All checks passed\n")
|
||||||
sb.WriteString("\n")
|
sb.WriteString("\n")
|
||||||
sb.WriteString(" Ready to backup. Remove --dry-run to execute.\n")
|
sb.WriteString(" Ready to backup. Remove --dry-run to execute.\n")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
sb.WriteString(fmt.Sprintf(" ❌ %d check(s) failed\n", result.FailureCount))
|
sb.WriteString(fmt.Sprintf(" [FAIL] %d check(s) failed\n", result.FailureCount))
|
||||||
sb.WriteString("\n")
|
sb.WriteString("\n")
|
||||||
sb.WriteString(" Fix the issues above before running backup.\n")
|
sb.WriteString(" Fix the issues above before running backup.\n")
|
||||||
}
|
}
|
||||||
@@ -96,7 +96,7 @@ func FormatPreflightReportPlain(result *PreflightResult, dbName string) string {
|
|||||||
status := fmt.Sprintf("[%s]", check.Status.String())
|
status := fmt.Sprintf("[%s]", check.Status.String())
|
||||||
sb.WriteString(fmt.Sprintf(" %-10s %-25s %s\n", status, check.Name+":", check.Message))
|
sb.WriteString(fmt.Sprintf(" %-10s %-25s %s\n", status, check.Name+":", check.Message))
|
||||||
if check.Details != "" {
|
if check.Details != "" {
|
||||||
sb.WriteString(fmt.Sprintf(" └─ %s\n", check.Details))
|
sb.WriteString(fmt.Sprintf(" +- %s\n", check.Details))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
"dbbackup/internal/logger"
|
"dbbackup/internal/logger"
|
||||||
)
|
)
|
||||||
@@ -116,8 +117,11 @@ func KillOrphanedProcesses(log logger.Logger) error {
|
|||||||
|
|
||||||
// findProcessesByName returns PIDs of processes matching the given name
|
// findProcessesByName returns PIDs of processes matching the given name
|
||||||
func findProcessesByName(name string, excludePID int) ([]int, error) {
|
func findProcessesByName(name string, excludePID int) ([]int, error) {
|
||||||
// Use pgrep for efficient process searching
|
// Use pgrep for efficient process searching with timeout
|
||||||
cmd := exec.Command("pgrep", "-x", name)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, "pgrep", "-x", name)
|
||||||
output, err := cmd.Output()
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Exit code 1 means no processes found (not an error)
|
// Exit code 1 means no processes found (not an error)
|
||||||
|
|||||||
@@ -90,7 +90,7 @@ func NewAzureBackend(cfg *Config) (*AzureBackend, error) {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Use default Azure credential (managed identity, environment variables, etc.)
|
// Use default Azure credential (managed identity, environment variables, etc.)
|
||||||
return nil, fmt.Errorf("Azure authentication requires account name and key, or use AZURE_STORAGE_CONNECTION_STRING environment variable")
|
return nil, fmt.Errorf("azure authentication requires account name and key, or use AZURE_STORAGE_CONNECTION_STRING environment variable")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -217,14 +217,17 @@ func New() *Config {
|
|||||||
SingleDBName: getEnvString("SINGLE_DB_NAME", ""),
|
SingleDBName: getEnvString("SINGLE_DB_NAME", ""),
|
||||||
RestoreDBName: getEnvString("RESTORE_DB_NAME", ""),
|
RestoreDBName: getEnvString("RESTORE_DB_NAME", ""),
|
||||||
|
|
||||||
// Timeouts
|
// Timeouts - default 24 hours (1440 min) to handle very large databases with large objects
|
||||||
ClusterTimeoutMinutes: getEnvInt("CLUSTER_TIMEOUT_MIN", 240),
|
ClusterTimeoutMinutes: getEnvInt("CLUSTER_TIMEOUT_MIN", 1440),
|
||||||
|
|
||||||
// Cluster parallelism (default: 2 concurrent operations for faster cluster backup/restore)
|
// Cluster parallelism (default: 2 concurrent operations for faster cluster backup/restore)
|
||||||
ClusterParallelism: getEnvInt("CLUSTER_PARALLELISM", 2),
|
ClusterParallelism: getEnvInt("CLUSTER_PARALLELISM", 2),
|
||||||
|
|
||||||
|
// Working directory for large operations (default: system temp)
|
||||||
|
WorkDir: getEnvString("WORK_DIR", ""),
|
||||||
|
|
||||||
// Swap file management
|
// Swap file management
|
||||||
SwapFilePath: getEnvString("SWAP_FILE_PATH", "/tmp/dbbackup_swap"),
|
SwapFilePath: "", // Will be set after WorkDir is initialized
|
||||||
SwapFileSizeGB: getEnvInt("SWAP_FILE_SIZE_GB", 0), // 0 = disabled by default
|
SwapFileSizeGB: getEnvInt("SWAP_FILE_SIZE_GB", 0), // 0 = disabled by default
|
||||||
AutoSwap: getEnvBool("AUTO_SWAP", false),
|
AutoSwap: getEnvBool("AUTO_SWAP", false),
|
||||||
|
|
||||||
@@ -264,6 +267,13 @@ func New() *Config {
|
|||||||
cfg.SSLMode = "prefer"
|
cfg.SSLMode = "prefer"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set SwapFilePath using WorkDir if not explicitly set via env var
|
||||||
|
if envSwap := os.Getenv("SWAP_FILE_PATH"); envSwap != "" {
|
||||||
|
cfg.SwapFilePath = envSwap
|
||||||
|
} else {
|
||||||
|
cfg.SwapFilePath = filepath.Join(cfg.GetEffectiveWorkDir(), "dbbackup_swap")
|
||||||
|
}
|
||||||
|
|
||||||
return cfg
|
return cfg
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -499,6 +509,14 @@ func GetCurrentOSUser() string {
|
|||||||
return getCurrentUser()
|
return getCurrentUser()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetEffectiveWorkDir returns the configured WorkDir or system temp as fallback
|
||||||
|
func (c *Config) GetEffectiveWorkDir() string {
|
||||||
|
if c.WorkDir != "" {
|
||||||
|
return c.WorkDir
|
||||||
|
}
|
||||||
|
return os.TempDir()
|
||||||
|
}
|
||||||
|
|
||||||
func getDefaultBackupDir() string {
|
func getDefaultBackupDir() string {
|
||||||
// Try to create a sensible default backup directory
|
// Try to create a sensible default backup directory
|
||||||
homeDir, _ := os.UserHomeDir()
|
homeDir, _ := os.UserHomeDir()
|
||||||
@@ -516,7 +534,7 @@ func getDefaultBackupDir() string {
|
|||||||
return "/var/lib/pgsql/pg_backups"
|
return "/var/lib/pgsql/pg_backups"
|
||||||
}
|
}
|
||||||
|
|
||||||
return "/tmp/db_backups"
|
return filepath.Join(os.TempDir(), "db_backups")
|
||||||
}
|
}
|
||||||
|
|
||||||
// CPU-related helper functions
|
// CPU-related helper functions
|
||||||
|
|||||||
@@ -28,8 +28,9 @@ type LocalConfig struct {
|
|||||||
DumpJobs int
|
DumpJobs int
|
||||||
|
|
||||||
// Performance settings
|
// Performance settings
|
||||||
CPUWorkload string
|
CPUWorkload string
|
||||||
MaxCores int
|
MaxCores int
|
||||||
|
ClusterTimeout int // Cluster operation timeout in minutes (default: 1440 = 24 hours)
|
||||||
|
|
||||||
// Security settings
|
// Security settings
|
||||||
RetentionDays int
|
RetentionDays int
|
||||||
@@ -121,6 +122,10 @@ func LoadLocalConfig() (*LocalConfig, error) {
|
|||||||
if mc, err := strconv.Atoi(value); err == nil {
|
if mc, err := strconv.Atoi(value); err == nil {
|
||||||
cfg.MaxCores = mc
|
cfg.MaxCores = mc
|
||||||
}
|
}
|
||||||
|
case "cluster_timeout":
|
||||||
|
if ct, err := strconv.Atoi(value); err == nil {
|
||||||
|
cfg.ClusterTimeout = ct
|
||||||
|
}
|
||||||
}
|
}
|
||||||
case "security":
|
case "security":
|
||||||
switch key {
|
switch key {
|
||||||
@@ -199,6 +204,9 @@ func SaveLocalConfig(cfg *LocalConfig) error {
|
|||||||
if cfg.MaxCores != 0 {
|
if cfg.MaxCores != 0 {
|
||||||
sb.WriteString(fmt.Sprintf("max_cores = %d\n", cfg.MaxCores))
|
sb.WriteString(fmt.Sprintf("max_cores = %d\n", cfg.MaxCores))
|
||||||
}
|
}
|
||||||
|
if cfg.ClusterTimeout != 0 {
|
||||||
|
sb.WriteString(fmt.Sprintf("cluster_timeout = %d\n", cfg.ClusterTimeout))
|
||||||
|
}
|
||||||
sb.WriteString("\n")
|
sb.WriteString("\n")
|
||||||
|
|
||||||
// Security section
|
// Security section
|
||||||
@@ -268,6 +276,10 @@ func ApplyLocalConfig(cfg *Config, local *LocalConfig) {
|
|||||||
if local.MaxCores != 0 {
|
if local.MaxCores != 0 {
|
||||||
cfg.MaxCores = local.MaxCores
|
cfg.MaxCores = local.MaxCores
|
||||||
}
|
}
|
||||||
|
// Apply cluster timeout from config file (overrides default)
|
||||||
|
if local.ClusterTimeout != 0 {
|
||||||
|
cfg.ClusterTimeoutMinutes = local.ClusterTimeout
|
||||||
|
}
|
||||||
if cfg.RetentionDays == 30 && local.RetentionDays != 0 {
|
if cfg.RetentionDays == 30 && local.RetentionDays != 0 {
|
||||||
cfg.RetentionDays = local.RetentionDays
|
cfg.RetentionDays = local.RetentionDays
|
||||||
}
|
}
|
||||||
@@ -282,21 +294,22 @@ func ApplyLocalConfig(cfg *Config, local *LocalConfig) {
|
|||||||
// ConfigFromConfig creates a LocalConfig from a Config
|
// ConfigFromConfig creates a LocalConfig from a Config
|
||||||
func ConfigFromConfig(cfg *Config) *LocalConfig {
|
func ConfigFromConfig(cfg *Config) *LocalConfig {
|
||||||
return &LocalConfig{
|
return &LocalConfig{
|
||||||
DBType: cfg.DatabaseType,
|
DBType: cfg.DatabaseType,
|
||||||
Host: cfg.Host,
|
Host: cfg.Host,
|
||||||
Port: cfg.Port,
|
Port: cfg.Port,
|
||||||
User: cfg.User,
|
User: cfg.User,
|
||||||
Database: cfg.Database,
|
Database: cfg.Database,
|
||||||
SSLMode: cfg.SSLMode,
|
SSLMode: cfg.SSLMode,
|
||||||
BackupDir: cfg.BackupDir,
|
BackupDir: cfg.BackupDir,
|
||||||
WorkDir: cfg.WorkDir,
|
WorkDir: cfg.WorkDir,
|
||||||
Compression: cfg.CompressionLevel,
|
Compression: cfg.CompressionLevel,
|
||||||
Jobs: cfg.Jobs,
|
Jobs: cfg.Jobs,
|
||||||
DumpJobs: cfg.DumpJobs,
|
DumpJobs: cfg.DumpJobs,
|
||||||
CPUWorkload: cfg.CPUWorkloadType,
|
CPUWorkload: cfg.CPUWorkloadType,
|
||||||
MaxCores: cfg.MaxCores,
|
MaxCores: cfg.MaxCores,
|
||||||
RetentionDays: cfg.RetentionDays,
|
ClusterTimeout: cfg.ClusterTimeoutMinutes,
|
||||||
MinBackups: cfg.MinBackups,
|
RetentionDays: cfg.RetentionDays,
|
||||||
MaxRetries: cfg.MaxRetries,
|
MinBackups: cfg.MinBackups,
|
||||||
|
MaxRetries: cfg.MaxRetries,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ import (
|
|||||||
|
|
||||||
"github.com/jackc/pgx/v5/pgxpool"
|
"github.com/jackc/pgx/v5/pgxpool"
|
||||||
"github.com/jackc/pgx/v5/stdlib"
|
"github.com/jackc/pgx/v5/stdlib"
|
||||||
_ "github.com/jackc/pgx/v5/stdlib" // PostgreSQL driver (pgx)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// PostgreSQL implements Database interface for PostgreSQL
|
// PostgreSQL implements Database interface for PostgreSQL
|
||||||
|
|||||||
228
internal/dedup/chunker.go
Normal file
228
internal/dedup/chunker.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
// Package dedup provides content-defined chunking and deduplication
|
||||||
|
// for database backups, similar to restic/borgbackup but with native
|
||||||
|
// database dump support.
|
||||||
|
package dedup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Chunker constants for content-defined chunking
|
||||||
|
const (
|
||||||
|
// DefaultMinChunkSize is the minimum chunk size (4KB)
|
||||||
|
DefaultMinChunkSize = 4 * 1024
|
||||||
|
|
||||||
|
// DefaultAvgChunkSize is the target average chunk size (8KB)
|
||||||
|
DefaultAvgChunkSize = 8 * 1024
|
||||||
|
|
||||||
|
// DefaultMaxChunkSize is the maximum chunk size (32KB)
|
||||||
|
DefaultMaxChunkSize = 32 * 1024
|
||||||
|
|
||||||
|
// WindowSize for the rolling hash
|
||||||
|
WindowSize = 48
|
||||||
|
|
||||||
|
// ChunkMask determines average chunk size
|
||||||
|
// For 8KB average: we look for hash % 8192 == 0
|
||||||
|
ChunkMask = DefaultAvgChunkSize - 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// Gear hash table - random values for each byte
|
||||||
|
// This is used for the Gear rolling hash which is simpler and faster than Buzhash
|
||||||
|
var gearTable = [256]uint64{
|
||||||
|
0x5c95c078, 0x22408989, 0x2d48a214, 0x12842087, 0x530f8afb, 0x474536b9, 0x2963b4f1, 0x44cb738b,
|
||||||
|
0x4ea7403d, 0x4d606b6e, 0x074ec5d3, 0x3f7e82f4, 0x4e3d26e7, 0x5cb4e82f, 0x7b0a1ef5, 0x3d4e7c92,
|
||||||
|
0x2a81ed69, 0x7f853df8, 0x452c8cf7, 0x0f4f3c9d, 0x3a5e81b7, 0x6cb2d819, 0x2e4c5f93, 0x7e8a1c57,
|
||||||
|
0x1f9d3e8c, 0x4b7c2a5d, 0x3c8f1d6e, 0x5d2a7b4f, 0x6e9c3f8a, 0x7a4d1e5c, 0x2b8c4f7d, 0x4f7d2c9e,
|
||||||
|
0x5a1e3d7c, 0x6b4f8a2d, 0x3e7c9d5a, 0x7d2a4f8b, 0x4c9e7d3a, 0x5b8a1c6e, 0x2d5f4a9c, 0x7a3c8d6b,
|
||||||
|
0x6e2a7b4d, 0x3f8c5d9a, 0x4a7d3e5b, 0x5c9a2d7e, 0x7b4e8f3c, 0x2a6d9c5b, 0x3e4a7d8c, 0x5d7b2e9a,
|
||||||
|
0x4c8a3d7b, 0x6e9d5c8a, 0x7a3e4d9c, 0x2b5c8a7d, 0x4d7e3a9c, 0x5a9c7d3e, 0x3c8b5a7d, 0x7d4e9c2a,
|
||||||
|
0x6a3d8c5b, 0x4e7a9d3c, 0x5c2a7b9e, 0x3a9d4e7c, 0x7b8c5a2d, 0x2d7e4a9c, 0x4a3c9d7b, 0x5e9a7c3d,
|
||||||
|
0x6c4d8a5b, 0x3b7e9c4a, 0x7a5c2d8b, 0x4d9a3e7c, 0x5b7c4a9e, 0x2e8a5d3c, 0x3c9e7a4d, 0x7d4a8c5b,
|
||||||
|
0x6b2d9a7c, 0x4a8c3e5d, 0x5d7a9c2e, 0x3e4c7b9a, 0x7c9d5a4b, 0x2a7e8c3d, 0x4c5a9d7e, 0x5a3e7c4b,
|
||||||
|
0x6d8a2c9e, 0x3c7b4a8d, 0x7e2d9c5a, 0x4b9a7e3c, 0x5c4d8a7b, 0x2d9e3c5a, 0x3a7c9d4e, 0x7b5a4c8d,
|
||||||
|
0x6a9c2e7b, 0x4d3e8a9c, 0x5e7b4d2a, 0x3b9a7c5d, 0x7c4e8a3b, 0x2e7d9c4a, 0x4a8b3e7d, 0x5d2c9a7e,
|
||||||
|
0x6c7a5d3e, 0x3e9c4a7b, 0x7a8d2c5e, 0x4c3e9a7d, 0x5b9c7e2a, 0x2a4d7c9e, 0x3d8a5c4b, 0x7e7b9a3c,
|
||||||
|
0x6b4a8d9e, 0x4e9c3b7a, 0x5a7d4e9c, 0x3c2a8b7d, 0x7d9e5c4a, 0x2b8a7d3e, 0x4d5c9a2b, 0x5e3a7c8d,
|
||||||
|
0x6a9d4b7c, 0x3b7a9c5e, 0x7c4b8a2d, 0x4a9e7c3b, 0x5d2b9a4e, 0x2e7c4d9a, 0x3a9b7e4c, 0x7e5a3c8b,
|
||||||
|
0x6c8a9d4e, 0x4b7c2a5e, 0x5a3e9c7d, 0x3d9a4b7c, 0x7a2d5e9c, 0x2c8b7a3d, 0x4e9c5a2b, 0x5b4d7e9a,
|
||||||
|
0x6d7a3c8b, 0x3e2b9a5d, 0x7c9d4a7e, 0x4a5e3c9b, 0x5e7a9d2c, 0x2b3c7e9a, 0x3a9e4b7d, 0x7d8a5c3e,
|
||||||
|
0x6b9c2d4a, 0x4c7e9a3b, 0x5a2c8b7e, 0x3b4d9a5c, 0x7e9b3a4d, 0x2d5a7c9e, 0x4b8d3e7a, 0x5c9a4b2d,
|
||||||
|
0x6a7c8d9e, 0x3c9e5a7b, 0x7b4a2c9d, 0x4d3b7e9a, 0x5e9c4a3b, 0x2a7b9d4e, 0x3e5c8a7b, 0x7a9d3e5c,
|
||||||
|
0x6c2a7b8d, 0x4e9a5c3b, 0x5b7d2a9e, 0x3a4e9c7b, 0x7d8b3a5c, 0x2c9e7a4b, 0x4a3d5e9c, 0x5d7b8a2e,
|
||||||
|
0x6b9a4c7d, 0x3d5a9e4b, 0x7e2c7b9a, 0x4b9d3a5e, 0x5c4e7a9d, 0x2e8a3c7b, 0x3b7c9e5a, 0x7a4d8b3e,
|
||||||
|
0x6d9c5a2b, 0x4a7e3d9c, 0x5e2a9b7d, 0x3c9a7e4b, 0x7b3e5c9a, 0x2a4b8d7e, 0x4d9c2a5b, 0x5a7d9e3c,
|
||||||
|
0x6c3b8a7d, 0x3e9d4a5c, 0x7d5c2b9e, 0x4c8a7d3b, 0x5b9e3c7a, 0x2d7a9c4e, 0x3a5e7b9d, 0x7e8b4a3c,
|
||||||
|
0x6a2d9e7b, 0x4b3e5a9d, 0x5d9c7b2a, 0x3b7d4e9c, 0x7c9a3b5e, 0x2e5c8a7d, 0x4a7b9d3e, 0x5c3a7e9b,
|
||||||
|
0x6d9e5c4a, 0x3c4a7b9e, 0x7a9d2e5c, 0x4e7c9a3d, 0x5a8b4e7c, 0x2b9a3d7e, 0x3d5b8a9c, 0x7b4e9a2d,
|
||||||
|
0x6c7d3a9e, 0x4a9c5e3b, 0x5e2b7d9a, 0x3a8d4c7b, 0x7d3e9a5c, 0x2c7a8b9e, 0x4b5d3a7c, 0x5c9a7e2b,
|
||||||
|
0x6a4b9d3e, 0x3e7c2a9d, 0x7c8a5b4e, 0x4d9e3c7a, 0x5b3a9e7c, 0x2e9c7b4a, 0x3b4e8a9d, 0x7a9c4e3b,
|
||||||
|
0x6d2a7c9e, 0x4c8b9a5d, 0x5a9e2b7c, 0x3c3d7a9e, 0x7e5a9c4b, 0x2a8d3e7c, 0x4e7a5c9b, 0x5d9b8a2e,
|
||||||
|
0x6b4c9e7a, 0x3a9d5b4e, 0x7b2e8a9c, 0x4a5c3e9b, 0x5c9a4d7e, 0x2d7e9a3c, 0x3e8b7c5a, 0x7c9e2a4d,
|
||||||
|
0x6a3b7d9c, 0x4d9a8b3e, 0x5e5c2a7b, 0x3b4a9d7c, 0x7a7c5e9b, 0x2c9b4a8d, 0x4b3e7c9a, 0x5a9d3b7e,
|
||||||
|
0x6c8a4e9d, 0x3d7b9c5a, 0x7e2a4b9c, 0x4c9e5d3a, 0x5b7a9c4e, 0x2e4d8a7b, 0x3a9c7e5d, 0x7b8d3a9e,
|
||||||
|
0x6d5c9a4b, 0x4a2e7b9d, 0x5d9b4c8a, 0x3c7a9e2b, 0x7d4b8c9e, 0x2b9a5c4d, 0x4e7d3a9c, 0x5c8a9e7b,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chunk represents a single deduplicated chunk
|
||||||
|
type Chunk struct {
|
||||||
|
// Hash is the SHA-256 hash of the chunk data (content-addressed)
|
||||||
|
Hash string
|
||||||
|
|
||||||
|
// Data is the raw chunk bytes
|
||||||
|
Data []byte
|
||||||
|
|
||||||
|
// Offset is the byte offset in the original file
|
||||||
|
Offset int64
|
||||||
|
|
||||||
|
// Length is the size of this chunk
|
||||||
|
Length int
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkerConfig holds configuration for the chunker
|
||||||
|
type ChunkerConfig struct {
|
||||||
|
MinSize int // Minimum chunk size
|
||||||
|
AvgSize int // Target average chunk size
|
||||||
|
MaxSize int // Maximum chunk size
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultChunkerConfig returns sensible defaults
|
||||||
|
func DefaultChunkerConfig() ChunkerConfig {
|
||||||
|
return ChunkerConfig{
|
||||||
|
MinSize: DefaultMinChunkSize,
|
||||||
|
AvgSize: DefaultAvgChunkSize,
|
||||||
|
MaxSize: DefaultMaxChunkSize,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chunker performs content-defined chunking using Gear hash
|
||||||
|
type Chunker struct {
|
||||||
|
reader io.Reader
|
||||||
|
config ChunkerConfig
|
||||||
|
|
||||||
|
// Rolling hash state
|
||||||
|
hash uint64
|
||||||
|
|
||||||
|
// Current chunk state
|
||||||
|
buf []byte
|
||||||
|
offset int64
|
||||||
|
mask uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewChunker creates a new chunker for the given reader
|
||||||
|
func NewChunker(r io.Reader, config ChunkerConfig) *Chunker {
|
||||||
|
// Calculate mask for target average size
|
||||||
|
// We want: avg_size = 1 / P(boundary)
|
||||||
|
// With mask, P(boundary) = 1 / (mask + 1)
|
||||||
|
// So mask = avg_size - 1
|
||||||
|
mask := uint64(config.AvgSize - 1)
|
||||||
|
|
||||||
|
return &Chunker{
|
||||||
|
reader: r,
|
||||||
|
config: config,
|
||||||
|
buf: make([]byte, 0, config.MaxSize),
|
||||||
|
mask: mask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns the next chunk from the input stream
|
||||||
|
// Returns io.EOF when no more data is available
|
||||||
|
func (c *Chunker) Next() (*Chunk, error) {
|
||||||
|
c.buf = c.buf[:0]
|
||||||
|
c.hash = 0
|
||||||
|
|
||||||
|
// Read bytes until we find a chunk boundary or hit max size
|
||||||
|
singleByte := make([]byte, 1)
|
||||||
|
|
||||||
|
for {
|
||||||
|
n, err := c.reader.Read(singleByte)
|
||||||
|
if n == 0 {
|
||||||
|
if err == io.EOF {
|
||||||
|
// Return remaining data as final chunk
|
||||||
|
if len(c.buf) > 0 {
|
||||||
|
return c.makeChunk(), nil
|
||||||
|
}
|
||||||
|
return nil, io.EOF
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
b := singleByte[0]
|
||||||
|
c.buf = append(c.buf, b)
|
||||||
|
|
||||||
|
// Update Gear rolling hash
|
||||||
|
// Gear hash: hash = (hash << 1) + gear_table[byte]
|
||||||
|
c.hash = (c.hash << 1) + gearTable[b]
|
||||||
|
|
||||||
|
// Check for chunk boundary after minimum size
|
||||||
|
if len(c.buf) >= c.config.MinSize {
|
||||||
|
// Check if we hit a boundary (hash matches mask pattern)
|
||||||
|
if (c.hash & c.mask) == 0 {
|
||||||
|
return c.makeChunk(), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force boundary at max size
|
||||||
|
if len(c.buf) >= c.config.MaxSize {
|
||||||
|
return c.makeChunk(), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeChunk creates a Chunk from the current buffer
|
||||||
|
func (c *Chunker) makeChunk() *Chunk {
|
||||||
|
// Compute SHA-256 hash
|
||||||
|
h := sha256.Sum256(c.buf)
|
||||||
|
hash := hex.EncodeToString(h[:])
|
||||||
|
|
||||||
|
// Copy data
|
||||||
|
data := make([]byte, len(c.buf))
|
||||||
|
copy(data, c.buf)
|
||||||
|
|
||||||
|
chunk := &Chunk{
|
||||||
|
Hash: hash,
|
||||||
|
Data: data,
|
||||||
|
Offset: c.offset,
|
||||||
|
Length: len(data),
|
||||||
|
}
|
||||||
|
|
||||||
|
c.offset += int64(len(data))
|
||||||
|
return chunk
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkReader splits a reader into content-defined chunks
|
||||||
|
// and returns them via a channel for concurrent processing
|
||||||
|
func ChunkReader(r io.Reader, config ChunkerConfig) (<-chan *Chunk, <-chan error) {
|
||||||
|
chunks := make(chan *Chunk, 100)
|
||||||
|
errs := make(chan error, 1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer close(chunks)
|
||||||
|
defer close(errs)
|
||||||
|
|
||||||
|
chunker := NewChunker(r, config)
|
||||||
|
for {
|
||||||
|
chunk, err := chunker.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
errs <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
chunks <- chunk
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return chunks, errs
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashData computes SHA-256 hash of data
|
||||||
|
func HashData(data []byte) string {
|
||||||
|
h := sha256.Sum256(data)
|
||||||
|
return hex.EncodeToString(h[:])
|
||||||
|
}
|
||||||
217
internal/dedup/chunker_test.go
Normal file
217
internal/dedup/chunker_test.go
Normal file
@@ -0,0 +1,217 @@
|
|||||||
|
package dedup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestChunker_Basic(t *testing.T) {
|
||||||
|
// Create test data
|
||||||
|
data := make([]byte, 100*1024) // 100KB
|
||||||
|
rand.Read(data)
|
||||||
|
|
||||||
|
chunker := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
|
||||||
|
|
||||||
|
var chunks []*Chunk
|
||||||
|
var totalBytes int
|
||||||
|
|
||||||
|
for {
|
||||||
|
chunk, err := chunker.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Chunker.Next() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunks = append(chunks, chunk)
|
||||||
|
totalBytes += chunk.Length
|
||||||
|
|
||||||
|
// Verify chunk properties
|
||||||
|
if chunk.Length < DefaultMinChunkSize && len(chunks) < 10 {
|
||||||
|
// Only the last chunk can be smaller than min
|
||||||
|
// (unless file is smaller than min)
|
||||||
|
}
|
||||||
|
if chunk.Length > DefaultMaxChunkSize {
|
||||||
|
t.Errorf("Chunk %d exceeds max size: %d > %d", len(chunks), chunk.Length, DefaultMaxChunkSize)
|
||||||
|
}
|
||||||
|
if chunk.Hash == "" {
|
||||||
|
t.Errorf("Chunk %d has empty hash", len(chunks))
|
||||||
|
}
|
||||||
|
if len(chunk.Hash) != 64 { // SHA-256 hex length
|
||||||
|
t.Errorf("Chunk %d has invalid hash length: %d", len(chunks), len(chunk.Hash))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalBytes != len(data) {
|
||||||
|
t.Errorf("Total bytes mismatch: got %d, want %d", totalBytes, len(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("Chunked %d bytes into %d chunks", totalBytes, len(chunks))
|
||||||
|
t.Logf("Average chunk size: %d bytes", totalBytes/len(chunks))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChunker_Deterministic(t *testing.T) {
|
||||||
|
// Same data should produce same chunks
|
||||||
|
data := make([]byte, 50*1024)
|
||||||
|
rand.Read(data)
|
||||||
|
|
||||||
|
// First pass
|
||||||
|
chunker1 := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
|
||||||
|
var hashes1 []string
|
||||||
|
for {
|
||||||
|
chunk, err := chunker1.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
hashes1 = append(hashes1, chunk.Hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Second pass
|
||||||
|
chunker2 := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
|
||||||
|
var hashes2 []string
|
||||||
|
for {
|
||||||
|
chunk, err := chunker2.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
hashes2 = append(hashes2, chunk.Hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare
|
||||||
|
if len(hashes1) != len(hashes2) {
|
||||||
|
t.Fatalf("Different chunk counts: %d vs %d", len(hashes1), len(hashes2))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range hashes1 {
|
||||||
|
if hashes1[i] != hashes2[i] {
|
||||||
|
t.Errorf("Hash mismatch at chunk %d: %s vs %s", i, hashes1[i], hashes2[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChunker_ShiftedData(t *testing.T) {
|
||||||
|
// Test that shifted data still shares chunks (the key CDC benefit)
|
||||||
|
original := make([]byte, 100*1024)
|
||||||
|
rand.Read(original)
|
||||||
|
|
||||||
|
// Create shifted version (prepend some bytes)
|
||||||
|
prefix := make([]byte, 1000)
|
||||||
|
rand.Read(prefix)
|
||||||
|
shifted := append(prefix, original...)
|
||||||
|
|
||||||
|
// Chunk both
|
||||||
|
config := DefaultChunkerConfig()
|
||||||
|
|
||||||
|
chunker1 := NewChunker(bytes.NewReader(original), config)
|
||||||
|
hashes1 := make(map[string]bool)
|
||||||
|
for {
|
||||||
|
chunk, err := chunker1.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
hashes1[chunk.Hash] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
chunker2 := NewChunker(bytes.NewReader(shifted), config)
|
||||||
|
var matched, total int
|
||||||
|
for {
|
||||||
|
chunk, err := chunker2.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
total++
|
||||||
|
if hashes1[chunk.Hash] {
|
||||||
|
matched++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should have significant overlap despite the shift
|
||||||
|
overlapRatio := float64(matched) / float64(total)
|
||||||
|
t.Logf("Chunk overlap after %d-byte shift: %.1f%% (%d/%d chunks)",
|
||||||
|
len(prefix), overlapRatio*100, matched, total)
|
||||||
|
|
||||||
|
// We expect at least 50% overlap for content-defined chunking
|
||||||
|
if overlapRatio < 0.5 {
|
||||||
|
t.Errorf("Low chunk overlap: %.1f%% (expected >50%%)", overlapRatio*100)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChunker_SmallFile(t *testing.T) {
|
||||||
|
// File smaller than min chunk size
|
||||||
|
data := []byte("hello world")
|
||||||
|
chunker := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
|
||||||
|
|
||||||
|
chunk, err := chunker.Next()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if chunk.Length != len(data) {
|
||||||
|
t.Errorf("Expected chunk length %d, got %d", len(data), chunk.Length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should be EOF after
|
||||||
|
_, err = chunker.Next()
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Errorf("Expected EOF, got %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChunker_EmptyFile(t *testing.T) {
|
||||||
|
chunker := NewChunker(bytes.NewReader(nil), DefaultChunkerConfig())
|
||||||
|
|
||||||
|
_, err := chunker.Next()
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Errorf("Expected EOF for empty file, got %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHashData(t *testing.T) {
|
||||||
|
hash := HashData([]byte("test"))
|
||||||
|
if len(hash) != 64 {
|
||||||
|
t.Errorf("Expected 64-char hash, got %d", len(hash))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Known SHA-256 of "test"
|
||||||
|
expected := "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
|
||||||
|
if hash != expected {
|
||||||
|
t.Errorf("Hash mismatch: got %s, want %s", hash, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkChunker(b *testing.B) {
|
||||||
|
// 1MB of random data
|
||||||
|
data := make([]byte, 1024*1024)
|
||||||
|
rand.Read(data)
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
b.SetBytes(int64(len(data)))
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
chunker := NewChunker(bytes.NewReader(data), DefaultChunkerConfig())
|
||||||
|
for {
|
||||||
|
_, err := chunker.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
239
internal/dedup/index.go
Normal file
239
internal/dedup/index.go
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
package dedup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3" // SQLite driver
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChunkIndex provides fast chunk lookups using SQLite
|
||||||
|
type ChunkIndex struct {
|
||||||
|
db *sql.DB
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewChunkIndex opens or creates a chunk index database
|
||||||
|
func NewChunkIndex(basePath string) (*ChunkIndex, error) {
|
||||||
|
dbPath := filepath.Join(basePath, "chunks.db")
|
||||||
|
|
||||||
|
db, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_synchronous=NORMAL")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to open chunk index: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
idx := &ChunkIndex{db: db}
|
||||||
|
if err := idx.migrate(); err != nil {
|
||||||
|
db.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return idx, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// migrate creates the schema if needed.
//
// Two tables: `chunks` holds per-chunk metadata keyed by SHA-256 hash with
// a reference count, and `manifests` summarizes whole backups. Everything
// uses IF NOT EXISTS so running this on every open is idempotent.
func (idx *ChunkIndex) migrate() error {
	schema := `
	CREATE TABLE IF NOT EXISTS chunks (
		hash TEXT PRIMARY KEY,
		size_raw INTEGER NOT NULL,
		size_stored INTEGER NOT NULL,
		created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
		last_accessed DATETIME,
		ref_count INTEGER DEFAULT 1
	);

	CREATE TABLE IF NOT EXISTS manifests (
		id TEXT PRIMARY KEY,
		database_type TEXT,
		database_name TEXT,
		database_host TEXT,
		created_at DATETIME,
		original_size INTEGER,
		stored_size INTEGER,
		chunk_count INTEGER,
		new_chunks INTEGER,
		dedup_ratio REAL,
		sha256 TEXT,
		verified_at DATETIME
	);

	CREATE INDEX IF NOT EXISTS idx_chunks_created ON chunks(created_at);
	CREATE INDEX IF NOT EXISTS idx_chunks_accessed ON chunks(last_accessed);
	CREATE INDEX IF NOT EXISTS idx_manifests_created ON manifests(created_at);
	CREATE INDEX IF NOT EXISTS idx_manifests_database ON manifests(database_name);
	`

	_, err := idx.db.Exec(schema)
	return err
}
|
||||||
|
|
||||||
|
// Close closes the database
|
||||||
|
func (idx *ChunkIndex) Close() error {
|
||||||
|
return idx.db.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddChunk records a chunk in the index
|
||||||
|
func (idx *ChunkIndex) AddChunk(hash string, sizeRaw, sizeStored int) error {
|
||||||
|
_, err := idx.db.Exec(`
|
||||||
|
INSERT INTO chunks (hash, size_raw, size_stored, created_at, last_accessed, ref_count)
|
||||||
|
VALUES (?, ?, ?, ?, ?, 1)
|
||||||
|
ON CONFLICT(hash) DO UPDATE SET
|
||||||
|
ref_count = ref_count + 1,
|
||||||
|
last_accessed = ?
|
||||||
|
`, hash, sizeRaw, sizeStored, time.Now(), time.Now(), time.Now())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasChunk checks if a chunk exists in the index
|
||||||
|
func (idx *ChunkIndex) HasChunk(hash string) (bool, error) {
|
||||||
|
var count int
|
||||||
|
err := idx.db.QueryRow("SELECT COUNT(*) FROM chunks WHERE hash = ?", hash).Scan(&count)
|
||||||
|
return count > 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetChunk retrieves chunk metadata
|
||||||
|
func (idx *ChunkIndex) GetChunk(hash string) (*ChunkMeta, error) {
|
||||||
|
var m ChunkMeta
|
||||||
|
err := idx.db.QueryRow(`
|
||||||
|
SELECT hash, size_raw, size_stored, created_at, ref_count
|
||||||
|
FROM chunks WHERE hash = ?
|
||||||
|
`, hash).Scan(&m.Hash, &m.SizeRaw, &m.SizeStored, &m.CreatedAt, &m.RefCount)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkMeta holds metadata about a single stored chunk.
type ChunkMeta struct {
	Hash       string    // SHA-256 hex digest (primary key in the chunks table)
	SizeRaw    int64     // size before compression/encryption
	SizeStored int64     // size on disk
	CreatedAt  time.Time // first time the chunk was recorded
	RefCount   int       // number of references (see AddChunk/DecrementRef)
}
|
||||||
|
|
||||||
|
// DecrementRef decreases the reference count for a chunk
|
||||||
|
// Returns true if the chunk should be deleted (ref_count <= 0)
|
||||||
|
func (idx *ChunkIndex) DecrementRef(hash string) (shouldDelete bool, err error) {
|
||||||
|
result, err := idx.db.Exec(`
|
||||||
|
UPDATE chunks SET ref_count = ref_count - 1 WHERE hash = ?
|
||||||
|
`, hash)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
affected, _ := result.RowsAffected()
|
||||||
|
if affected == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var refCount int
|
||||||
|
err = idx.db.QueryRow("SELECT ref_count FROM chunks WHERE hash = ?", hash).Scan(&refCount)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return refCount <= 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveChunk removes a chunk from the index
|
||||||
|
func (idx *ChunkIndex) RemoveChunk(hash string) error {
|
||||||
|
_, err := idx.db.Exec("DELETE FROM chunks WHERE hash = ?", hash)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddManifest records (or overwrites, via INSERT OR REPLACE) a manifest
// summary row in the index. verified_at is intentionally not written here;
// it stays NULL until a later verification pass sets it.
func (idx *ChunkIndex) AddManifest(m *Manifest) error {
	_, err := idx.db.Exec(`
		INSERT OR REPLACE INTO manifests
		(id, database_type, database_name, database_host, created_at,
		 original_size, stored_size, chunk_count, new_chunks, dedup_ratio, sha256)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`, m.ID, m.DatabaseType, m.DatabaseName, m.DatabaseHost, m.CreatedAt,
		m.OriginalSize, m.StoredSize, m.ChunkCount, m.NewChunks, m.DedupRatio, m.SHA256)
	return err
}
|
||||||
|
|
||||||
|
// RemoveManifest removes a manifest from the index
|
||||||
|
func (idx *ChunkIndex) RemoveManifest(id string) error {
|
||||||
|
_, err := idx.db.Exec("DELETE FROM manifests WHERE id = ?", id)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndexStats holds aggregate statistics about the dedup index.
type IndexStats struct {
	TotalChunks     int64
	TotalManifests  int64
	TotalSizeRaw    int64 // uncompressed, undeduplicated bytes
	TotalSizeStored int64 // on-disk bytes after dedup+compression
	// DedupRatio is the fraction saved: 0 = nothing deduplicated,
	// approaching 1 = almost everything deduplicated (see Stats).
	DedupRatio  float64
	OldestChunk time.Time
	NewestChunk time.Time
}
|
||||||
|
|
||||||
|
// Stats returns statistics about the index
|
||||||
|
func (idx *ChunkIndex) Stats() (*IndexStats, error) {
|
||||||
|
stats := &IndexStats{}
|
||||||
|
|
||||||
|
var oldestStr, newestStr string
|
||||||
|
err := idx.db.QueryRow(`
|
||||||
|
SELECT
|
||||||
|
COUNT(*),
|
||||||
|
COALESCE(SUM(size_raw), 0),
|
||||||
|
COALESCE(SUM(size_stored), 0),
|
||||||
|
COALESCE(MIN(created_at), ''),
|
||||||
|
COALESCE(MAX(created_at), '')
|
||||||
|
FROM chunks
|
||||||
|
`).Scan(&stats.TotalChunks, &stats.TotalSizeRaw, &stats.TotalSizeStored,
|
||||||
|
&oldestStr, &newestStr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse time strings
|
||||||
|
if oldestStr != "" {
|
||||||
|
stats.OldestChunk, _ = time.Parse("2006-01-02 15:04:05", oldestStr)
|
||||||
|
}
|
||||||
|
if newestStr != "" {
|
||||||
|
stats.NewestChunk, _ = time.Parse("2006-01-02 15:04:05", newestStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
idx.db.QueryRow("SELECT COUNT(*) FROM manifests").Scan(&stats.TotalManifests)
|
||||||
|
|
||||||
|
if stats.TotalSizeRaw > 0 {
|
||||||
|
stats.DedupRatio = 1.0 - float64(stats.TotalSizeStored)/float64(stats.TotalSizeRaw)
|
||||||
|
}
|
||||||
|
|
||||||
|
return stats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListOrphanedChunks returns chunks that have ref_count <= 0
|
||||||
|
func (idx *ChunkIndex) ListOrphanedChunks() ([]string, error) {
|
||||||
|
rows, err := idx.db.Query("SELECT hash FROM chunks WHERE ref_count <= 0")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var hashes []string
|
||||||
|
for rows.Next() {
|
||||||
|
var hash string
|
||||||
|
if err := rows.Scan(&hash); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
hashes = append(hashes, hash)
|
||||||
|
}
|
||||||
|
return hashes, rows.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Vacuum cleans up the database
|
||||||
|
func (idx *ChunkIndex) Vacuum() error {
|
||||||
|
_, err := idx.db.Exec("VACUUM")
|
||||||
|
return err
|
||||||
|
}
|
||||||
188
internal/dedup/manifest.go
Normal file
188
internal/dedup/manifest.go
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
package dedup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Manifest describes a single backup as an ordered list of chunks.
type Manifest struct {
	// ID is the unique identifier (typically timestamp-based).
	ID string `json:"id"`

	// Name is an optional human-readable name.
	Name string `json:"name,omitempty"`

	// CreatedAt is when this backup was created.
	CreatedAt time.Time `json:"created_at"`

	// Source database information.
	DatabaseType string `json:"database_type"` // postgres, mysql
	DatabaseName string `json:"database_name"`
	DatabaseHost string `json:"database_host"`

	// Chunks is the ordered list of chunk references; the original file is
	// reconstructed by concatenating the chunks in this order.
	Chunks []ChunkRef `json:"chunks"`

	// Statistics about the backup.
	OriginalSize int64 `json:"original_size"` // size before deduplication
	StoredSize   int64 `json:"stored_size"`   // size after dedup (new chunks only)
	ChunkCount   int   `json:"chunk_count"`   // total chunks
	NewChunks    int   `json:"new_chunks"`    // chunks that weren't deduplicated
	// DedupRatio: 1.0 = no dedup, 0.0 = 100% dedup.
	// NOTE(review): this convention is the inverse of
	// ChunkIndex.Stats().DedupRatio (where 0 = no dedup) — confirm which
	// one is intended before displaying both.
	DedupRatio float64 `json:"dedup_ratio"`

	// Encryption and compression settings used when storing chunks.
	Encrypted  bool `json:"encrypted"`
	Compressed bool `json:"compressed"`

	// Verification state.
	SHA256     string    `json:"sha256"` // hash of the reconstructed file
	VerifiedAt time.Time `json:"verified_at,omitempty"`
}
|
||||||
|
|
||||||
|
// ChunkRef references one chunk within a manifest. Field tags are
// single letters to keep large manifests compact on disk.
type ChunkRef struct {
	Hash   string `json:"h"` // SHA-256 hash (64 hex chars)
	Offset int64  `json:"o"` // byte offset in the original file
	Length int    `json:"l"` // chunk length in bytes
}
|
||||||
|
|
||||||
|
// ManifestStore persists backup manifests as JSON files under
// <basePath>/manifests.
type ManifestStore struct {
	basePath string // root of the dedup store (not the manifests dir itself)
}
|
||||||
|
|
||||||
|
// NewManifestStore creates a new manifest store
|
||||||
|
func NewManifestStore(basePath string) (*ManifestStore, error) {
|
||||||
|
manifestDir := filepath.Join(basePath, "manifests")
|
||||||
|
if err := os.MkdirAll(manifestDir, 0700); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create manifest directory: %w", err)
|
||||||
|
}
|
||||||
|
return &ManifestStore{basePath: basePath}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// manifestPath returns the path for a manifest ID
|
||||||
|
func (s *ManifestStore) manifestPath(id string) string {
|
||||||
|
return filepath.Join(s.basePath, "manifests", id+".manifest.json")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save writes a manifest to disk
|
||||||
|
func (s *ManifestStore) Save(m *Manifest) error {
|
||||||
|
path := s.manifestPath(m.ID)
|
||||||
|
|
||||||
|
data, err := json.MarshalIndent(m, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to marshal manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Atomic write
|
||||||
|
tmpPath := path + ".tmp"
|
||||||
|
if err := os.WriteFile(tmpPath, data, 0600); err != nil {
|
||||||
|
return fmt.Errorf("failed to write manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Rename(tmpPath, path); err != nil {
|
||||||
|
os.Remove(tmpPath)
|
||||||
|
return fmt.Errorf("failed to commit manifest: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load reads a manifest from disk
|
||||||
|
func (s *ManifestStore) Load(id string) (*Manifest, error) {
|
||||||
|
path := s.manifestPath(id)
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read manifest %s: %w", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var m Manifest
|
||||||
|
if err := json.Unmarshal(data, &m); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse manifest %s: %w", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes a manifest
|
||||||
|
func (s *ManifestStore) Delete(id string) error {
|
||||||
|
path := s.manifestPath(id)
|
||||||
|
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("failed to delete manifest %s: %w", id, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List returns all manifest IDs
|
||||||
|
func (s *ManifestStore) List() ([]string, error) {
|
||||||
|
manifestDir := filepath.Join(s.basePath, "manifests")
|
||||||
|
entries, err := os.ReadDir(manifestDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to list manifests: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var ids []string
|
||||||
|
for _, e := range entries {
|
||||||
|
if e.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := e.Name()
|
||||||
|
if len(name) > 14 && name[len(name)-14:] == ".manifest.json" {
|
||||||
|
ids = append(ids, name[:len(name)-14])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListAll returns all manifests sorted by creation time (newest first)
|
||||||
|
func (s *ManifestStore) ListAll() ([]*Manifest, error) {
|
||||||
|
ids, err := s.List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var manifests []*Manifest
|
||||||
|
for _, id := range ids {
|
||||||
|
m, err := s.Load(id)
|
||||||
|
if err != nil {
|
||||||
|
continue // Skip corrupted manifests
|
||||||
|
}
|
||||||
|
manifests = append(manifests, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by creation time (newest first)
|
||||||
|
for i := 0; i < len(manifests)-1; i++ {
|
||||||
|
for j := i + 1; j < len(manifests); j++ {
|
||||||
|
if manifests[j].CreatedAt.After(manifests[i].CreatedAt) {
|
||||||
|
manifests[i], manifests[j] = manifests[j], manifests[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return manifests, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetChunkHashes returns all unique chunk hashes referenced by manifests
|
||||||
|
func (s *ManifestStore) GetChunkHashes() (map[string]int, error) {
|
||||||
|
manifests, err := s.ListAll()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map hash -> reference count
|
||||||
|
refs := make(map[string]int)
|
||||||
|
for _, m := range manifests {
|
||||||
|
for _, c := range m.Chunks {
|
||||||
|
refs[c.Hash]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return refs, nil
|
||||||
|
}
|
||||||
367
internal/dedup/store.go
Normal file
367
internal/dedup/store.go
Normal file
@@ -0,0 +1,367 @@
|
|||||||
|
package dedup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"crypto/aes"
|
||||||
|
"crypto/cipher"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChunkStore manages content-addressed chunk storage on the local
// filesystem. Chunks are stored as:
//
//	<base>/chunks/<2-char prefix>/<hash>.chunk[.gz][.enc]
type ChunkStore struct {
	basePath       string
	compress       bool
	encryptionKey  []byte          // 32 bytes for AES-256; nil disables encryption
	mu             sync.RWMutex    // guards existingChunks
	existingChunks map[string]bool // cache of known chunk presence (true and false both cached)
}
|
||||||
|
|
||||||
|
// StoreConfig holds configuration for the chunk store.
type StoreConfig struct {
	BasePath      string
	Compress      bool   // enable gzip compression of stored chunks
	EncryptionKey string // optional: hex-encoded 32-byte key for AES-256-GCM
}
|
||||||
|
|
||||||
|
// NewChunkStore creates a chunk store rooted at config.BasePath.
//
// If config.EncryptionKey is non-empty it must decode to exactly 32 bytes
// (AES-256). The base directory and the "chunks"/"manifests"
// subdirectories are created with owner-only (0700) permissions.
func NewChunkStore(config StoreConfig) (*ChunkStore, error) {
	store := &ChunkStore{
		basePath:       config.BasePath,
		compress:       config.Compress,
		existingChunks: make(map[string]bool),
	}

	// Parse and validate the optional encryption key.
	if config.EncryptionKey != "" {
		key, err := hex.DecodeString(config.EncryptionKey)
		if err != nil {
			return nil, fmt.Errorf("invalid encryption key: %w", err)
		}
		if len(key) != 32 {
			return nil, fmt.Errorf("encryption key must be 32 bytes (got %d)", len(key))
		}
		store.encryptionKey = key
	}

	// Create the base directory structure.
	if err := os.MkdirAll(config.BasePath, 0700); err != nil {
		return nil, fmt.Errorf("failed to create chunk store: %w", err)
	}

	// Create chunks and manifests directories.
	for _, dir := range []string{"chunks", "manifests"} {
		if err := os.MkdirAll(filepath.Join(config.BasePath, dir), 0700); err != nil {
			return nil, fmt.Errorf("failed to create %s directory: %w", dir, err)
		}
	}

	return store, nil
}
|
||||||
|
|
||||||
|
// chunkPath returns the filesystem path for a chunk hash
|
||||||
|
// Uses 2-character prefix for directory sharding (256 subdirs)
|
||||||
|
func (s *ChunkStore) chunkPath(hash string) string {
|
||||||
|
if len(hash) < 2 {
|
||||||
|
return filepath.Join(s.basePath, "chunks", "xx", hash+s.chunkExt())
|
||||||
|
}
|
||||||
|
prefix := hash[:2]
|
||||||
|
return filepath.Join(s.basePath, "chunks", prefix, hash+s.chunkExt())
|
||||||
|
}
|
||||||
|
|
||||||
|
// chunkExt returns the file extension based on compression/encryption settings
|
||||||
|
func (s *ChunkStore) chunkExt() string {
|
||||||
|
ext := ".chunk"
|
||||||
|
if s.compress {
|
||||||
|
ext += ".gz"
|
||||||
|
}
|
||||||
|
if s.encryptionKey != nil {
|
||||||
|
ext += ".enc"
|
||||||
|
}
|
||||||
|
return ext
|
||||||
|
}
|
||||||
|
|
||||||
|
// Has checks if a chunk exists in the store
|
||||||
|
func (s *ChunkStore) Has(hash string) bool {
|
||||||
|
s.mu.RLock()
|
||||||
|
if exists, ok := s.existingChunks[hash]; ok {
|
||||||
|
s.mu.RUnlock()
|
||||||
|
return exists
|
||||||
|
}
|
||||||
|
s.mu.RUnlock()
|
||||||
|
|
||||||
|
// Check filesystem
|
||||||
|
path := s.chunkPath(hash)
|
||||||
|
_, err := os.Stat(path)
|
||||||
|
exists := err == nil
|
||||||
|
|
||||||
|
s.mu.Lock()
|
||||||
|
s.existingChunks[hash] = exists
|
||||||
|
s.mu.Unlock()
|
||||||
|
|
||||||
|
return exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put stores a chunk, returning true if it was new (not deduplicated).
//
// The chunk is optionally compressed, then encrypted, then written
// atomically (temp file + rename) so readers never observe a partial
// chunk file. NOTE(review): the Has check and the write are not atomic
// across processes; two concurrent writers may both write the same chunk,
// which is harmless since content-addressed files are identical.
func (s *ChunkStore) Put(chunk *Chunk) (isNew bool, err error) {
	// Check if already exists (deduplication!)
	if s.Has(chunk.Hash) {
		return false, nil
	}

	path := s.chunkPath(chunk.Hash)

	// Create the 2-char prefix shard directory on demand.
	if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
		return false, fmt.Errorf("failed to create chunk directory: %w", err)
	}

	// Prepare data
	data := chunk.Data

	// Compress if enabled (before encryption — ciphertext doesn't compress).
	if s.compress {
		data, err = s.compressData(data)
		if err != nil {
			return false, fmt.Errorf("compression failed: %w", err)
		}
	}

	// Encrypt if enabled
	if s.encryptionKey != nil {
		data, err = s.encryptData(data)
		if err != nil {
			return false, fmt.Errorf("encryption failed: %w", err)
		}
	}

	// Write atomically (write to temp, then rename)
	tmpPath := path + ".tmp"
	if err := os.WriteFile(tmpPath, data, 0600); err != nil {
		return false, fmt.Errorf("failed to write chunk: %w", err)
	}

	if err := os.Rename(tmpPath, path); err != nil {
		os.Remove(tmpPath)
		return false, fmt.Errorf("failed to commit chunk: %w", err)
	}

	// Update the presence cache so later Put/Has calls skip the stat.
	s.mu.Lock()
	s.existingChunks[chunk.Hash] = true
	s.mu.Unlock()

	return true, nil
}
|
||||||
|
|
||||||
|
// Get retrieves a chunk by hash, reversing Put's pipeline (decrypt, then
// decompress) and verifying the plaintext SHA-256 against the requested
// hash, so on-disk corruption is detected here rather than at restore.
func (s *ChunkStore) Get(hash string) (*Chunk, error) {
	path := s.chunkPath(hash)

	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read chunk %s: %w", hash, err)
	}

	// Decrypt if encrypted
	if s.encryptionKey != nil {
		data, err = s.decryptData(data)
		if err != nil {
			return nil, fmt.Errorf("decryption failed: %w", err)
		}
	}

	// Decompress if compressed
	if s.compress {
		data, err = s.decompressData(data)
		if err != nil {
			return nil, fmt.Errorf("decompression failed: %w", err)
		}
	}

	// Verify hash: the filename *is* the expected SHA-256 of the plaintext.
	h := sha256.Sum256(data)
	actualHash := hex.EncodeToString(h[:])
	if actualHash != hash {
		return nil, fmt.Errorf("chunk hash mismatch: expected %s, got %s", hash, actualHash)
	}

	return &Chunk{
		Hash:   hash,
		Data:   data,
		Length: len(data),
	}, nil
}
|
||||||
|
|
||||||
|
// Delete removes a chunk from the store
|
||||||
|
func (s *ChunkStore) Delete(hash string) error {
|
||||||
|
path := s.chunkPath(hash)
|
||||||
|
|
||||||
|
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("failed to delete chunk %s: %w", hash, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.mu.Lock()
|
||||||
|
delete(s.existingChunks, hash)
|
||||||
|
s.mu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StoreStats holds on-disk storage statistics for the chunk store
// (returned by ChunkStore.Stats).
type StoreStats struct {
	TotalChunks int64
	TotalSize   int64 // bytes on disk (after compression/encryption)
	// UniqueSize is bytes of unique data.
	// NOTE(review): never populated by Stats below — confirm intended use.
	UniqueSize  int64
	Directories int
}
|
||||||
|
|
||||||
|
// Stats returns statistics about the chunk store
|
||||||
|
func (s *ChunkStore) Stats() (*StoreStats, error) {
|
||||||
|
stats := &StoreStats{}
|
||||||
|
|
||||||
|
chunksDir := filepath.Join(s.basePath, "chunks")
|
||||||
|
err := filepath.Walk(chunksDir, func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if info.IsDir() {
|
||||||
|
stats.Directories++
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
stats.TotalChunks++
|
||||||
|
stats.TotalSize += info.Size()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
return stats, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadIndex rebuilds the in-memory presence cache by scanning every file
// under <base>/chunks. Intended to warm the dedup cache after restart.
func (s *ChunkStore) LoadIndex() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.existingChunks = make(map[string]bool)

	chunksDir := filepath.Join(s.basePath, "chunks")
	return filepath.Walk(chunksDir, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}

		// Extract the hash by stripping extensions outermost-first
		// (.enc, then .gz, then .chunk) — mirroring how chunkExt appends them.
		base := filepath.Base(path)
		hash := base
		for _, ext := range []string{".enc", ".gz", ".chunk"} {
			if len(hash) > len(ext) && hash[len(hash)-len(ext):] == ext {
				hash = hash[:len(hash)-len(ext)]
			}
		}
		// Only a bare 64-char name is a valid SHA-256 hex digest; this also
		// skips leftover ".tmp" files from interrupted Put calls.
		if len(hash) == 64 {
			s.existingChunks[hash] = true
		}

		return nil
	})
}
|
||||||
|
|
||||||
|
// compressData compresses data using gzip
|
||||||
|
func (s *ChunkStore) compressData(data []byte) ([]byte, error) {
|
||||||
|
var buf []byte
|
||||||
|
w, err := gzip.NewWriterLevel((*bytesBuffer)(&buf), gzip.BestCompression)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := w.Write(data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := w.Close(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// bytesBuffer adapts a byte slice into an io.Writer that appends to it.
type bytesBuffer []byte

// Write appends p to the underlying slice; it never fails.
func (b *bytesBuffer) Write(p []byte) (int, error) {
	*b = append(*b, p...)
	return len(p), nil
}
|
||||||
|
|
||||||
|
// decompressData decompresses gzip data
|
||||||
|
func (s *ChunkStore) decompressData(data []byte) ([]byte, error) {
|
||||||
|
r, err := gzip.NewReader(&bytesReader{data: data})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
return io.ReadAll(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// bytesReader is a simple io.Reader from a byte slice
|
||||||
|
type bytesReader struct {
|
||||||
|
data []byte
|
||||||
|
pos int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *bytesReader) Read(p []byte) (int, error) {
|
||||||
|
if r.pos >= len(r.data) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
n := copy(p, r.data[r.pos:])
|
||||||
|
r.pos += n
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// encryptData encrypts plaintext with AES-256-GCM, prepending the random
// nonce so the output layout is nonce || ciphertext || tag (consumed
// again by decryptData).
func (s *ChunkStore) encryptData(plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(s.encryptionKey)
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	// Fresh random nonce per chunk; GCM nonces must never repeat for a key.
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}

	// Seal appends ciphertext+tag to the nonce slice (first arg is dst).
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}
|
||||||
|
|
||||||
|
// decryptData decrypts data produced by encryptData
// (layout: nonce || ciphertext || tag). Authentication failures surface
// as an error from gcm.Open, so tampered chunks are rejected.
func (s *ChunkStore) decryptData(ciphertext []byte) ([]byte, error) {
	block, err := aes.NewCipher(s.encryptionKey)
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	// Guard the nonce split below against truncated input.
	if len(ciphertext) < gcm.NonceSize() {
		return nil, fmt.Errorf("ciphertext too short")
	}

	nonce := ciphertext[:gcm.NonceSize()]
	ciphertext = ciphertext[gcm.NonceSize():]

	return gcm.Open(nil, nonce, ciphertext, nil)
}
|
||||||
@@ -223,11 +223,11 @@ func (r *DrillResult) IsSuccess() bool {
|
|||||||
|
|
||||||
// Summary returns a human-readable summary of the drill
|
// Summary returns a human-readable summary of the drill
|
||||||
func (r *DrillResult) Summary() string {
|
func (r *DrillResult) Summary() string {
|
||||||
status := "✅ PASSED"
|
status := "[OK] PASSED"
|
||||||
if !r.Success {
|
if !r.Success {
|
||||||
status = "❌ FAILED"
|
status = "[FAIL] FAILED"
|
||||||
} else if r.Status == StatusPartial {
|
} else if r.Status == StatusPartial {
|
||||||
status = "⚠️ PARTIAL"
|
status = "[WARN] PARTIAL"
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Sprintf("%s - %s (%.2fs) - %d tables, %d rows",
|
return fmt.Sprintf("%s - %s (%.2fs) - %d tables, %d rows",
|
||||||
|
|||||||
@@ -41,20 +41,20 @@ func (e *Engine) Run(ctx context.Context, config *DrillConfig) (*DrillResult, er
|
|||||||
TargetRTO: float64(config.MaxRestoreSeconds),
|
TargetRTO: float64(config.MaxRestoreSeconds),
|
||||||
}
|
}
|
||||||
|
|
||||||
e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
e.log.Info("=====================================================")
|
||||||
e.log.Info(" 🧪 DR Drill: " + result.DrillID)
|
e.log.Info(" [TEST] DR Drill: " + result.DrillID)
|
||||||
e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
e.log.Info("=====================================================")
|
||||||
e.log.Info("")
|
e.log.Info("")
|
||||||
|
|
||||||
// Cleanup function for error cases
|
// Cleanup function for error cases
|
||||||
var containerID string
|
var containerID string
|
||||||
cleanup := func() {
|
cleanup := func() {
|
||||||
if containerID != "" && config.CleanupOnExit && (result.Success || !config.KeepOnFailure) {
|
if containerID != "" && config.CleanupOnExit && (result.Success || !config.KeepOnFailure) {
|
||||||
e.log.Info("🗑️ Cleaning up container...")
|
e.log.Info("[DEL] Cleaning up container...")
|
||||||
e.docker.RemoveContainer(context.Background(), containerID)
|
e.docker.RemoveContainer(context.Background(), containerID)
|
||||||
} else if containerID != "" {
|
} else if containerID != "" {
|
||||||
result.ContainerKept = true
|
result.ContainerKept = true
|
||||||
e.log.Info("📦 Container kept for debugging: " + containerID)
|
e.log.Info("[PKG] Container kept for debugging: " + containerID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
@@ -88,7 +88,7 @@ func (e *Engine) Run(ctx context.Context, config *DrillConfig) (*DrillResult, er
|
|||||||
}
|
}
|
||||||
containerID = container.ID
|
containerID = container.ID
|
||||||
result.ContainerID = containerID
|
result.ContainerID = containerID
|
||||||
e.log.Info("📦 Container started: " + containerID[:12])
|
e.log.Info("[PKG] Container started: " + containerID[:12])
|
||||||
|
|
||||||
// Wait for container to be healthy
|
// Wait for container to be healthy
|
||||||
if err := e.docker.WaitForHealth(ctx, containerID, config.DatabaseType, config.ContainerTimeout); err != nil {
|
if err := e.docker.WaitForHealth(ctx, containerID, config.DatabaseType, config.ContainerTimeout); err != nil {
|
||||||
@@ -118,7 +118,7 @@ func (e *Engine) Run(ctx context.Context, config *DrillConfig) (*DrillResult, er
|
|||||||
result.RestoreTime = time.Since(restoreStart).Seconds()
|
result.RestoreTime = time.Since(restoreStart).Seconds()
|
||||||
e.completePhase(&phase, fmt.Sprintf("Restored in %.2fs", result.RestoreTime))
|
e.completePhase(&phase, fmt.Sprintf("Restored in %.2fs", result.RestoreTime))
|
||||||
result.Phases = append(result.Phases, phase)
|
result.Phases = append(result.Phases, phase)
|
||||||
e.log.Info(fmt.Sprintf("✅ Backup restored in %.2fs", result.RestoreTime))
|
e.log.Info(fmt.Sprintf("[OK] Backup restored in %.2fs", result.RestoreTime))
|
||||||
|
|
||||||
// Phase 4: Validate
|
// Phase 4: Validate
|
||||||
phase = e.startPhase("Validate Database")
|
phase = e.startPhase("Validate Database")
|
||||||
@@ -182,24 +182,24 @@ func (e *Engine) preflightChecks(ctx context.Context, config *DrillConfig) error
|
|||||||
if err := e.docker.CheckDockerAvailable(ctx); err != nil {
|
if err := e.docker.CheckDockerAvailable(ctx); err != nil {
|
||||||
return fmt.Errorf("docker not available: %w", err)
|
return fmt.Errorf("docker not available: %w", err)
|
||||||
}
|
}
|
||||||
e.log.Info("✓ Docker is available")
|
e.log.Info("[OK] Docker is available")
|
||||||
|
|
||||||
// Check backup file exists
|
// Check backup file exists
|
||||||
if _, err := os.Stat(config.BackupPath); err != nil {
|
if _, err := os.Stat(config.BackupPath); err != nil {
|
||||||
return fmt.Errorf("backup file not found: %s", config.BackupPath)
|
return fmt.Errorf("backup file not found: %s", config.BackupPath)
|
||||||
}
|
}
|
||||||
e.log.Info("✓ Backup file exists: " + filepath.Base(config.BackupPath))
|
e.log.Info("[OK] Backup file exists: " + filepath.Base(config.BackupPath))
|
||||||
|
|
||||||
// Pull Docker image
|
// Pull Docker image
|
||||||
image := config.ContainerImage
|
image := config.ContainerImage
|
||||||
if image == "" {
|
if image == "" {
|
||||||
image = GetDefaultImage(config.DatabaseType, "")
|
image = GetDefaultImage(config.DatabaseType, "")
|
||||||
}
|
}
|
||||||
e.log.Info("⬇️ Pulling image: " + image)
|
e.log.Info("[DOWN] Pulling image: " + image)
|
||||||
if err := e.docker.PullImage(ctx, image); err != nil {
|
if err := e.docker.PullImage(ctx, image); err != nil {
|
||||||
return fmt.Errorf("failed to pull image: %w", err)
|
return fmt.Errorf("failed to pull image: %w", err)
|
||||||
}
|
}
|
||||||
e.log.Info("✓ Image ready: " + image)
|
e.log.Info("[OK] Image ready: " + image)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -243,7 +243,7 @@ func (e *Engine) restoreBackup(ctx context.Context, config *DrillConfig, contain
|
|||||||
backupName := filepath.Base(config.BackupPath)
|
backupName := filepath.Base(config.BackupPath)
|
||||||
containerBackupPath := "/tmp/" + backupName
|
containerBackupPath := "/tmp/" + backupName
|
||||||
|
|
||||||
e.log.Info("📁 Copying backup to container...")
|
e.log.Info("[DIR] Copying backup to container...")
|
||||||
if err := e.docker.CopyToContainer(ctx, containerID, config.BackupPath, containerBackupPath); err != nil {
|
if err := e.docker.CopyToContainer(ctx, containerID, config.BackupPath, containerBackupPath); err != nil {
|
||||||
return fmt.Errorf("failed to copy backup: %w", err)
|
return fmt.Errorf("failed to copy backup: %w", err)
|
||||||
}
|
}
|
||||||
@@ -256,7 +256,7 @@ func (e *Engine) restoreBackup(ctx context.Context, config *DrillConfig, contain
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Restore based on database type and format
|
// Restore based on database type and format
|
||||||
e.log.Info("🔄 Restoring backup...")
|
e.log.Info("[EXEC] Restoring backup...")
|
||||||
return e.executeRestore(ctx, config, containerID, containerBackupPath, containerConfig)
|
return e.executeRestore(ctx, config, containerID, containerBackupPath, containerConfig)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -366,13 +366,13 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
|
|||||||
tables, err := validator.GetTableList(ctx)
|
tables, err := validator.GetTableList(ctx)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
result.TableCount = len(tables)
|
result.TableCount = len(tables)
|
||||||
e.log.Info(fmt.Sprintf("📊 Tables found: %d", result.TableCount))
|
e.log.Info(fmt.Sprintf("[STATS] Tables found: %d", result.TableCount))
|
||||||
}
|
}
|
||||||
|
|
||||||
totalRows, err := validator.GetTotalRowCount(ctx)
|
totalRows, err := validator.GetTotalRowCount(ctx)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
result.TotalRows = totalRows
|
result.TotalRows = totalRows
|
||||||
e.log.Info(fmt.Sprintf("📊 Total rows: %d", result.TotalRows))
|
e.log.Info(fmt.Sprintf("[STATS] Total rows: %d", result.TotalRows))
|
||||||
}
|
}
|
||||||
|
|
||||||
dbSize, err := validator.GetDatabaseSize(ctx, config.DatabaseName)
|
dbSize, err := validator.GetDatabaseSize(ctx, config.DatabaseName)
|
||||||
@@ -387,9 +387,9 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
|
|||||||
result.CheckResults = append(result.CheckResults, tr)
|
result.CheckResults = append(result.CheckResults, tr)
|
||||||
if !tr.Success {
|
if !tr.Success {
|
||||||
errorCount++
|
errorCount++
|
||||||
e.log.Warn("❌ " + tr.Message)
|
e.log.Warn("[FAIL] " + tr.Message)
|
||||||
} else {
|
} else {
|
||||||
e.log.Info("✓ " + tr.Message)
|
e.log.Info("[OK] " + tr.Message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -404,9 +404,9 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
|
|||||||
totalQueryTime += qr.Duration
|
totalQueryTime += qr.Duration
|
||||||
if !qr.Success {
|
if !qr.Success {
|
||||||
errorCount++
|
errorCount++
|
||||||
e.log.Warn(fmt.Sprintf("❌ %s: %s", qr.Name, qr.Error))
|
e.log.Warn(fmt.Sprintf("[FAIL] %s: %s", qr.Name, qr.Error))
|
||||||
} else {
|
} else {
|
||||||
e.log.Info(fmt.Sprintf("✓ %s: %s (%.0fms)", qr.Name, qr.Result, qr.Duration))
|
e.log.Info(fmt.Sprintf("[OK] %s: %s (%.0fms)", qr.Name, qr.Result, qr.Duration))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(queryResults) > 0 {
|
if len(queryResults) > 0 {
|
||||||
@@ -421,9 +421,9 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
|
|||||||
result.CheckResults = append(result.CheckResults, cr)
|
result.CheckResults = append(result.CheckResults, cr)
|
||||||
if !cr.Success {
|
if !cr.Success {
|
||||||
errorCount++
|
errorCount++
|
||||||
e.log.Warn("❌ " + cr.Message)
|
e.log.Warn("[FAIL] " + cr.Message)
|
||||||
} else {
|
} else {
|
||||||
e.log.Info("✓ " + cr.Message)
|
e.log.Info("[OK] " + cr.Message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -433,7 +433,7 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
|
|||||||
errorCount++
|
errorCount++
|
||||||
msg := fmt.Sprintf("Total rows (%d) below minimum (%d)", result.TotalRows, config.MinRowCount)
|
msg := fmt.Sprintf("Total rows (%d) below minimum (%d)", result.TotalRows, config.MinRowCount)
|
||||||
result.Warnings = append(result.Warnings, msg)
|
result.Warnings = append(result.Warnings, msg)
|
||||||
e.log.Warn("⚠️ " + msg)
|
e.log.Warn("[WARN] " + msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return errorCount
|
return errorCount
|
||||||
@@ -441,7 +441,7 @@ func (e *Engine) validateDatabase(ctx context.Context, config *DrillConfig, resu
|
|||||||
|
|
||||||
// startPhase starts a new drill phase
|
// startPhase starts a new drill phase
|
||||||
func (e *Engine) startPhase(name string) DrillPhase {
|
func (e *Engine) startPhase(name string) DrillPhase {
|
||||||
e.log.Info("▶️ " + name)
|
e.log.Info("[RUN] " + name)
|
||||||
return DrillPhase{
|
return DrillPhase{
|
||||||
Name: name,
|
Name: name,
|
||||||
Status: "running",
|
Status: "running",
|
||||||
@@ -463,7 +463,7 @@ func (e *Engine) failPhase(phase *DrillPhase, message string) {
|
|||||||
phase.Duration = phase.EndTime.Sub(phase.StartTime).Seconds()
|
phase.Duration = phase.EndTime.Sub(phase.StartTime).Seconds()
|
||||||
phase.Status = "failed"
|
phase.Status = "failed"
|
||||||
phase.Message = message
|
phase.Message = message
|
||||||
e.log.Error("❌ Phase failed: " + message)
|
e.log.Error("[FAIL] Phase failed: " + message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// finalize completes the drill result
|
// finalize completes the drill result
|
||||||
@@ -472,9 +472,9 @@ func (e *Engine) finalize(result *DrillResult) {
|
|||||||
result.Duration = result.EndTime.Sub(result.StartTime).Seconds()
|
result.Duration = result.EndTime.Sub(result.StartTime).Seconds()
|
||||||
|
|
||||||
e.log.Info("")
|
e.log.Info("")
|
||||||
e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
e.log.Info("=====================================================")
|
||||||
e.log.Info(" " + result.Summary())
|
e.log.Info(" " + result.Summary())
|
||||||
e.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
e.log.Info("=====================================================")
|
||||||
|
|
||||||
if result.Success {
|
if result.Success {
|
||||||
e.log.Info(fmt.Sprintf(" RTO: %.2fs (target: %.0fs) %s",
|
e.log.Info(fmt.Sprintf(" RTO: %.2fs (target: %.0fs) %s",
|
||||||
@@ -484,9 +484,9 @@ func (e *Engine) finalize(result *DrillResult) {
|
|||||||
|
|
||||||
func boolIcon(b bool) string {
|
func boolIcon(b bool) string {
|
||||||
if b {
|
if b {
|
||||||
return "✅"
|
return "[OK]"
|
||||||
}
|
}
|
||||||
return "❌"
|
return "[FAIL]"
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cleanup removes drill resources
|
// Cleanup removes drill resources
|
||||||
@@ -498,7 +498,7 @@ func (e *Engine) Cleanup(ctx context.Context, drillID string) error {
|
|||||||
|
|
||||||
for _, c := range containers {
|
for _, c := range containers {
|
||||||
if strings.Contains(c.Name, drillID) || (drillID == "" && strings.HasPrefix(c.Name, "drill_")) {
|
if strings.Contains(c.Name, drillID) || (drillID == "" && strings.HasPrefix(c.Name, "drill_")) {
|
||||||
e.log.Info("🗑️ Removing container: " + c.Name)
|
e.log.Info("[DEL] Removing container: " + c.Name)
|
||||||
if err := e.docker.RemoveContainer(ctx, c.ID); err != nil {
|
if err := e.docker.RemoveContainer(ctx, c.ID); err != nil {
|
||||||
e.log.Warn("Failed to remove container", "id", c.ID, "error", err)
|
e.log.Warn("Failed to remove container", "id", c.ID, "error", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
|
|
||||||
func TestEncryptDecrypt(t *testing.T) {
|
func TestEncryptDecrypt(t *testing.T) {
|
||||||
// Test data
|
// Test data
|
||||||
original := []byte("This is a secret database backup that needs encryption! 🔒")
|
original := []byte("This is a secret database backup that needs encryption! [LOCK]")
|
||||||
|
|
||||||
// Test with passphrase
|
// Test with passphrase
|
||||||
t.Run("Passphrase", func(t *testing.T) {
|
t.Run("Passphrase", func(t *testing.T) {
|
||||||
@@ -57,7 +57,7 @@ func TestEncryptDecrypt(t *testing.T) {
|
|||||||
string(original), string(decrypted))
|
string(original), string(decrypted))
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Log("✅ Encryption/decryption successful")
|
t.Log("[OK] Encryption/decryption successful")
|
||||||
})
|
})
|
||||||
|
|
||||||
// Test with direct key
|
// Test with direct key
|
||||||
@@ -102,7 +102,7 @@ func TestEncryptDecrypt(t *testing.T) {
|
|||||||
t.Errorf("Decrypted data doesn't match original")
|
t.Errorf("Decrypted data doesn't match original")
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Log("✅ Direct key encryption/decryption successful")
|
t.Log("[OK] Direct key encryption/decryption successful")
|
||||||
})
|
})
|
||||||
|
|
||||||
// Test wrong password
|
// Test wrong password
|
||||||
@@ -133,7 +133,7 @@ func TestEncryptDecrypt(t *testing.T) {
|
|||||||
t.Error("Expected decryption to fail with wrong password, but it succeeded")
|
t.Error("Expected decryption to fail with wrong password, but it succeeded")
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Logf("✅ Wrong password correctly rejected: %v", err)
|
t.Logf("[OK] Wrong password correctly rejected: %v", err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -183,7 +183,7 @@ func TestLargeData(t *testing.T) {
|
|||||||
t.Errorf("Large data decryption failed")
|
t.Errorf("Large data decryption failed")
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Log("✅ Large data encryption/decryption successful")
|
t.Log("[OK] Large data encryption/decryption successful")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyGeneration(t *testing.T) {
|
func TestKeyGeneration(t *testing.T) {
|
||||||
@@ -207,7 +207,7 @@ func TestKeyGeneration(t *testing.T) {
|
|||||||
t.Error("Generated keys are identical - randomness broken!")
|
t.Error("Generated keys are identical - randomness broken!")
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Log("✅ Key generation successful")
|
t.Log("[OK] Key generation successful")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyDerivation(t *testing.T) {
|
func TestKeyDerivation(t *testing.T) {
|
||||||
@@ -230,5 +230,5 @@ func TestKeyDerivation(t *testing.T) {
|
|||||||
t.Error("Different salts produced same key")
|
t.Error("Different salts produced same key")
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Log("✅ Key derivation successful")
|
t.Log("[OK] Key derivation successful")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -339,7 +339,7 @@ func (e *CloneEngine) Backup(ctx context.Context, opts *BackupOptions) (*BackupR
|
|||||||
|
|
||||||
// Save metadata
|
// Save metadata
|
||||||
meta := &metadata.BackupMetadata{
|
meta := &metadata.BackupMetadata{
|
||||||
Version: "3.40.0",
|
Version: "3.42.1",
|
||||||
Timestamp: startTime,
|
Timestamp: startTime,
|
||||||
Database: opts.Database,
|
Database: opts.Database,
|
||||||
DatabaseType: "mysql",
|
DatabaseType: "mysql",
|
||||||
|
|||||||
@@ -234,10 +234,26 @@ func (e *MySQLDumpEngine) Backup(ctx context.Context, opts *BackupOptions) (*Bac
|
|||||||
gzWriter.Close()
|
gzWriter.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for command
|
// Wait for command with proper context handling
|
||||||
if err := cmd.Wait(); err != nil {
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("MySQL backup cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
stderr := stderrBuf.String()
|
stderr := stderrBuf.String()
|
||||||
return nil, fmt.Errorf("mysqldump failed: %w\n%s", err, stderr)
|
return nil, fmt.Errorf("mysqldump failed: %w\n%s", cmdErr, stderr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get file info
|
// Get file info
|
||||||
@@ -254,7 +270,7 @@ func (e *MySQLDumpEngine) Backup(ctx context.Context, opts *BackupOptions) (*Bac
|
|||||||
|
|
||||||
// Save metadata
|
// Save metadata
|
||||||
meta := &metadata.BackupMetadata{
|
meta := &metadata.BackupMetadata{
|
||||||
Version: "3.40.0",
|
Version: "3.42.1",
|
||||||
Timestamp: startTime,
|
Timestamp: startTime,
|
||||||
Database: opts.Database,
|
Database: opts.Database,
|
||||||
DatabaseType: "mysql",
|
DatabaseType: "mysql",
|
||||||
@@ -442,8 +458,25 @@ func (e *MySQLDumpEngine) BackupToWriter(ctx context.Context, w io.Writer, opts
|
|||||||
gzWriter.Close()
|
gzWriter.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
// Wait for command with proper context handling
|
||||||
return nil, fmt.Errorf("mysqldump failed: %w\n%s", err, stderrBuf.String())
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("MySQL streaming backup cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
|
return nil, fmt.Errorf("mysqldump failed: %w\n%s", cmdErr, stderrBuf.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
return &BackupResult{
|
return &BackupResult{
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ func (b *BtrfsBackend) Detect(dataDir string) (bool, error) {
|
|||||||
// CreateSnapshot creates a Btrfs snapshot
|
// CreateSnapshot creates a Btrfs snapshot
|
||||||
func (b *BtrfsBackend) CreateSnapshot(ctx context.Context, opts SnapshotOptions) (*Snapshot, error) {
|
func (b *BtrfsBackend) CreateSnapshot(ctx context.Context, opts SnapshotOptions) (*Snapshot, error) {
|
||||||
if b.config == nil || b.config.Subvolume == "" {
|
if b.config == nil || b.config.Subvolume == "" {
|
||||||
return nil, fmt.Errorf("Btrfs subvolume not configured")
|
return nil, fmt.Errorf("btrfs subvolume not configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate snapshot name
|
// Generate snapshot name
|
||||||
|
|||||||
@@ -188,6 +188,8 @@ func (e *SnapshotEngine) Backup(ctx context.Context, opts *BackupOptions) (*Back
|
|||||||
// Step 4: Mount snapshot
|
// Step 4: Mount snapshot
|
||||||
mountPoint := e.config.MountPoint
|
mountPoint := e.config.MountPoint
|
||||||
if mountPoint == "" {
|
if mountPoint == "" {
|
||||||
|
// Note: snapshot engine uses snapshot.Config which doesnt have GetEffectiveWorkDir()
|
||||||
|
// TODO: Refactor to use main config.Config for WorkDir support
|
||||||
mountPoint = filepath.Join(os.TempDir(), fmt.Sprintf("dbbackup_snap_%s", timestamp))
|
mountPoint = filepath.Join(os.TempDir(), fmt.Sprintf("dbbackup_snap_%s", timestamp))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -223,7 +225,7 @@ func (e *SnapshotEngine) Backup(ctx context.Context, opts *BackupOptions) (*Back
|
|||||||
|
|
||||||
// Save metadata
|
// Save metadata
|
||||||
meta := &metadata.BackupMetadata{
|
meta := &metadata.BackupMetadata{
|
||||||
Version: "3.40.0",
|
Version: "3.42.1",
|
||||||
Timestamp: startTime,
|
Timestamp: startTime,
|
||||||
Database: opts.Database,
|
Database: opts.Database,
|
||||||
DatabaseType: "mysql",
|
DatabaseType: "mysql",
|
||||||
|
|||||||
@@ -658,9 +658,9 @@ func (i *Installer) printNextSteps(opts InstallOptions) {
|
|||||||
serviceName := strings.Replace(timerName, ".timer", ".service", 1)
|
serviceName := strings.Replace(timerName, ".timer", ".service", 1)
|
||||||
|
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("✅ Installation successful!")
|
fmt.Println("[OK] Installation successful!")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("📋 Next steps:")
|
fmt.Println("[NEXT] Next steps:")
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Printf(" 1. Edit configuration: sudo nano %s\n", opts.ConfigPath)
|
fmt.Printf(" 1. Edit configuration: sudo nano %s\n", opts.ConfigPath)
|
||||||
fmt.Printf(" 2. Set credentials: sudo nano /etc/dbbackup/env.d/%s.conf\n", opts.Instance)
|
fmt.Printf(" 2. Set credentials: sudo nano /etc/dbbackup/env.d/%s.conf\n", opts.Instance)
|
||||||
@@ -668,12 +668,12 @@ func (i *Installer) printNextSteps(opts InstallOptions) {
|
|||||||
fmt.Printf(" 4. Verify timer status: sudo systemctl status %s\n", timerName)
|
fmt.Printf(" 4. Verify timer status: sudo systemctl status %s\n", timerName)
|
||||||
fmt.Printf(" 5. Run backup manually: sudo systemctl start %s\n", serviceName)
|
fmt.Printf(" 5. Run backup manually: sudo systemctl start %s\n", serviceName)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println("📊 View backup logs:")
|
fmt.Println("[LOGS] View backup logs:")
|
||||||
fmt.Printf(" journalctl -u %s -f\n", serviceName)
|
fmt.Printf(" journalctl -u %s -f\n", serviceName)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
if opts.WithMetrics {
|
if opts.WithMetrics {
|
||||||
fmt.Println("📈 Prometheus metrics:")
|
fmt.Println("[METRICS] Prometheus metrics:")
|
||||||
fmt.Printf(" curl http://localhost:%d/metrics\n", opts.MetricsPort)
|
fmt.Printf(" curl http://localhost:%d/metrics\n", opts.MetricsPort)
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -117,7 +117,7 @@ func NewEngine(sourceCfg, targetCfg *config.Config, log logger.Logger) (*Engine,
|
|||||||
targetDB: targetDB,
|
targetDB: targetDB,
|
||||||
log: log,
|
log: log,
|
||||||
progress: progress.NewSpinner(),
|
progress: progress.NewSpinner(),
|
||||||
workDir: os.TempDir(),
|
workDir: sourceCfg.GetEffectiveWorkDir(),
|
||||||
keepBackup: false,
|
keepBackup: false,
|
||||||
jobs: 4,
|
jobs: 4,
|
||||||
dryRun: false,
|
dryRun: false,
|
||||||
|
|||||||
@@ -202,9 +202,9 @@ func (b *Batcher) formatSummaryDigest(events []*Event, success, failure, dbCount
|
|||||||
|
|
||||||
func (b *Batcher) formatCompactDigest(events []*Event, success, failure int) string {
|
func (b *Batcher) formatCompactDigest(events []*Event, success, failure int) string {
|
||||||
if failure > 0 {
|
if failure > 0 {
|
||||||
return fmt.Sprintf("⚠️ %d/%d operations failed", failure, len(events))
|
return fmt.Sprintf("[WARN] %d/%d operations failed", failure, len(events))
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("✅ All %d operations successful", success)
|
return fmt.Sprintf("[OK] All %d operations successful", success)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Batcher) formatDetailedDigest(events []*Event) string {
|
func (b *Batcher) formatDetailedDigest(events []*Event) string {
|
||||||
@@ -215,9 +215,9 @@ func (b *Batcher) formatDetailedDigest(events []*Event) string {
|
|||||||
icon := "•"
|
icon := "•"
|
||||||
switch e.Severity {
|
switch e.Severity {
|
||||||
case SeverityError, SeverityCritical:
|
case SeverityError, SeverityCritical:
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
case SeverityWarning:
|
case SeverityWarning:
|
||||||
icon = "⚠️"
|
icon = "[WARN]"
|
||||||
}
|
}
|
||||||
|
|
||||||
msg += fmt.Sprintf("%s [%s] %s: %s\n",
|
msg += fmt.Sprintf("%s [%s] %s: %s\n",
|
||||||
|
|||||||
@@ -183,43 +183,43 @@ func DefaultConfig() Config {
|
|||||||
|
|
||||||
// FormatEventSubject generates a subject line for notifications
|
// FormatEventSubject generates a subject line for notifications
|
||||||
func FormatEventSubject(event *Event) string {
|
func FormatEventSubject(event *Event) string {
|
||||||
icon := "ℹ️"
|
icon := "[INFO]"
|
||||||
switch event.Severity {
|
switch event.Severity {
|
||||||
case SeverityWarning:
|
case SeverityWarning:
|
||||||
icon = "⚠️"
|
icon = "[WARN]"
|
||||||
case SeverityError, SeverityCritical:
|
case SeverityError, SeverityCritical:
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
}
|
}
|
||||||
|
|
||||||
verb := "Event"
|
verb := "Event"
|
||||||
switch event.Type {
|
switch event.Type {
|
||||||
case EventBackupStarted:
|
case EventBackupStarted:
|
||||||
verb = "Backup Started"
|
verb = "Backup Started"
|
||||||
icon = "🔄"
|
icon = "[EXEC]"
|
||||||
case EventBackupCompleted:
|
case EventBackupCompleted:
|
||||||
verb = "Backup Completed"
|
verb = "Backup Completed"
|
||||||
icon = "✅"
|
icon = "[OK]"
|
||||||
case EventBackupFailed:
|
case EventBackupFailed:
|
||||||
verb = "Backup Failed"
|
verb = "Backup Failed"
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
case EventRestoreStarted:
|
case EventRestoreStarted:
|
||||||
verb = "Restore Started"
|
verb = "Restore Started"
|
||||||
icon = "🔄"
|
icon = "[EXEC]"
|
||||||
case EventRestoreCompleted:
|
case EventRestoreCompleted:
|
||||||
verb = "Restore Completed"
|
verb = "Restore Completed"
|
||||||
icon = "✅"
|
icon = "[OK]"
|
||||||
case EventRestoreFailed:
|
case EventRestoreFailed:
|
||||||
verb = "Restore Failed"
|
verb = "Restore Failed"
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
case EventCleanupCompleted:
|
case EventCleanupCompleted:
|
||||||
verb = "Cleanup Completed"
|
verb = "Cleanup Completed"
|
||||||
icon = "🗑️"
|
icon = "[DEL]"
|
||||||
case EventVerifyCompleted:
|
case EventVerifyCompleted:
|
||||||
verb = "Verification Passed"
|
verb = "Verification Passed"
|
||||||
icon = "✅"
|
icon = "[OK]"
|
||||||
case EventVerifyFailed:
|
case EventVerifyFailed:
|
||||||
verb = "Verification Failed"
|
verb = "Verification Failed"
|
||||||
icon = "❌"
|
icon = "[FAIL]"
|
||||||
case EventPITRRecovery:
|
case EventPITRRecovery:
|
||||||
verb = "PITR Recovery"
|
verb = "PITR Recovery"
|
||||||
icon = "⏪"
|
icon = "⏪"
|
||||||
|
|||||||
@@ -30,52 +30,52 @@ type Templates struct {
|
|||||||
func DefaultTemplates() map[EventType]Templates {
|
func DefaultTemplates() map[EventType]Templates {
|
||||||
return map[EventType]Templates{
|
return map[EventType]Templates{
|
||||||
EventBackupStarted: {
|
EventBackupStarted: {
|
||||||
Subject: "🔄 Backup Started: {{.Database}} on {{.Hostname}}",
|
Subject: "[EXEC] Backup Started: {{.Database}} on {{.Hostname}}",
|
||||||
TextBody: backupStartedText,
|
TextBody: backupStartedText,
|
||||||
HTMLBody: backupStartedHTML,
|
HTMLBody: backupStartedHTML,
|
||||||
},
|
},
|
||||||
EventBackupCompleted: {
|
EventBackupCompleted: {
|
||||||
Subject: "✅ Backup Completed: {{.Database}} on {{.Hostname}}",
|
Subject: "[OK] Backup Completed: {{.Database}} on {{.Hostname}}",
|
||||||
TextBody: backupCompletedText,
|
TextBody: backupCompletedText,
|
||||||
HTMLBody: backupCompletedHTML,
|
HTMLBody: backupCompletedHTML,
|
||||||
},
|
},
|
||||||
EventBackupFailed: {
|
EventBackupFailed: {
|
||||||
Subject: "❌ Backup FAILED: {{.Database}} on {{.Hostname}}",
|
Subject: "[FAIL] Backup FAILED: {{.Database}} on {{.Hostname}}",
|
||||||
TextBody: backupFailedText,
|
TextBody: backupFailedText,
|
||||||
HTMLBody: backupFailedHTML,
|
HTMLBody: backupFailedHTML,
|
||||||
},
|
},
|
||||||
EventRestoreStarted: {
|
EventRestoreStarted: {
|
||||||
Subject: "🔄 Restore Started: {{.Database}} on {{.Hostname}}",
|
Subject: "[EXEC] Restore Started: {{.Database}} on {{.Hostname}}",
|
||||||
TextBody: restoreStartedText,
|
TextBody: restoreStartedText,
|
||||||
HTMLBody: restoreStartedHTML,
|
HTMLBody: restoreStartedHTML,
|
||||||
},
|
},
|
||||||
EventRestoreCompleted: {
|
EventRestoreCompleted: {
|
||||||
Subject: "✅ Restore Completed: {{.Database}} on {{.Hostname}}",
|
Subject: "[OK] Restore Completed: {{.Database}} on {{.Hostname}}",
|
||||||
TextBody: restoreCompletedText,
|
TextBody: restoreCompletedText,
|
||||||
HTMLBody: restoreCompletedHTML,
|
HTMLBody: restoreCompletedHTML,
|
||||||
},
|
},
|
||||||
EventRestoreFailed: {
|
EventRestoreFailed: {
|
||||||
Subject: "❌ Restore FAILED: {{.Database}} on {{.Hostname}}",
|
Subject: "[FAIL] Restore FAILED: {{.Database}} on {{.Hostname}}",
|
||||||
TextBody: restoreFailedText,
|
TextBody: restoreFailedText,
|
||||||
HTMLBody: restoreFailedHTML,
|
HTMLBody: restoreFailedHTML,
|
||||||
},
|
},
|
||||||
EventVerificationPassed: {
|
EventVerificationPassed: {
|
||||||
Subject: "✅ Verification Passed: {{.Database}}",
|
Subject: "[OK] Verification Passed: {{.Database}}",
|
||||||
TextBody: verificationPassedText,
|
TextBody: verificationPassedText,
|
||||||
HTMLBody: verificationPassedHTML,
|
HTMLBody: verificationPassedHTML,
|
||||||
},
|
},
|
||||||
EventVerificationFailed: {
|
EventVerificationFailed: {
|
||||||
Subject: "❌ Verification FAILED: {{.Database}}",
|
Subject: "[FAIL] Verification FAILED: {{.Database}}",
|
||||||
TextBody: verificationFailedText,
|
TextBody: verificationFailedText,
|
||||||
HTMLBody: verificationFailedHTML,
|
HTMLBody: verificationFailedHTML,
|
||||||
},
|
},
|
||||||
EventDRDrillPassed: {
|
EventDRDrillPassed: {
|
||||||
Subject: "✅ DR Drill Passed: {{.Database}}",
|
Subject: "[OK] DR Drill Passed: {{.Database}}",
|
||||||
TextBody: drDrillPassedText,
|
TextBody: drDrillPassedText,
|
||||||
HTMLBody: drDrillPassedHTML,
|
HTMLBody: drDrillPassedHTML,
|
||||||
},
|
},
|
||||||
EventDRDrillFailed: {
|
EventDRDrillFailed: {
|
||||||
Subject: "❌ DR Drill FAILED: {{.Database}}",
|
Subject: "[FAIL] DR Drill FAILED: {{.Database}}",
|
||||||
TextBody: drDrillFailedText,
|
TextBody: drDrillFailedText,
|
||||||
HTMLBody: drDrillFailedHTML,
|
HTMLBody: drDrillFailedHTML,
|
||||||
},
|
},
|
||||||
@@ -95,7 +95,7 @@ Started At: {{formatTime .Timestamp}}
|
|||||||
|
|
||||||
const backupStartedHTML = `
|
const backupStartedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #3498db;">🔄 Backup Started</h2>
|
<h2 style="color: #3498db;">[EXEC] Backup Started</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -121,7 +121,7 @@ Completed: {{formatTime .Timestamp}}
|
|||||||
|
|
||||||
const backupCompletedHTML = `
|
const backupCompletedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #27ae60;">✅ Backup Completed</h2>
|
<h2 style="color: #27ae60;">[OK] Backup Completed</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -137,7 +137,7 @@ const backupCompletedHTML = `
|
|||||||
`
|
`
|
||||||
|
|
||||||
const backupFailedText = `
|
const backupFailedText = `
|
||||||
⚠️ BACKUP FAILED ⚠️
|
[WARN] BACKUP FAILED [WARN]
|
||||||
|
|
||||||
Database: {{.Database}}
|
Database: {{.Database}}
|
||||||
Hostname: {{.Hostname}}
|
Hostname: {{.Hostname}}
|
||||||
@@ -152,7 +152,7 @@ Please investigate immediately.
|
|||||||
|
|
||||||
const backupFailedHTML = `
|
const backupFailedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #e74c3c;">❌ Backup FAILED</h2>
|
<h2 style="color: #e74c3c;">[FAIL] Backup FAILED</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -176,7 +176,7 @@ Started At: {{formatTime .Timestamp}}
|
|||||||
|
|
||||||
const restoreStartedHTML = `
|
const restoreStartedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #3498db;">🔄 Restore Started</h2>
|
<h2 style="color: #3498db;">[EXEC] Restore Started</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -200,7 +200,7 @@ Completed: {{formatTime .Timestamp}}
|
|||||||
|
|
||||||
const restoreCompletedHTML = `
|
const restoreCompletedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #27ae60;">✅ Restore Completed</h2>
|
<h2 style="color: #27ae60;">[OK] Restore Completed</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -214,7 +214,7 @@ const restoreCompletedHTML = `
|
|||||||
`
|
`
|
||||||
|
|
||||||
const restoreFailedText = `
|
const restoreFailedText = `
|
||||||
⚠️ RESTORE FAILED ⚠️
|
[WARN] RESTORE FAILED [WARN]
|
||||||
|
|
||||||
Database: {{.Database}}
|
Database: {{.Database}}
|
||||||
Hostname: {{.Hostname}}
|
Hostname: {{.Hostname}}
|
||||||
@@ -229,7 +229,7 @@ Please investigate immediately.
|
|||||||
|
|
||||||
const restoreFailedHTML = `
|
const restoreFailedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #e74c3c;">❌ Restore FAILED</h2>
|
<h2 style="color: #e74c3c;">[FAIL] Restore FAILED</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -255,7 +255,7 @@ Verified: {{formatTime .Timestamp}}
|
|||||||
|
|
||||||
const verificationPassedHTML = `
|
const verificationPassedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #27ae60;">✅ Verification Passed</h2>
|
<h2 style="color: #27ae60;">[OK] Verification Passed</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -269,7 +269,7 @@ const verificationPassedHTML = `
|
|||||||
`
|
`
|
||||||
|
|
||||||
const verificationFailedText = `
|
const verificationFailedText = `
|
||||||
⚠️ VERIFICATION FAILED ⚠️
|
[WARN] VERIFICATION FAILED [WARN]
|
||||||
|
|
||||||
Database: {{.Database}}
|
Database: {{.Database}}
|
||||||
Hostname: {{.Hostname}}
|
Hostname: {{.Hostname}}
|
||||||
@@ -284,7 +284,7 @@ Backup integrity may be compromised. Please investigate.
|
|||||||
|
|
||||||
const verificationFailedHTML = `
|
const verificationFailedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #e74c3c;">❌ Verification FAILED</h2>
|
<h2 style="color: #e74c3c;">[FAIL] Verification FAILED</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -314,7 +314,7 @@ Backup restore capability verified.
|
|||||||
|
|
||||||
const drDrillPassedHTML = `
|
const drDrillPassedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #27ae60;">✅ DR Drill Passed</h2>
|
<h2 style="color: #27ae60;">[OK] DR Drill Passed</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
@@ -326,12 +326,12 @@ const drDrillPassedHTML = `
|
|||||||
{{end}}
|
{{end}}
|
||||||
</table>
|
</table>
|
||||||
{{if .Message}}<p style="margin-top: 20px; color: #27ae60;">{{.Message}}</p>{{end}}
|
{{if .Message}}<p style="margin-top: 20px; color: #27ae60;">{{.Message}}</p>{{end}}
|
||||||
<p style="margin-top: 20px; color: #27ae60;">✓ Backup restore capability verified</p>
|
<p style="margin-top: 20px; color: #27ae60;">[OK] Backup restore capability verified</p>
|
||||||
</div>
|
</div>
|
||||||
`
|
`
|
||||||
|
|
||||||
const drDrillFailedText = `
|
const drDrillFailedText = `
|
||||||
⚠️ DR DRILL FAILED ⚠️
|
[WARN] DR DRILL FAILED [WARN]
|
||||||
|
|
||||||
Database: {{.Database}}
|
Database: {{.Database}}
|
||||||
Hostname: {{.Hostname}}
|
Hostname: {{.Hostname}}
|
||||||
@@ -346,7 +346,7 @@ Backup may not be restorable. Please investigate immediately.
|
|||||||
|
|
||||||
const drDrillFailedHTML = `
|
const drDrillFailedHTML = `
|
||||||
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
<div style="font-family: Arial, sans-serif; padding: 20px;">
|
||||||
<h2 style="color: #e74c3c;">❌ DR Drill FAILED</h2>
|
<h2 style="color: #e74c3c;">[FAIL] DR Drill FAILED</h2>
|
||||||
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
<table style="border-collapse: collapse; width: 100%; max-width: 600px;">
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Database:</td><td style="padding: 8px;">{{.Database}}</td></tr>
|
||||||
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
<tr><td style="padding: 8px; font-weight: bold;">Hostname:</td><td style="padding: 8px;">{{.Hostname}}</td></tr>
|
||||||
|
|||||||
@@ -212,7 +212,11 @@ func (m *BinlogManager) detectTools() error {
|
|||||||
|
|
||||||
// detectServerType determines if we're working with MySQL or MariaDB
|
// detectServerType determines if we're working with MySQL or MariaDB
|
||||||
func (m *BinlogManager) detectServerType() DatabaseType {
|
func (m *BinlogManager) detectServerType() DatabaseType {
|
||||||
cmd := exec.Command(m.mysqlbinlogPath, "--version")
|
// Use timeout to prevent blocking if command hangs
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, m.mysqlbinlogPath, "--version")
|
||||||
output, err := cmd.Output()
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return DatabaseMySQL // Default to MySQL
|
return DatabaseMySQL // Default to MySQL
|
||||||
|
|||||||
@@ -43,9 +43,9 @@ type RestoreOptions struct {
|
|||||||
|
|
||||||
// RestorePointInTime performs a Point-in-Time Recovery
|
// RestorePointInTime performs a Point-in-Time Recovery
|
||||||
func (ro *RestoreOrchestrator) RestorePointInTime(ctx context.Context, opts *RestoreOptions) error {
|
func (ro *RestoreOrchestrator) RestorePointInTime(ctx context.Context, opts *RestoreOptions) error {
|
||||||
ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
ro.log.Info("=====================================================")
|
||||||
ro.log.Info(" Point-in-Time Recovery (PITR)")
|
ro.log.Info(" Point-in-Time Recovery (PITR)")
|
||||||
ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
ro.log.Info("=====================================================")
|
||||||
ro.log.Info("")
|
ro.log.Info("")
|
||||||
ro.log.Info("Target:", "summary", opts.Target.Summary())
|
ro.log.Info("Target:", "summary", opts.Target.Summary())
|
||||||
ro.log.Info("Base Backup:", "path", opts.BaseBackupPath)
|
ro.log.Info("Base Backup:", "path", opts.BaseBackupPath)
|
||||||
@@ -91,11 +91,11 @@ func (ro *RestoreOrchestrator) RestorePointInTime(ctx context.Context, opts *Res
|
|||||||
return fmt.Errorf("failed to generate recovery configuration: %w", err)
|
return fmt.Errorf("failed to generate recovery configuration: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ro.log.Info("✅ Recovery configuration generated successfully")
|
ro.log.Info("[OK] Recovery configuration generated successfully")
|
||||||
ro.log.Info("")
|
ro.log.Info("")
|
||||||
ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
ro.log.Info("=====================================================")
|
||||||
ro.log.Info(" Next Steps:")
|
ro.log.Info(" Next Steps:")
|
||||||
ro.log.Info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
|
ro.log.Info("=====================================================")
|
||||||
ro.log.Info("")
|
ro.log.Info("")
|
||||||
ro.log.Info("1. Start PostgreSQL to begin recovery:")
|
ro.log.Info("1. Start PostgreSQL to begin recovery:")
|
||||||
ro.log.Info(fmt.Sprintf(" pg_ctl -D %s start", opts.TargetDataDir))
|
ro.log.Info(fmt.Sprintf(" pg_ctl -D %s start", opts.TargetDataDir))
|
||||||
@@ -192,7 +192,7 @@ func (ro *RestoreOrchestrator) validateInputs(opts *RestoreOptions) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ro.log.Info("✅ Validation passed")
|
ro.log.Info("[OK] Validation passed")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -238,7 +238,7 @@ func (ro *RestoreOrchestrator) extractTarGzBackup(ctx context.Context, source, d
|
|||||||
return fmt.Errorf("tar extraction failed: %w", err)
|
return fmt.Errorf("tar extraction failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ro.log.Info("✅ Base backup extracted successfully")
|
ro.log.Info("[OK] Base backup extracted successfully")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -254,7 +254,7 @@ func (ro *RestoreOrchestrator) extractTarBackup(ctx context.Context, source, des
|
|||||||
return fmt.Errorf("tar extraction failed: %w", err)
|
return fmt.Errorf("tar extraction failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ro.log.Info("✅ Base backup extracted successfully")
|
ro.log.Info("[OK] Base backup extracted successfully")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -270,7 +270,7 @@ func (ro *RestoreOrchestrator) copyDirectoryBackup(ctx context.Context, source,
|
|||||||
return fmt.Errorf("directory copy failed: %w", err)
|
return fmt.Errorf("directory copy failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ro.log.Info("✅ Base backup copied successfully")
|
ro.log.Info("[OK] Base backup copied successfully")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -291,7 +291,7 @@ func (ro *RestoreOrchestrator) startPostgreSQL(ctx context.Context, opts *Restor
|
|||||||
return fmt.Errorf("pg_ctl start failed: %w", err)
|
return fmt.Errorf("pg_ctl start failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ro.log.Info("✅ PostgreSQL started successfully")
|
ro.log.Info("[OK] PostgreSQL started successfully")
|
||||||
ro.log.Info("PostgreSQL is now performing recovery...")
|
ro.log.Info("PostgreSQL is now performing recovery...")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -320,7 +320,7 @@ func (ro *RestoreOrchestrator) monitorRecovery(ctx context.Context, opts *Restor
|
|||||||
// Check if recovery is complete by looking for postmaster.pid
|
// Check if recovery is complete by looking for postmaster.pid
|
||||||
pidFile := filepath.Join(opts.TargetDataDir, "postmaster.pid")
|
pidFile := filepath.Join(opts.TargetDataDir, "postmaster.pid")
|
||||||
if _, err := os.Stat(pidFile); err == nil {
|
if _, err := os.Stat(pidFile); err == nil {
|
||||||
ro.log.Info("✅ PostgreSQL is running")
|
ro.log.Info("[OK] PostgreSQL is running")
|
||||||
|
|
||||||
// Check if recovery files still exist
|
// Check if recovery files still exist
|
||||||
recoverySignal := filepath.Join(opts.TargetDataDir, "recovery.signal")
|
recoverySignal := filepath.Join(opts.TargetDataDir, "recovery.signal")
|
||||||
@@ -328,7 +328,7 @@ func (ro *RestoreOrchestrator) monitorRecovery(ctx context.Context, opts *Restor
|
|||||||
|
|
||||||
if _, err := os.Stat(recoverySignal); os.IsNotExist(err) {
|
if _, err := os.Stat(recoverySignal); os.IsNotExist(err) {
|
||||||
if _, err := os.Stat(recoveryConf); os.IsNotExist(err) {
|
if _, err := os.Stat(recoveryConf); os.IsNotExist(err) {
|
||||||
ro.log.Info("✅ Recovery completed - PostgreSQL promoted to primary")
|
ro.log.Info("[OK] Recovery completed - PostgreSQL promoted to primary")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -256,7 +256,7 @@ func (ot *OperationTracker) Complete(message string) {
|
|||||||
|
|
||||||
// Complete visual indicator
|
// Complete visual indicator
|
||||||
if ot.reporter.indicator != nil {
|
if ot.reporter.indicator != nil {
|
||||||
ot.reporter.indicator.Complete(fmt.Sprintf("✅ %s", message))
|
ot.reporter.indicator.Complete(fmt.Sprintf("[OK] %s", message))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Log completion with duration
|
// Log completion with duration
|
||||||
@@ -286,7 +286,7 @@ func (ot *OperationTracker) Fail(err error) {
|
|||||||
|
|
||||||
// Fail visual indicator
|
// Fail visual indicator
|
||||||
if ot.reporter.indicator != nil {
|
if ot.reporter.indicator != nil {
|
||||||
ot.reporter.indicator.Fail(fmt.Sprintf("❌ %s", err.Error()))
|
ot.reporter.indicator.Fail(fmt.Sprintf("[FAIL] %s", err.Error()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Log failure
|
// Log failure
|
||||||
@@ -427,7 +427,7 @@ type OperationSummary struct {
|
|||||||
// FormatSummary returns a formatted string representation of the summary
|
// FormatSummary returns a formatted string representation of the summary
|
||||||
func (os *OperationSummary) FormatSummary() string {
|
func (os *OperationSummary) FormatSummary() string {
|
||||||
return fmt.Sprintf(
|
return fmt.Sprintf(
|
||||||
"📊 Operations Summary:\n"+
|
"[STATS] Operations Summary:\n"+
|
||||||
" Total: %d | Completed: %d | Failed: %d | Running: %d\n"+
|
" Total: %d | Completed: %d | Failed: %d | Running: %d\n"+
|
||||||
" Total Duration: %s",
|
" Total Duration: %s",
|
||||||
os.TotalOperations,
|
os.TotalOperations,
|
||||||
|
|||||||
@@ -92,13 +92,13 @@ func (s *Spinner) Update(message string) {
|
|||||||
// Complete stops the spinner with a success message
|
// Complete stops the spinner with a success message
|
||||||
func (s *Spinner) Complete(message string) {
|
func (s *Spinner) Complete(message string) {
|
||||||
s.Stop()
|
s.Stop()
|
||||||
fmt.Fprintf(s.writer, "\n✅ %s\n", message)
|
fmt.Fprintf(s.writer, "\n[OK] %s\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fail stops the spinner with a failure message
|
// Fail stops the spinner with a failure message
|
||||||
func (s *Spinner) Fail(message string) {
|
func (s *Spinner) Fail(message string) {
|
||||||
s.Stop()
|
s.Stop()
|
||||||
fmt.Fprintf(s.writer, "\n❌ %s\n", message)
|
fmt.Fprintf(s.writer, "\n[FAIL] %s\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop stops the spinner
|
// Stop stops the spinner
|
||||||
@@ -167,13 +167,13 @@ func (d *Dots) Update(message string) {
|
|||||||
// Complete stops the dots with a success message
|
// Complete stops the dots with a success message
|
||||||
func (d *Dots) Complete(message string) {
|
func (d *Dots) Complete(message string) {
|
||||||
d.Stop()
|
d.Stop()
|
||||||
fmt.Fprintf(d.writer, " ✅ %s\n", message)
|
fmt.Fprintf(d.writer, " [OK] %s\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fail stops the dots with a failure message
|
// Fail stops the dots with a failure message
|
||||||
func (d *Dots) Fail(message string) {
|
func (d *Dots) Fail(message string) {
|
||||||
d.Stop()
|
d.Stop()
|
||||||
fmt.Fprintf(d.writer, " ❌ %s\n", message)
|
fmt.Fprintf(d.writer, " [FAIL] %s\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop stops the dots indicator
|
// Stop stops the dots indicator
|
||||||
@@ -239,14 +239,14 @@ func (p *ProgressBar) Complete(message string) {
|
|||||||
p.current = p.total
|
p.current = p.total
|
||||||
p.message = message
|
p.message = message
|
||||||
p.render()
|
p.render()
|
||||||
fmt.Fprintf(p.writer, " ✅ %s\n", message)
|
fmt.Fprintf(p.writer, " [OK] %s\n", message)
|
||||||
p.Stop()
|
p.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fail stops the progress bar with failure
|
// Fail stops the progress bar with failure
|
||||||
func (p *ProgressBar) Fail(message string) {
|
func (p *ProgressBar) Fail(message string) {
|
||||||
p.render()
|
p.render()
|
||||||
fmt.Fprintf(p.writer, " ❌ %s\n", message)
|
fmt.Fprintf(p.writer, " [FAIL] %s\n", message)
|
||||||
p.Stop()
|
p.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -298,12 +298,12 @@ func (s *Static) Update(message string) {
|
|||||||
|
|
||||||
// Complete shows completion message
|
// Complete shows completion message
|
||||||
func (s *Static) Complete(message string) {
|
func (s *Static) Complete(message string) {
|
||||||
fmt.Fprintf(s.writer, " ✅ %s\n", message)
|
fmt.Fprintf(s.writer, " [OK] %s\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fail shows failure message
|
// Fail shows failure message
|
||||||
func (s *Static) Fail(message string) {
|
func (s *Static) Fail(message string) {
|
||||||
fmt.Fprintf(s.writer, " ❌ %s\n", message)
|
fmt.Fprintf(s.writer, " [FAIL] %s\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop does nothing for static indicator
|
// Stop does nothing for static indicator
|
||||||
@@ -359,7 +359,7 @@ func (l *LineByLine) Start(message string) {
|
|||||||
if l.estimator != nil {
|
if l.estimator != nil {
|
||||||
displayMsg = l.estimator.GetFullStatus(message)
|
displayMsg = l.estimator.GetFullStatus(message)
|
||||||
}
|
}
|
||||||
fmt.Fprintf(l.writer, "\n🔄 %s\n", displayMsg)
|
fmt.Fprintf(l.writer, "\n[SYNC] %s\n", displayMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update shows an update message
|
// Update shows an update message
|
||||||
@@ -380,12 +380,12 @@ func (l *LineByLine) SetEstimator(estimator *ETAEstimator) {
|
|||||||
|
|
||||||
// Complete shows completion message
|
// Complete shows completion message
|
||||||
func (l *LineByLine) Complete(message string) {
|
func (l *LineByLine) Complete(message string) {
|
||||||
fmt.Fprintf(l.writer, "✅ %s\n\n", message)
|
fmt.Fprintf(l.writer, "[OK] %s\n\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fail shows failure message
|
// Fail shows failure message
|
||||||
func (l *LineByLine) Fail(message string) {
|
func (l *LineByLine) Fail(message string) {
|
||||||
fmt.Fprintf(l.writer, "❌ %s\n\n", message)
|
fmt.Fprintf(l.writer, "[FAIL] %s\n\n", message)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop does nothing for line-by-line (no cleanup needed)
|
// Stop does nothing for line-by-line (no cleanup needed)
|
||||||
@@ -396,7 +396,7 @@ func (l *LineByLine) Stop() {
|
|||||||
// Light indicator methods - minimal output
|
// Light indicator methods - minimal output
|
||||||
func (l *Light) Start(message string) {
|
func (l *Light) Start(message string) {
|
||||||
if !l.silent {
|
if !l.silent {
|
||||||
fmt.Fprintf(l.writer, "▶ %s\n", message)
|
fmt.Fprintf(l.writer, "> %s\n", message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -408,13 +408,13 @@ func (l *Light) Update(message string) {
|
|||||||
|
|
||||||
func (l *Light) Complete(message string) {
|
func (l *Light) Complete(message string) {
|
||||||
if !l.silent {
|
if !l.silent {
|
||||||
fmt.Fprintf(l.writer, "✓ %s\n", message)
|
fmt.Fprintf(l.writer, "[OK] %s\n", message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *Light) Fail(message string) {
|
func (l *Light) Fail(message string) {
|
||||||
if !l.silent {
|
if !l.silent {
|
||||||
fmt.Fprintf(l.writer, "✗ %s\n", message)
|
fmt.Fprintf(l.writer, "[FAIL] %s\n", message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -296,11 +296,11 @@ func generateID() string {
|
|||||||
func StatusIcon(s ComplianceStatus) string {
|
func StatusIcon(s ComplianceStatus) string {
|
||||||
switch s {
|
switch s {
|
||||||
case StatusCompliant:
|
case StatusCompliant:
|
||||||
return "✅"
|
return "[OK]"
|
||||||
case StatusNonCompliant:
|
case StatusNonCompliant:
|
||||||
return "❌"
|
return "[FAIL]"
|
||||||
case StatusPartial:
|
case StatusPartial:
|
||||||
return "⚠️"
|
return "[WARN]"
|
||||||
case StatusNotApplicable:
|
case StatusNotApplicable:
|
||||||
return "➖"
|
return "➖"
|
||||||
default:
|
default:
|
||||||
|
|||||||
@@ -47,9 +47,10 @@ type DownloadResult struct {
|
|||||||
|
|
||||||
// Download downloads a backup from cloud storage
|
// Download downloads a backup from cloud storage
|
||||||
func (d *CloudDownloader) Download(ctx context.Context, remotePath string, opts DownloadOptions) (*DownloadResult, error) {
|
func (d *CloudDownloader) Download(ctx context.Context, remotePath string, opts DownloadOptions) (*DownloadResult, error) {
|
||||||
// Determine temp directory
|
// Determine temp directory (use from opts, or from config's WorkDir, or fallback to system temp)
|
||||||
tempDir := opts.TempDir
|
tempDir := opts.TempDir
|
||||||
if tempDir == "" {
|
if tempDir == "" {
|
||||||
|
// Try to get from config if available (passed via opts.TempDir)
|
||||||
tempDir = os.TempDir()
|
tempDir = os.TempDir()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -12,6 +13,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"dbbackup/internal/logger"
|
"dbbackup/internal/logger"
|
||||||
)
|
)
|
||||||
@@ -60,9 +62,9 @@ type DiagnoseDetails struct {
|
|||||||
TableList []string `json:"table_list,omitempty"`
|
TableList []string `json:"table_list,omitempty"`
|
||||||
|
|
||||||
// Compression analysis
|
// Compression analysis
|
||||||
GzipValid bool `json:"gzip_valid,omitempty"`
|
GzipValid bool `json:"gzip_valid,omitempty"`
|
||||||
GzipError string `json:"gzip_error,omitempty"`
|
GzipError string `json:"gzip_error,omitempty"`
|
||||||
ExpandedSize int64 `json:"expanded_size,omitempty"`
|
ExpandedSize int64 `json:"expanded_size,omitempty"`
|
||||||
CompressionRatio float64 `json:"compression_ratio,omitempty"`
|
CompressionRatio float64 `json:"compression_ratio,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -157,7 +159,7 @@ func (d *Diagnoser) diagnosePgDump(filePath string, result *DiagnoseResult) {
|
|||||||
result.IsCorrupted = true
|
result.IsCorrupted = true
|
||||||
result.Details.HasPGDMPSignature = false
|
result.Details.HasPGDMPSignature = false
|
||||||
result.Details.FirstBytes = fmt.Sprintf("%q", header[:minInt(n, 20)])
|
result.Details.FirstBytes = fmt.Sprintf("%q", header[:minInt(n, 20)])
|
||||||
result.Errors = append(result.Errors,
|
result.Errors = append(result.Errors,
|
||||||
"Missing PGDMP signature - file is NOT PostgreSQL custom format",
|
"Missing PGDMP signature - file is NOT PostgreSQL custom format",
|
||||||
"This file may be SQL format incorrectly named as .dump",
|
"This file may be SQL format incorrectly named as .dump",
|
||||||
"Try: file "+filePath+" to check actual file type")
|
"Try: file "+filePath+" to check actual file type")
|
||||||
@@ -185,7 +187,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
|
|||||||
result.IsCorrupted = true
|
result.IsCorrupted = true
|
||||||
result.Details.GzipValid = false
|
result.Details.GzipValid = false
|
||||||
result.Details.GzipError = err.Error()
|
result.Details.GzipError = err.Error()
|
||||||
result.Errors = append(result.Errors,
|
result.Errors = append(result.Errors,
|
||||||
fmt.Sprintf("Invalid gzip format: %v", err),
|
fmt.Sprintf("Invalid gzip format: %v", err),
|
||||||
"The file may be truncated or corrupted during transfer")
|
"The file may be truncated or corrupted during transfer")
|
||||||
return
|
return
|
||||||
@@ -210,7 +212,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
|
|||||||
} else {
|
} else {
|
||||||
result.Details.HasPGDMPSignature = false
|
result.Details.HasPGDMPSignature = false
|
||||||
result.Details.FirstBytes = fmt.Sprintf("%q", header[:minInt(n, 20)])
|
result.Details.FirstBytes = fmt.Sprintf("%q", header[:minInt(n, 20)])
|
||||||
|
|
||||||
// Check if it's actually SQL content
|
// Check if it's actually SQL content
|
||||||
content := string(header[:n])
|
content := string(header[:n])
|
||||||
if strings.Contains(content, "PostgreSQL") || strings.Contains(content, "pg_dump") ||
|
if strings.Contains(content, "PostgreSQL") || strings.Contains(content, "pg_dump") ||
|
||||||
@@ -233,7 +235,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
|
|||||||
// Verify full gzip stream integrity by reading to end
|
// Verify full gzip stream integrity by reading to end
|
||||||
file.Seek(0, 0)
|
file.Seek(0, 0)
|
||||||
gz, _ = gzip.NewReader(file)
|
gz, _ = gzip.NewReader(file)
|
||||||
|
|
||||||
var totalRead int64
|
var totalRead int64
|
||||||
buf := make([]byte, 32*1024)
|
buf := make([]byte, 32*1024)
|
||||||
for {
|
for {
|
||||||
@@ -255,7 +257,7 @@ func (d *Diagnoser) diagnosePgDumpGz(filePath string, result *DiagnoseResult) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
gz.Close()
|
gz.Close()
|
||||||
|
|
||||||
result.Details.ExpandedSize = totalRead
|
result.Details.ExpandedSize = totalRead
|
||||||
if result.FileSize > 0 {
|
if result.FileSize > 0 {
|
||||||
result.Details.CompressionRatio = float64(totalRead) / float64(result.FileSize)
|
result.Details.CompressionRatio = float64(totalRead) / float64(result.FileSize)
|
||||||
@@ -392,7 +394,7 @@ func (d *Diagnoser) diagnoseSQLScript(filePath string, compressed bool, result *
|
|||||||
lastCopyTable, copyStartLine),
|
lastCopyTable, copyStartLine),
|
||||||
"The backup was truncated during data export",
|
"The backup was truncated during data export",
|
||||||
"This explains the 'syntax error' during restore - COPY data is being interpreted as SQL")
|
"This explains the 'syntax error' during restore - COPY data is being interpreted as SQL")
|
||||||
|
|
||||||
if len(copyDataSamples) > 0 {
|
if len(copyDataSamples) > 0 {
|
||||||
result.Errors = append(result.Errors,
|
result.Errors = append(result.Errors,
|
||||||
fmt.Sprintf("Sample orphaned data: %s", copyDataSamples[0]))
|
fmt.Sprintf("Sample orphaned data: %s", copyDataSamples[0]))
|
||||||
@@ -412,8 +414,12 @@ func (d *Diagnoser) diagnoseSQLScript(filePath string, compressed bool, result *
|
|||||||
|
|
||||||
// diagnoseClusterArchive analyzes a cluster tar.gz archive
|
// diagnoseClusterArchive analyzes a cluster tar.gz archive
|
||||||
func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResult) {
|
func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResult) {
|
||||||
// First verify tar.gz integrity
|
// First verify tar.gz integrity with timeout
|
||||||
cmd := exec.Command("tar", "-tzf", filePath)
|
// 5 minutes for large archives (multi-GB archives need more time)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, "tar", "-tzf", filePath)
|
||||||
output, err := cmd.Output()
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
result.IsValid = false
|
result.IsValid = false
|
||||||
@@ -491,13 +497,18 @@ func (d *Diagnoser) diagnoseUnknown(filePath string, result *DiagnoseResult) {
|
|||||||
|
|
||||||
// verifyWithPgRestore uses pg_restore --list to verify dump integrity
|
// verifyWithPgRestore uses pg_restore --list to verify dump integrity
|
||||||
func (d *Diagnoser) verifyWithPgRestore(filePath string, result *DiagnoseResult) {
|
func (d *Diagnoser) verifyWithPgRestore(filePath string, result *DiagnoseResult) {
|
||||||
cmd := exec.Command("pg_restore", "--list", filePath)
|
// Use timeout to prevent blocking on very large dump files
|
||||||
|
// 5 minutes for large dumps (multi-GB dumps with many tables)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, "pg_restore", "--list", filePath)
|
||||||
output, err := cmd.CombinedOutput()
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
result.Details.PgRestoreListable = false
|
result.Details.PgRestoreListable = false
|
||||||
result.Details.PgRestoreError = string(output)
|
result.Details.PgRestoreError = string(output)
|
||||||
|
|
||||||
// Check for specific errors
|
// Check for specific errors
|
||||||
errStr := string(output)
|
errStr := string(output)
|
||||||
if strings.Contains(errStr, "unexpected end of file") ||
|
if strings.Contains(errStr, "unexpected end of file") ||
|
||||||
@@ -544,7 +555,11 @@ func (d *Diagnoser) verifyWithPgRestore(filePath string, result *DiagnoseResult)
|
|||||||
// DiagnoseClusterDumps extracts and diagnoses all dumps in a cluster archive
|
// DiagnoseClusterDumps extracts and diagnoses all dumps in a cluster archive
|
||||||
func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*DiagnoseResult, error) {
|
func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*DiagnoseResult, error) {
|
||||||
// First, try to list archive contents without extracting (fast check)
|
// First, try to list archive contents without extracting (fast check)
|
||||||
listCmd := exec.Command("tar", "-tzf", archivePath)
|
// 10 minutes for very large archives
|
||||||
|
listCtx, listCancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||||
|
defer listCancel()
|
||||||
|
|
||||||
|
listCmd := exec.CommandContext(listCtx, "tar", "-tzf", archivePath)
|
||||||
listOutput, listErr := listCmd.CombinedOutput()
|
listOutput, listErr := listCmd.CombinedOutput()
|
||||||
if listErr != nil {
|
if listErr != nil {
|
||||||
// Archive listing failed - likely corrupted
|
// Archive listing failed - likely corrupted
|
||||||
@@ -557,9 +572,9 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
|
|||||||
IsCorrupted: true,
|
IsCorrupted: true,
|
||||||
Details: &DiagnoseDetails{},
|
Details: &DiagnoseDetails{},
|
||||||
}
|
}
|
||||||
|
|
||||||
errOutput := string(listOutput)
|
errOutput := string(listOutput)
|
||||||
if strings.Contains(errOutput, "unexpected end of file") ||
|
if strings.Contains(errOutput, "unexpected end of file") ||
|
||||||
strings.Contains(errOutput, "Unexpected EOF") ||
|
strings.Contains(errOutput, "Unexpected EOF") ||
|
||||||
strings.Contains(errOutput, "truncated") {
|
strings.Contains(errOutput, "truncated") {
|
||||||
errResult.IsTruncated = true
|
errResult.IsTruncated = true
|
||||||
@@ -574,28 +589,34 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
|
|||||||
fmt.Sprintf("tar error: %s", truncateString(errOutput, 300)),
|
fmt.Sprintf("tar error: %s", truncateString(errOutput, 300)),
|
||||||
"Run manually: tar -tzf "+archivePath+" 2>&1 | tail -50")
|
"Run manually: tar -tzf "+archivePath+" 2>&1 | tail -50")
|
||||||
}
|
}
|
||||||
|
|
||||||
return []*DiagnoseResult{errResult}, nil
|
return []*DiagnoseResult{errResult}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Archive is listable - now check disk space before extraction
|
// Archive is listable - now check disk space before extraction
|
||||||
files := strings.Split(strings.TrimSpace(string(listOutput)), "\n")
|
files := strings.Split(strings.TrimSpace(string(listOutput)), "\n")
|
||||||
|
|
||||||
// Check if we have enough disk space (estimate 4x archive size needed)
|
// Check if we have enough disk space (estimate 4x archive size needed)
|
||||||
archiveInfo, _ := os.Stat(archivePath)
|
archiveInfo, _ := os.Stat(archivePath)
|
||||||
requiredSpace := archiveInfo.Size() * 4
|
requiredSpace := archiveInfo.Size() * 4
|
||||||
|
|
||||||
// Check temp directory space - try to extract metadata first
|
// Check temp directory space - try to extract metadata first
|
||||||
if stat, err := os.Stat(tempDir); err == nil && stat.IsDir() {
|
if stat, err := os.Stat(tempDir); err == nil && stat.IsDir() {
|
||||||
// Try extraction of a small test file first
|
// Try extraction of a small test file first with timeout
|
||||||
testCmd := exec.Command("tar", "-xzf", archivePath, "-C", tempDir, "--wildcards", "*.json", "--wildcards", "globals.sql")
|
testCtx, testCancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
testCmd := exec.CommandContext(testCtx, "tar", "-xzf", archivePath, "-C", tempDir, "--wildcards", "*.json", "--wildcards", "globals.sql")
|
||||||
testCmd.Run() // Ignore error - just try to extract metadata
|
testCmd.Run() // Ignore error - just try to extract metadata
|
||||||
|
testCancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
d.log.Info("Archive listing successful", "files", len(files))
|
d.log.Info("Archive listing successful", "files", len(files))
|
||||||
|
|
||||||
// Try full extraction
|
// Try full extraction - NO TIMEOUT here as large archives can take a long time
|
||||||
cmd := exec.Command("tar", "-xzf", archivePath, "-C", tempDir)
|
// Use a generous timeout (30 minutes) for very large archives
|
||||||
|
extractCtx, extractCancel := context.WithTimeout(context.Background(), 30*time.Minute)
|
||||||
|
defer extractCancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(extractCtx, "tar", "-xzf", archivePath, "-C", tempDir)
|
||||||
var stderr bytes.Buffer
|
var stderr bytes.Buffer
|
||||||
cmd.Stderr = &stderr
|
cmd.Stderr = &stderr
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
@@ -608,14 +629,14 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
|
|||||||
IsValid: false,
|
IsValid: false,
|
||||||
Details: &DiagnoseDetails{},
|
Details: &DiagnoseDetails{},
|
||||||
}
|
}
|
||||||
|
|
||||||
errOutput := stderr.String()
|
errOutput := stderr.String()
|
||||||
if strings.Contains(errOutput, "No space left") ||
|
if strings.Contains(errOutput, "No space left") ||
|
||||||
strings.Contains(errOutput, "cannot write") ||
|
strings.Contains(errOutput, "cannot write") ||
|
||||||
strings.Contains(errOutput, "Disk quota exceeded") {
|
strings.Contains(errOutput, "Disk quota exceeded") {
|
||||||
errResult.Errors = append(errResult.Errors,
|
errResult.Errors = append(errResult.Errors,
|
||||||
"INSUFFICIENT DISK SPACE to extract archive for diagnosis",
|
"INSUFFICIENT DISK SPACE to extract archive for diagnosis",
|
||||||
fmt.Sprintf("Archive size: %s (needs ~%s for extraction)",
|
fmt.Sprintf("Archive size: %s (needs ~%s for extraction)",
|
||||||
formatBytes(archiveInfo.Size()), formatBytes(requiredSpace)),
|
formatBytes(archiveInfo.Size()), formatBytes(requiredSpace)),
|
||||||
"Use CLI diagnosis instead: dbbackup restore diagnose "+archivePath,
|
"Use CLI diagnosis instead: dbbackup restore diagnose "+archivePath,
|
||||||
"Or use --workdir flag to specify a location with more space")
|
"Or use --workdir flag to specify a location with more space")
|
||||||
@@ -634,7 +655,7 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
|
|||||||
fmt.Sprintf("Extraction failed: %v", err),
|
fmt.Sprintf("Extraction failed: %v", err),
|
||||||
fmt.Sprintf("tar error: %s", truncateString(errOutput, 300)))
|
fmt.Sprintf("tar error: %s", truncateString(errOutput, 300)))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Still report what files we found in the listing
|
// Still report what files we found in the listing
|
||||||
var dumpFiles []string
|
var dumpFiles []string
|
||||||
for _, f := range files {
|
for _, f := range files {
|
||||||
@@ -648,7 +669,7 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
|
|||||||
errResult.Warnings = append(errResult.Warnings,
|
errResult.Warnings = append(errResult.Warnings,
|
||||||
fmt.Sprintf("Archive contains %d database dumps (listing only)", len(dumpFiles)))
|
fmt.Sprintf("Archive contains %d database dumps (listing only)", len(dumpFiles)))
|
||||||
}
|
}
|
||||||
|
|
||||||
return []*DiagnoseResult{errResult}, nil
|
return []*DiagnoseResult{errResult}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -693,7 +714,7 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
|
|||||||
// PrintDiagnosis outputs a human-readable diagnosis report
|
// PrintDiagnosis outputs a human-readable diagnosis report
|
||||||
func (d *Diagnoser) PrintDiagnosis(result *DiagnoseResult) {
|
func (d *Diagnoser) PrintDiagnosis(result *DiagnoseResult) {
|
||||||
fmt.Println("\n" + strings.Repeat("=", 70))
|
fmt.Println("\n" + strings.Repeat("=", 70))
|
||||||
fmt.Printf("📋 DIAGNOSIS: %s\n", result.FileName)
|
fmt.Printf("[DIAG] DIAGNOSIS: %s\n", result.FileName)
|
||||||
fmt.Println(strings.Repeat("=", 70))
|
fmt.Println(strings.Repeat("=", 70))
|
||||||
|
|
||||||
// Basic info
|
// Basic info
|
||||||
@@ -703,69 +724,69 @@ func (d *Diagnoser) PrintDiagnosis(result *DiagnoseResult) {
|
|||||||
|
|
||||||
// Status
|
// Status
|
||||||
if result.IsValid {
|
if result.IsValid {
|
||||||
fmt.Println("\n✅ STATUS: VALID")
|
fmt.Println("\n[OK] STATUS: VALID")
|
||||||
} else {
|
} else {
|
||||||
fmt.Println("\n❌ STATUS: INVALID")
|
fmt.Println("\n[FAIL] STATUS: INVALID")
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.IsTruncated {
|
if result.IsTruncated {
|
||||||
fmt.Println("⚠️ TRUNCATED: Yes - file appears incomplete")
|
fmt.Println("[WARN] TRUNCATED: Yes - file appears incomplete")
|
||||||
}
|
}
|
||||||
if result.IsCorrupted {
|
if result.IsCorrupted {
|
||||||
fmt.Println("⚠️ CORRUPTED: Yes - file structure is damaged")
|
fmt.Println("[WARN] CORRUPTED: Yes - file structure is damaged")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Details
|
// Details
|
||||||
if result.Details != nil {
|
if result.Details != nil {
|
||||||
fmt.Println("\n📊 DETAILS:")
|
fmt.Println("\n[DETAILS]:")
|
||||||
|
|
||||||
if result.Details.HasPGDMPSignature {
|
if result.Details.HasPGDMPSignature {
|
||||||
fmt.Println(" ✓ Has PGDMP signature (PostgreSQL custom format)")
|
fmt.Println(" [+] Has PGDMP signature (PostgreSQL custom format)")
|
||||||
}
|
}
|
||||||
if result.Details.HasSQLHeader {
|
if result.Details.HasSQLHeader {
|
||||||
fmt.Println(" ✓ Has PostgreSQL SQL header")
|
fmt.Println(" [+] Has PostgreSQL SQL header")
|
||||||
}
|
}
|
||||||
if result.Details.GzipValid {
|
if result.Details.GzipValid {
|
||||||
fmt.Println(" ✓ Gzip compression valid")
|
fmt.Println(" [+] Gzip compression valid")
|
||||||
}
|
}
|
||||||
if result.Details.PgRestoreListable {
|
if result.Details.PgRestoreListable {
|
||||||
fmt.Printf(" ✓ pg_restore can list contents (%d tables)\n", result.Details.TableCount)
|
fmt.Printf(" [+] pg_restore can list contents (%d tables)\n", result.Details.TableCount)
|
||||||
}
|
}
|
||||||
if result.Details.CopyBlockCount > 0 {
|
if result.Details.CopyBlockCount > 0 {
|
||||||
fmt.Printf(" • Contains %d COPY blocks\n", result.Details.CopyBlockCount)
|
fmt.Printf(" [-] Contains %d COPY blocks\n", result.Details.CopyBlockCount)
|
||||||
}
|
}
|
||||||
if result.Details.UnterminatedCopy {
|
if result.Details.UnterminatedCopy {
|
||||||
fmt.Printf(" ✗ Unterminated COPY block: %s (line %d)\n",
|
fmt.Printf(" [-] Unterminated COPY block: %s (line %d)\n",
|
||||||
result.Details.LastCopyTable, result.Details.LastCopyLineNumber)
|
result.Details.LastCopyTable, result.Details.LastCopyLineNumber)
|
||||||
}
|
}
|
||||||
if result.Details.ProperlyTerminated {
|
if result.Details.ProperlyTerminated {
|
||||||
fmt.Println(" ✓ All COPY blocks properly terminated")
|
fmt.Println(" [+] All COPY blocks properly terminated")
|
||||||
}
|
}
|
||||||
if result.Details.ExpandedSize > 0 {
|
if result.Details.ExpandedSize > 0 {
|
||||||
fmt.Printf(" • Expanded size: %s (ratio: %.1fx)\n",
|
fmt.Printf(" [-] Expanded size: %s (ratio: %.1fx)\n",
|
||||||
formatBytes(result.Details.ExpandedSize), result.Details.CompressionRatio)
|
formatBytes(result.Details.ExpandedSize), result.Details.CompressionRatio)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errors
|
// Errors
|
||||||
if len(result.Errors) > 0 {
|
if len(result.Errors) > 0 {
|
||||||
fmt.Println("\n❌ ERRORS:")
|
fmt.Println("\n[ERRORS]:")
|
||||||
for _, e := range result.Errors {
|
for _, e := range result.Errors {
|
||||||
fmt.Printf(" • %s\n", e)
|
fmt.Printf(" - %s\n", e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Warnings
|
// Warnings
|
||||||
if len(result.Warnings) > 0 {
|
if len(result.Warnings) > 0 {
|
||||||
fmt.Println("\n⚠️ WARNINGS:")
|
fmt.Println("\n[WARNINGS]:")
|
||||||
for _, w := range result.Warnings {
|
for _, w := range result.Warnings {
|
||||||
fmt.Printf(" • %s\n", w)
|
fmt.Printf(" - %s\n", w)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Recommendations
|
// Recommendations
|
||||||
if !result.IsValid {
|
if !result.IsValid {
|
||||||
fmt.Println("\n💡 RECOMMENDATIONS:")
|
fmt.Println("\n[HINT] RECOMMENDATIONS:")
|
||||||
if result.IsTruncated {
|
if result.IsTruncated {
|
||||||
fmt.Println(" 1. Re-run the backup process for this database")
|
fmt.Println(" 1. Re-run the backup process for this database")
|
||||||
fmt.Println(" 2. Check disk space on backup server during backup")
|
fmt.Println(" 2. Check disk space on backup server during backup")
|
||||||
|
|||||||
@@ -27,8 +27,7 @@ type Engine struct {
|
|||||||
progress progress.Indicator
|
progress progress.Indicator
|
||||||
detailedReporter *progress.DetailedReporter
|
detailedReporter *progress.DetailedReporter
|
||||||
dryRun bool
|
dryRun bool
|
||||||
debugLogPath string // Path to save debug log on error
|
debugLogPath string // Path to save debug log on error
|
||||||
errorCollector *ErrorCollector // Collects detailed error info
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new restore engine
|
// New creates a new restore engine
|
||||||
@@ -128,7 +127,7 @@ func (e *Engine) RestoreSingle(ctx context.Context, archivePath, targetDB string
|
|||||||
e.log.Warn("Checksum verification failed", "error", checksumErr)
|
e.log.Warn("Checksum verification failed", "error", checksumErr)
|
||||||
e.log.Warn("Continuing restore without checksum verification (use with caution)")
|
e.log.Warn("Continuing restore without checksum verification (use with caution)")
|
||||||
} else {
|
} else {
|
||||||
e.log.Info("✓ Archive checksum verified successfully")
|
e.log.Info("[OK] Archive checksum verified successfully")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Detect archive format
|
// Detect archive format
|
||||||
@@ -357,43 +356,68 @@ func (e *Engine) executeRestoreCommandWithContext(ctx context.Context, cmdArgs [
|
|||||||
return fmt.Errorf("failed to start restore command: %w", err)
|
return fmt.Errorf("failed to start restore command: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read stderr in chunks to log errors without loading all into memory
|
// Read stderr in goroutine to avoid blocking
|
||||||
buf := make([]byte, 4096)
|
|
||||||
var lastError string
|
var lastError string
|
||||||
var errorCount int
|
var errorCount int
|
||||||
const maxErrors = 10 // Limit captured errors to prevent OOM
|
stderrDone := make(chan struct{})
|
||||||
for {
|
go func() {
|
||||||
n, err := stderr.Read(buf)
|
defer close(stderrDone)
|
||||||
if n > 0 {
|
buf := make([]byte, 4096)
|
||||||
chunk := string(buf[:n])
|
const maxErrors = 10 // Limit captured errors to prevent OOM
|
||||||
|
for {
|
||||||
// Feed to error collector if enabled
|
n, err := stderr.Read(buf)
|
||||||
if collector != nil {
|
if n > 0 {
|
||||||
collector.CaptureStderr(chunk)
|
chunk := string(buf[:n])
|
||||||
}
|
|
||||||
|
// Feed to error collector if enabled
|
||||||
// Only capture REAL errors, not verbose output
|
if collector != nil {
|
||||||
if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
|
collector.CaptureStderr(chunk)
|
||||||
lastError = strings.TrimSpace(chunk)
|
|
||||||
errorCount++
|
|
||||||
if errorCount <= maxErrors {
|
|
||||||
e.log.Warn("Restore stderr", "output", chunk)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Only capture REAL errors, not verbose output
|
||||||
|
if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
|
||||||
|
lastError = strings.TrimSpace(chunk)
|
||||||
|
errorCount++
|
||||||
|
if errorCount <= maxErrors {
|
||||||
|
e.log.Warn("Restore stderr", "output", chunk)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Note: --verbose output is discarded to prevent OOM
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
}
|
}
|
||||||
// Note: --verbose output is discarded to prevent OOM
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for command with proper context handling
|
||||||
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed (success or failure)
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Context cancelled - kill process
|
||||||
|
e.log.Warn("Restore cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
// Wait for stderr reader to finish
|
||||||
|
<-stderrDone
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
// Get exit code
|
// Get exit code
|
||||||
exitCode := 1
|
exitCode := 1
|
||||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
if exitErr, ok := cmdErr.(*exec.ExitError); ok {
|
||||||
exitCode = exitErr.ExitCode()
|
exitCode = exitErr.ExitCode()
|
||||||
}
|
}
|
||||||
|
|
||||||
// PostgreSQL pg_restore returns exit code 1 even for ignorable errors
|
// PostgreSQL pg_restore returns exit code 1 even for ignorable errors
|
||||||
// Check if errors are ignorable (already exists, duplicate, etc.)
|
// Check if errors are ignorable (already exists, duplicate, etc.)
|
||||||
if lastError != "" && e.isIgnorableError(lastError) {
|
if lastError != "" && e.isIgnorableError(lastError) {
|
||||||
@@ -427,17 +451,17 @@ func (e *Engine) executeRestoreCommandWithContext(ctx context.Context, cmdArgs [
|
|||||||
errType,
|
errType,
|
||||||
errHint,
|
errHint,
|
||||||
)
|
)
|
||||||
|
|
||||||
// Print report to console
|
// Print report to console
|
||||||
collector.PrintReport(report)
|
collector.PrintReport(report)
|
||||||
|
|
||||||
// Save to file
|
// Save to file
|
||||||
if e.debugLogPath != "" {
|
if e.debugLogPath != "" {
|
||||||
if saveErr := collector.SaveReport(report, e.debugLogPath); saveErr != nil {
|
if saveErr := collector.SaveReport(report, e.debugLogPath); saveErr != nil {
|
||||||
e.log.Warn("Failed to save debug log", "error", saveErr)
|
e.log.Warn("Failed to save debug log", "error", saveErr)
|
||||||
} else {
|
} else {
|
||||||
e.log.Info("Debug log saved", "path", e.debugLogPath)
|
e.log.Info("Debug log saved", "path", e.debugLogPath)
|
||||||
fmt.Printf("\n📋 Detailed error report saved to: %s\n", e.debugLogPath)
|
fmt.Printf("\n[LOG] Detailed error report saved to: %s\n", e.debugLogPath)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -481,31 +505,56 @@ func (e *Engine) executeRestoreWithDecompression(ctx context.Context, archivePat
|
|||||||
return fmt.Errorf("failed to start restore command: %w", err)
|
return fmt.Errorf("failed to start restore command: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read stderr in chunks to log errors without loading all into memory
|
// Read stderr in goroutine to avoid blocking
|
||||||
buf := make([]byte, 4096)
|
|
||||||
var lastError string
|
var lastError string
|
||||||
var errorCount int
|
var errorCount int
|
||||||
const maxErrors = 10 // Limit captured errors to prevent OOM
|
stderrDone := make(chan struct{})
|
||||||
for {
|
go func() {
|
||||||
n, err := stderr.Read(buf)
|
defer close(stderrDone)
|
||||||
if n > 0 {
|
buf := make([]byte, 4096)
|
||||||
chunk := string(buf[:n])
|
const maxErrors = 10 // Limit captured errors to prevent OOM
|
||||||
// Only capture REAL errors, not verbose output
|
for {
|
||||||
if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
|
n, err := stderr.Read(buf)
|
||||||
lastError = strings.TrimSpace(chunk)
|
if n > 0 {
|
||||||
errorCount++
|
chunk := string(buf[:n])
|
||||||
if errorCount <= maxErrors {
|
// Only capture REAL errors, not verbose output
|
||||||
e.log.Warn("Restore stderr", "output", chunk)
|
if strings.Contains(chunk, "ERROR:") || strings.Contains(chunk, "FATAL:") || strings.Contains(chunk, "error:") {
|
||||||
|
lastError = strings.TrimSpace(chunk)
|
||||||
|
errorCount++
|
||||||
|
if errorCount <= maxErrors {
|
||||||
|
e.log.Warn("Restore stderr", "output", chunk)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
// Note: --verbose output is discarded to prevent OOM
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
}
|
}
|
||||||
// Note: --verbose output is discarded to prevent OOM
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for command with proper context handling
|
||||||
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed (success or failure)
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Context cancelled - kill process
|
||||||
|
e.log.Warn("Restore with decompression cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
// Wait for stderr reader to finish
|
||||||
|
<-stderrDone
|
||||||
|
|
||||||
|
if cmdErr != nil {
|
||||||
// PostgreSQL pg_restore returns exit code 1 even for ignorable errors
|
// PostgreSQL pg_restore returns exit code 1 even for ignorable errors
|
||||||
// Check if errors are ignorable (already exists, duplicate, etc.)
|
// Check if errors are ignorable (already exists, duplicate, etc.)
|
||||||
if lastError != "" && e.isIgnorableError(lastError) {
|
if lastError != "" && e.isIgnorableError(lastError) {
|
||||||
@@ -517,18 +566,18 @@ func (e *Engine) executeRestoreWithDecompression(ctx context.Context, archivePat
|
|||||||
if lastError != "" {
|
if lastError != "" {
|
||||||
classification := checks.ClassifyError(lastError)
|
classification := checks.ClassifyError(lastError)
|
||||||
e.log.Error("Restore with decompression failed",
|
e.log.Error("Restore with decompression failed",
|
||||||
"error", err,
|
"error", cmdErr,
|
||||||
"last_stderr", lastError,
|
"last_stderr", lastError,
|
||||||
"error_count", errorCount,
|
"error_count", errorCount,
|
||||||
"error_type", classification.Type,
|
"error_type", classification.Type,
|
||||||
"hint", classification.Hint,
|
"hint", classification.Hint,
|
||||||
"action", classification.Action)
|
"action", classification.Action)
|
||||||
return fmt.Errorf("restore failed: %w (last error: %s, total errors: %d) - %s",
|
return fmt.Errorf("restore failed: %w (last error: %s, total errors: %d) - %s",
|
||||||
err, lastError, errorCount, classification.Hint)
|
cmdErr, lastError, errorCount, classification.Hint)
|
||||||
}
|
}
|
||||||
|
|
||||||
e.log.Error("Restore with decompression failed", "error", err, "last_stderr", lastError, "error_count", errorCount)
|
e.log.Error("Restore with decompression failed", "error", cmdErr, "last_stderr", lastError, "error_count", errorCount)
|
||||||
return fmt.Errorf("restore failed: %w", err)
|
return fmt.Errorf("restore failed: %w", cmdErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -563,7 +612,7 @@ func (e *Engine) previewRestore(archivePath, targetDB string, format ArchiveForm
|
|||||||
fmt.Printf(" 1. Execute: mysql %s < %s\n", targetDB, archivePath)
|
fmt.Printf(" 1. Execute: mysql %s < %s\n", targetDB, archivePath)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("\n⚠️ WARNING: This will restore data to the target database.")
|
fmt.Println("\n[WARN] WARNING: This will restore data to the target database.")
|
||||||
fmt.Println(" Existing data may be overwritten or merged.")
|
fmt.Println(" Existing data may be overwritten or merged.")
|
||||||
fmt.Println("\nTo execute this restore, add the --confirm flag.")
|
fmt.Println("\nTo execute this restore, add the --confirm flag.")
|
||||||
fmt.Println(strings.Repeat("=", 60) + "\n")
|
fmt.Println(strings.Repeat("=", 60) + "\n")
|
||||||
@@ -594,7 +643,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
|||||||
e.log.Warn("Checksum verification failed", "error", checksumErr)
|
e.log.Warn("Checksum verification failed", "error", checksumErr)
|
||||||
e.log.Warn("Continuing restore without checksum verification (use with caution)")
|
e.log.Warn("Continuing restore without checksum verification (use with caution)")
|
||||||
} else {
|
} else {
|
||||||
e.log.Info("✓ Cluster archive checksum verified successfully")
|
e.log.Info("[OK] Cluster archive checksum verified successfully")
|
||||||
}
|
}
|
||||||
|
|
||||||
format := DetectArchiveFormat(archivePath)
|
format := DetectArchiveFormat(archivePath)
|
||||||
@@ -628,11 +677,12 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
|||||||
|
|
||||||
e.progress.Start(fmt.Sprintf("Restoring cluster from %s", filepath.Base(archivePath)))
|
e.progress.Start(fmt.Sprintf("Restoring cluster from %s", filepath.Base(archivePath)))
|
||||||
|
|
||||||
// Create temporary extraction directory
|
// Create temporary extraction directory in configured WorkDir
|
||||||
tempDir := filepath.Join(e.cfg.BackupDir, fmt.Sprintf(".restore_%d", time.Now().Unix()))
|
workDir := e.cfg.GetEffectiveWorkDir()
|
||||||
|
tempDir := filepath.Join(workDir, fmt.Sprintf(".restore_%d", time.Now().Unix()))
|
||||||
if err := os.MkdirAll(tempDir, 0755); err != nil {
|
if err := os.MkdirAll(tempDir, 0755); err != nil {
|
||||||
operation.Fail("Failed to create temporary directory")
|
operation.Fail("Failed to create temporary directory")
|
||||||
return fmt.Errorf("failed to create temp directory: %w", err)
|
return fmt.Errorf("failed to create temp directory in %s: %w", workDir, err)
|
||||||
}
|
}
|
||||||
defer os.RemoveAll(tempDir)
|
defer os.RemoveAll(tempDir)
|
||||||
|
|
||||||
@@ -653,7 +703,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
|||||||
|
|
||||||
if !isSuperuser {
|
if !isSuperuser {
|
||||||
e.log.Warn("Current user is not a superuser - database ownership may not be fully restored")
|
e.log.Warn("Current user is not a superuser - database ownership may not be fully restored")
|
||||||
e.progress.Update("⚠️ Warning: Non-superuser - ownership restoration limited")
|
e.progress.Update("[WARN] Warning: Non-superuser - ownership restoration limited")
|
||||||
time.Sleep(2 * time.Second) // Give user time to see warning
|
time.Sleep(2 * time.Second) // Give user time to see warning
|
||||||
} else {
|
} else {
|
||||||
e.log.Info("Superuser privileges confirmed - full ownership restoration enabled")
|
e.log.Info("Superuser privileges confirmed - full ownership restoration enabled")
|
||||||
@@ -726,7 +776,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
|||||||
}
|
}
|
||||||
} else if strings.HasSuffix(dumpFile, ".dump") {
|
} else if strings.HasSuffix(dumpFile, ".dump") {
|
||||||
// Validate custom format dumps using pg_restore --list
|
// Validate custom format dumps using pg_restore --list
|
||||||
cmd := exec.Command("pg_restore", "--list", dumpFile)
|
cmd := exec.CommandContext(ctx, "pg_restore", "--list", dumpFile)
|
||||||
output, err := cmd.CombinedOutput()
|
output, err := cmd.CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dbName := strings.TrimSuffix(entry.Name(), ".dump")
|
dbName := strings.TrimSuffix(entry.Name(), ".dump")
|
||||||
@@ -752,8 +802,8 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
|||||||
if len(corruptedDumps) > 0 {
|
if len(corruptedDumps) > 0 {
|
||||||
operation.Fail("Corrupted dump files detected")
|
operation.Fail("Corrupted dump files detected")
|
||||||
e.progress.Fail(fmt.Sprintf("Found %d corrupted dump files - restore aborted", len(corruptedDumps)))
|
e.progress.Fail(fmt.Sprintf("Found %d corrupted dump files - restore aborted", len(corruptedDumps)))
|
||||||
return fmt.Errorf("pre-validation failed: %d corrupted dump files detected:\n %s\n\nThe backup archive appears to be damaged. You need to restore from a different backup.",
|
return fmt.Errorf("pre-validation failed: %d corrupted dump files detected: %s - the backup archive appears to be damaged, restore from a different backup",
|
||||||
len(corruptedDumps), strings.Join(corruptedDumps, "\n "))
|
len(corruptedDumps), strings.Join(corruptedDumps, ", "))
|
||||||
}
|
}
|
||||||
e.log.Info("All dump files passed validation")
|
e.log.Info("All dump files passed validation")
|
||||||
|
|
||||||
@@ -785,7 +835,7 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
|||||||
e.log.Warn("Large objects detected in dump files - reducing parallelism to avoid lock contention",
|
e.log.Warn("Large objects detected in dump files - reducing parallelism to avoid lock contention",
|
||||||
"original_parallelism", parallelism,
|
"original_parallelism", parallelism,
|
||||||
"adjusted_parallelism", 1)
|
"adjusted_parallelism", 1)
|
||||||
e.progress.Update("⚠️ Large objects detected - using sequential restore to avoid lock conflicts")
|
e.progress.Update("[WARN] Large objects detected - using sequential restore to avoid lock conflicts")
|
||||||
time.Sleep(2 * time.Second) // Give user time to see warning
|
time.Sleep(2 * time.Second) // Give user time to see warning
|
||||||
parallelism = 1
|
parallelism = 1
|
||||||
}
|
}
|
||||||
@@ -811,6 +861,14 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
|
|||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
defer func() { <-semaphore }() // Release
|
defer func() { <-semaphore }() // Release
|
||||||
|
|
||||||
|
// Panic recovery - prevent one database failure from crashing entire cluster restore
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
e.log.Error("Panic in database restore goroutine", "file", filename, "panic", r)
|
||||||
|
atomic.AddInt32(&failCount, 1)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// Update estimator progress (thread-safe)
|
// Update estimator progress (thread-safe)
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
estimator.UpdateProgress(idx)
|
estimator.UpdateProgress(idx)
|
||||||
@@ -938,16 +996,39 @@ func (e *Engine) extractArchive(ctx context.Context, archivePath, destDir string
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Discard stderr output in chunks to prevent memory buildup
|
// Discard stderr output in chunks to prevent memory buildup
|
||||||
buf := make([]byte, 4096)
|
stderrDone := make(chan struct{})
|
||||||
for {
|
go func() {
|
||||||
_, err := stderr.Read(buf)
|
defer close(stderrDone)
|
||||||
if err != nil {
|
buf := make([]byte, 4096)
|
||||||
break
|
for {
|
||||||
|
_, err := stderr.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for command with proper context handling
|
||||||
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("Archive extraction cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
<-stderrDone
|
||||||
return fmt.Errorf("tar extraction failed: %w", err)
|
|
||||||
|
if cmdErr != nil {
|
||||||
|
return fmt.Errorf("tar extraction failed: %w", cmdErr)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -980,25 +1061,48 @@ func (e *Engine) restoreGlobals(ctx context.Context, globalsFile string) error {
|
|||||||
return fmt.Errorf("failed to start psql: %w", err)
|
return fmt.Errorf("failed to start psql: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read stderr in chunks
|
// Read stderr in chunks in goroutine
|
||||||
buf := make([]byte, 4096)
|
|
||||||
var lastError string
|
var lastError string
|
||||||
for {
|
stderrDone := make(chan struct{})
|
||||||
n, err := stderr.Read(buf)
|
go func() {
|
||||||
if n > 0 {
|
defer close(stderrDone)
|
||||||
chunk := string(buf[:n])
|
buf := make([]byte, 4096)
|
||||||
if strings.Contains(chunk, "ERROR") || strings.Contains(chunk, "FATAL") {
|
for {
|
||||||
lastError = chunk
|
n, err := stderr.Read(buf)
|
||||||
e.log.Warn("Globals restore stderr", "output", chunk)
|
if n > 0 {
|
||||||
|
chunk := string(buf[:n])
|
||||||
|
if strings.Contains(chunk, "ERROR") || strings.Contains(chunk, "FATAL") {
|
||||||
|
lastError = chunk
|
||||||
|
e.log.Warn("Globals restore stderr", "output", chunk)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
}()
|
||||||
break
|
|
||||||
}
|
// Wait for command with proper context handling
|
||||||
|
cmdDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
cmdDone <- cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var cmdErr error
|
||||||
|
select {
|
||||||
|
case cmdErr = <-cmdDone:
|
||||||
|
// Command completed
|
||||||
|
case <-ctx.Done():
|
||||||
|
e.log.Warn("Globals restore cancelled - killing process")
|
||||||
|
cmd.Process.Kill()
|
||||||
|
<-cmdDone
|
||||||
|
cmdErr = ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
<-stderrDone
|
||||||
return fmt.Errorf("failed to restore globals: %w (last error: %s)", err, lastError)
|
|
||||||
|
if cmdErr != nil {
|
||||||
|
return fmt.Errorf("failed to restore globals: %w (last error: %s)", cmdErr, lastError)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -1235,7 +1339,7 @@ func (e *Engine) previewClusterRestore(archivePath string) error {
|
|||||||
fmt.Println(" 3. Restore all databases found in archive")
|
fmt.Println(" 3. Restore all databases found in archive")
|
||||||
fmt.Println(" 4. Cleanup temporary files")
|
fmt.Println(" 4. Cleanup temporary files")
|
||||||
|
|
||||||
fmt.Println("\n⚠️ WARNING: This will restore multiple databases.")
|
fmt.Println("\n[WARN] WARNING: This will restore multiple databases.")
|
||||||
fmt.Println(" Existing databases may be overwritten or merged.")
|
fmt.Println(" Existing databases may be overwritten or merged.")
|
||||||
fmt.Println("\nTo execute this restore, add the --confirm flag.")
|
fmt.Println("\nTo execute this restore, add the --confirm flag.")
|
||||||
fmt.Println(strings.Repeat("=", 60) + "\n")
|
fmt.Println(strings.Repeat("=", 60) + "\n")
|
||||||
@@ -1262,7 +1366,8 @@ func (e *Engine) detectLargeObjectsInDumps(dumpsDir string, entries []os.DirEntr
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Use pg_restore -l to list contents (fast, doesn't restore data)
|
// Use pg_restore -l to list contents (fast, doesn't restore data)
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
// 2 minutes for large dumps with many objects
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, "pg_restore", "-l", dumpFile)
|
cmd := exec.CommandContext(ctx, "pg_restore", "-l", dumpFile)
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package restore
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -20,43 +21,43 @@ import (
|
|||||||
// RestoreErrorReport contains comprehensive information about a restore failure
|
// RestoreErrorReport contains comprehensive information about a restore failure
|
||||||
type RestoreErrorReport struct {
|
type RestoreErrorReport struct {
|
||||||
// Metadata
|
// Metadata
|
||||||
Timestamp time.Time `json:"timestamp"`
|
Timestamp time.Time `json:"timestamp"`
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
GoVersion string `json:"go_version"`
|
GoVersion string `json:"go_version"`
|
||||||
OS string `json:"os"`
|
OS string `json:"os"`
|
||||||
Arch string `json:"arch"`
|
Arch string `json:"arch"`
|
||||||
|
|
||||||
// Archive info
|
// Archive info
|
||||||
ArchivePath string `json:"archive_path"`
|
ArchivePath string `json:"archive_path"`
|
||||||
ArchiveSize int64 `json:"archive_size"`
|
ArchiveSize int64 `json:"archive_size"`
|
||||||
ArchiveFormat string `json:"archive_format"`
|
ArchiveFormat string `json:"archive_format"`
|
||||||
|
|
||||||
// Database info
|
// Database info
|
||||||
TargetDB string `json:"target_db"`
|
TargetDB string `json:"target_db"`
|
||||||
DatabaseType string `json:"database_type"`
|
DatabaseType string `json:"database_type"`
|
||||||
|
|
||||||
// Error details
|
// Error details
|
||||||
ExitCode int `json:"exit_code"`
|
ExitCode int `json:"exit_code"`
|
||||||
ErrorMessage string `json:"error_message"`
|
ErrorMessage string `json:"error_message"`
|
||||||
ErrorType string `json:"error_type"`
|
ErrorType string `json:"error_type"`
|
||||||
ErrorHint string `json:"error_hint"`
|
ErrorHint string `json:"error_hint"`
|
||||||
TotalErrors int `json:"total_errors"`
|
TotalErrors int `json:"total_errors"`
|
||||||
|
|
||||||
// Captured output
|
// Captured output
|
||||||
LastStderr []string `json:"last_stderr"`
|
LastStderr []string `json:"last_stderr"`
|
||||||
FirstErrors []string `json:"first_errors"`
|
FirstErrors []string `json:"first_errors"`
|
||||||
|
|
||||||
// Context around failure
|
// Context around failure
|
||||||
FailureContext *FailureContext `json:"failure_context,omitempty"`
|
FailureContext *FailureContext `json:"failure_context,omitempty"`
|
||||||
|
|
||||||
// Diagnosis results
|
// Diagnosis results
|
||||||
DiagnosisResult *DiagnoseResult `json:"diagnosis_result,omitempty"`
|
DiagnosisResult *DiagnoseResult `json:"diagnosis_result,omitempty"`
|
||||||
|
|
||||||
// Environment (sanitized)
|
// Environment (sanitized)
|
||||||
PostgresVersion string `json:"postgres_version,omitempty"`
|
PostgresVersion string `json:"postgres_version,omitempty"`
|
||||||
PgRestoreVersion string `json:"pg_restore_version,omitempty"`
|
PgRestoreVersion string `json:"pg_restore_version,omitempty"`
|
||||||
PsqlVersion string `json:"psql_version,omitempty"`
|
PsqlVersion string `json:"psql_version,omitempty"`
|
||||||
|
|
||||||
// Recommendations
|
// Recommendations
|
||||||
Recommendations []string `json:"recommendations"`
|
Recommendations []string `json:"recommendations"`
|
||||||
}
|
}
|
||||||
@@ -67,40 +68,40 @@ type FailureContext struct {
|
|||||||
FailedLine int `json:"failed_line,omitempty"`
|
FailedLine int `json:"failed_line,omitempty"`
|
||||||
FailedStatement string `json:"failed_statement,omitempty"`
|
FailedStatement string `json:"failed_statement,omitempty"`
|
||||||
SurroundingLines []string `json:"surrounding_lines,omitempty"`
|
SurroundingLines []string `json:"surrounding_lines,omitempty"`
|
||||||
|
|
||||||
// For COPY block errors
|
// For COPY block errors
|
||||||
InCopyBlock bool `json:"in_copy_block,omitempty"`
|
InCopyBlock bool `json:"in_copy_block,omitempty"`
|
||||||
CopyTableName string `json:"copy_table_name,omitempty"`
|
CopyTableName string `json:"copy_table_name,omitempty"`
|
||||||
CopyStartLine int `json:"copy_start_line,omitempty"`
|
CopyStartLine int `json:"copy_start_line,omitempty"`
|
||||||
SampleCopyData []string `json:"sample_copy_data,omitempty"`
|
SampleCopyData []string `json:"sample_copy_data,omitempty"`
|
||||||
|
|
||||||
// File position info
|
// File position info
|
||||||
BytePosition int64 `json:"byte_position,omitempty"`
|
BytePosition int64 `json:"byte_position,omitempty"`
|
||||||
PercentComplete float64 `json:"percent_complete,omitempty"`
|
PercentComplete float64 `json:"percent_complete,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorCollector captures detailed error information during restore
|
// ErrorCollector captures detailed error information during restore
|
||||||
type ErrorCollector struct {
|
type ErrorCollector struct {
|
||||||
log logger.Logger
|
log logger.Logger
|
||||||
cfg *config.Config
|
cfg *config.Config
|
||||||
archivePath string
|
archivePath string
|
||||||
targetDB string
|
targetDB string
|
||||||
format ArchiveFormat
|
format ArchiveFormat
|
||||||
|
|
||||||
// Captured data
|
// Captured data
|
||||||
stderrLines []string
|
stderrLines []string
|
||||||
firstErrors []string
|
firstErrors []string
|
||||||
lastErrors []string
|
lastErrors []string
|
||||||
totalErrors int
|
totalErrors int
|
||||||
exitCode int
|
exitCode int
|
||||||
|
|
||||||
// Limits
|
// Limits
|
||||||
maxStderrLines int
|
maxStderrLines int
|
||||||
maxErrorCapture int
|
maxErrorCapture int
|
||||||
|
|
||||||
// State
|
// State
|
||||||
startTime time.Time
|
startTime time.Time
|
||||||
enabled bool
|
enabled bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewErrorCollector creates a new error collector
|
// NewErrorCollector creates a new error collector
|
||||||
@@ -126,30 +127,30 @@ func (ec *ErrorCollector) CaptureStderr(chunk string) {
|
|||||||
if !ec.enabled {
|
if !ec.enabled {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
lines := strings.Split(chunk, "\n")
|
lines := strings.Split(chunk, "\n")
|
||||||
for _, line := range lines {
|
for _, line := range lines {
|
||||||
line = strings.TrimSpace(line)
|
line = strings.TrimSpace(line)
|
||||||
if line == "" {
|
if line == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Store last N lines of stderr
|
// Store last N lines of stderr
|
||||||
if len(ec.stderrLines) >= ec.maxStderrLines {
|
if len(ec.stderrLines) >= ec.maxStderrLines {
|
||||||
// Shift array, drop oldest
|
// Shift array, drop oldest
|
||||||
ec.stderrLines = ec.stderrLines[1:]
|
ec.stderrLines = ec.stderrLines[1:]
|
||||||
}
|
}
|
||||||
ec.stderrLines = append(ec.stderrLines, line)
|
ec.stderrLines = append(ec.stderrLines, line)
|
||||||
|
|
||||||
// Check if this is an error line
|
// Check if this is an error line
|
||||||
if isErrorLine(line) {
|
if isErrorLine(line) {
|
||||||
ec.totalErrors++
|
ec.totalErrors++
|
||||||
|
|
||||||
// Capture first N errors
|
// Capture first N errors
|
||||||
if len(ec.firstErrors) < ec.maxErrorCapture {
|
if len(ec.firstErrors) < ec.maxErrorCapture {
|
||||||
ec.firstErrors = append(ec.firstErrors, line)
|
ec.firstErrors = append(ec.firstErrors, line)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Keep last N errors (ring buffer style)
|
// Keep last N errors (ring buffer style)
|
||||||
if len(ec.lastErrors) >= ec.maxErrorCapture {
|
if len(ec.lastErrors) >= ec.maxErrorCapture {
|
||||||
ec.lastErrors = ec.lastErrors[1:]
|
ec.lastErrors = ec.lastErrors[1:]
|
||||||
@@ -184,36 +185,36 @@ func (ec *ErrorCollector) GenerateReport(errMessage string, errType string, errH
|
|||||||
LastStderr: ec.stderrLines,
|
LastStderr: ec.stderrLines,
|
||||||
FirstErrors: ec.firstErrors,
|
FirstErrors: ec.firstErrors,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get archive size
|
// Get archive size
|
||||||
if stat, err := os.Stat(ec.archivePath); err == nil {
|
if stat, err := os.Stat(ec.archivePath); err == nil {
|
||||||
report.ArchiveSize = stat.Size()
|
report.ArchiveSize = stat.Size()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get tool versions
|
// Get tool versions
|
||||||
report.PostgresVersion = getCommandVersion("postgres", "--version")
|
report.PostgresVersion = getCommandVersion("postgres", "--version")
|
||||||
report.PgRestoreVersion = getCommandVersion("pg_restore", "--version")
|
report.PgRestoreVersion = getCommandVersion("pg_restore", "--version")
|
||||||
report.PsqlVersion = getCommandVersion("psql", "--version")
|
report.PsqlVersion = getCommandVersion("psql", "--version")
|
||||||
|
|
||||||
// Analyze failure context
|
// Analyze failure context
|
||||||
report.FailureContext = ec.analyzeFailureContext()
|
report.FailureContext = ec.analyzeFailureContext()
|
||||||
|
|
||||||
// Run diagnosis if not already done
|
// Run diagnosis if not already done
|
||||||
diagnoser := NewDiagnoser(ec.log, false)
|
diagnoser := NewDiagnoser(ec.log, false)
|
||||||
if diagResult, err := diagnoser.DiagnoseFile(ec.archivePath); err == nil {
|
if diagResult, err := diagnoser.DiagnoseFile(ec.archivePath); err == nil {
|
||||||
report.DiagnosisResult = diagResult
|
report.DiagnosisResult = diagResult
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate recommendations
|
// Generate recommendations
|
||||||
report.Recommendations = ec.generateRecommendations(report)
|
report.Recommendations = ec.generateRecommendations(report)
|
||||||
|
|
||||||
return report
|
return report
|
||||||
}
|
}
|
||||||
|
|
||||||
// analyzeFailureContext extracts context around the failure
|
// analyzeFailureContext extracts context around the failure
|
||||||
func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
|
func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
|
||||||
ctx := &FailureContext{}
|
ctx := &FailureContext{}
|
||||||
|
|
||||||
// Look for line number in errors
|
// Look for line number in errors
|
||||||
for _, errLine := range ec.lastErrors {
|
for _, errLine := range ec.lastErrors {
|
||||||
if lineNum := extractLineNumber(errLine); lineNum > 0 {
|
if lineNum := extractLineNumber(errLine); lineNum > 0 {
|
||||||
@@ -221,7 +222,7 @@ func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Look for COPY-related errors
|
// Look for COPY-related errors
|
||||||
for _, errLine := range ec.lastErrors {
|
for _, errLine := range ec.lastErrors {
|
||||||
if strings.Contains(errLine, "COPY") || strings.Contains(errLine, "syntax error") {
|
if strings.Contains(errLine, "COPY") || strings.Contains(errLine, "syntax error") {
|
||||||
@@ -233,12 +234,12 @@ func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we have a line number, try to get surrounding context from the dump
|
// If we have a line number, try to get surrounding context from the dump
|
||||||
if ctx.FailedLine > 0 && ec.archivePath != "" {
|
if ctx.FailedLine > 0 && ec.archivePath != "" {
|
||||||
ctx.SurroundingLines = ec.getSurroundingLines(ctx.FailedLine, 5)
|
ctx.SurroundingLines = ec.getSurroundingLines(ctx.FailedLine, 5)
|
||||||
}
|
}
|
||||||
|
|
||||||
return ctx
|
return ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -246,13 +247,13 @@ func (ec *ErrorCollector) analyzeFailureContext() *FailureContext {
|
|||||||
func (ec *ErrorCollector) getSurroundingLines(lineNum int, context int) []string {
|
func (ec *ErrorCollector) getSurroundingLines(lineNum int, context int) []string {
|
||||||
var reader io.Reader
|
var reader io.Reader
|
||||||
var lines []string
|
var lines []string
|
||||||
|
|
||||||
file, err := os.Open(ec.archivePath)
|
file, err := os.Open(ec.archivePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
// Handle compressed files
|
// Handle compressed files
|
||||||
if strings.HasSuffix(ec.archivePath, ".gz") {
|
if strings.HasSuffix(ec.archivePath, ".gz") {
|
||||||
gz, err := gzip.NewReader(file)
|
gz, err := gzip.NewReader(file)
|
||||||
@@ -264,19 +265,19 @@ func (ec *ErrorCollector) getSurroundingLines(lineNum int, context int) []string
|
|||||||
} else {
|
} else {
|
||||||
reader = file
|
reader = file
|
||||||
}
|
}
|
||||||
|
|
||||||
scanner := bufio.NewScanner(reader)
|
scanner := bufio.NewScanner(reader)
|
||||||
buf := make([]byte, 0, 1024*1024)
|
buf := make([]byte, 0, 1024*1024)
|
||||||
scanner.Buffer(buf, 10*1024*1024)
|
scanner.Buffer(buf, 10*1024*1024)
|
||||||
|
|
||||||
currentLine := 0
|
currentLine := 0
|
||||||
startLine := lineNum - context
|
startLine := lineNum - context
|
||||||
endLine := lineNum + context
|
endLine := lineNum + context
|
||||||
|
|
||||||
if startLine < 1 {
|
if startLine < 1 {
|
||||||
startLine = 1
|
startLine = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
currentLine++
|
currentLine++
|
||||||
if currentLine >= startLine && currentLine <= endLine {
|
if currentLine >= startLine && currentLine <= endLine {
|
||||||
@@ -290,18 +291,18 @@ func (ec *ErrorCollector) getSurroundingLines(lineNum int, context int) []string
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return lines
|
return lines
|
||||||
}
|
}
|
||||||
|
|
||||||
// generateRecommendations provides actionable recommendations based on the error
|
// generateRecommendations provides actionable recommendations based on the error
|
||||||
func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []string {
|
func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []string {
|
||||||
var recs []string
|
var recs []string
|
||||||
|
|
||||||
// Check diagnosis results
|
// Check diagnosis results
|
||||||
if report.DiagnosisResult != nil {
|
if report.DiagnosisResult != nil {
|
||||||
if report.DiagnosisResult.IsTruncated {
|
if report.DiagnosisResult.IsTruncated {
|
||||||
recs = append(recs,
|
recs = append(recs,
|
||||||
"CRITICAL: Backup file is truncated/incomplete",
|
"CRITICAL: Backup file is truncated/incomplete",
|
||||||
"Action: Re-run the backup for the affected database",
|
"Action: Re-run the backup for the affected database",
|
||||||
"Check: Verify disk space was available during backup",
|
"Check: Verify disk space was available during backup",
|
||||||
@@ -317,14 +318,14 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
|
|||||||
}
|
}
|
||||||
if report.DiagnosisResult.Details != nil && report.DiagnosisResult.Details.UnterminatedCopy {
|
if report.DiagnosisResult.Details != nil && report.DiagnosisResult.Details.UnterminatedCopy {
|
||||||
recs = append(recs,
|
recs = append(recs,
|
||||||
fmt.Sprintf("ISSUE: COPY block for table '%s' was not terminated",
|
fmt.Sprintf("ISSUE: COPY block for table '%s' was not terminated",
|
||||||
report.DiagnosisResult.Details.LastCopyTable),
|
report.DiagnosisResult.Details.LastCopyTable),
|
||||||
"Cause: Backup was interrupted during data export",
|
"Cause: Backup was interrupted during data export",
|
||||||
"Action: Re-run backup ensuring it completes fully",
|
"Action: Re-run backup ensuring it completes fully",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check error patterns
|
// Check error patterns
|
||||||
if report.TotalErrors > 1000000 {
|
if report.TotalErrors > 1000000 {
|
||||||
recs = append(recs,
|
recs = append(recs,
|
||||||
@@ -333,7 +334,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
|
|||||||
"Check: Verify dump format matches restore command",
|
"Check: Verify dump format matches restore command",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for common error types
|
// Check for common error types
|
||||||
errLower := strings.ToLower(report.ErrorMessage)
|
errLower := strings.ToLower(report.ErrorMessage)
|
||||||
if strings.Contains(errLower, "syntax error") {
|
if strings.Contains(errLower, "syntax error") {
|
||||||
@@ -343,7 +344,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
|
|||||||
"Check: Run 'dbbackup restore diagnose <archive>' for detailed analysis",
|
"Check: Run 'dbbackup restore diagnose <archive>' for detailed analysis",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.Contains(errLower, "permission denied") {
|
if strings.Contains(errLower, "permission denied") {
|
||||||
recs = append(recs,
|
recs = append(recs,
|
||||||
"ISSUE: Permission denied",
|
"ISSUE: Permission denied",
|
||||||
@@ -351,7 +352,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
|
|||||||
"Action: For ownership preservation, use a superuser account",
|
"Action: For ownership preservation, use a superuser account",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.Contains(errLower, "does not exist") {
|
if strings.Contains(errLower, "does not exist") {
|
||||||
recs = append(recs,
|
recs = append(recs,
|
||||||
"ISSUE: Missing object reference",
|
"ISSUE: Missing object reference",
|
||||||
@@ -359,7 +360,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
|
|||||||
"Action: Check if target database was created",
|
"Action: Check if target database was created",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(recs) == 0 {
|
if len(recs) == 0 {
|
||||||
recs = append(recs,
|
recs = append(recs,
|
||||||
"Run 'dbbackup restore diagnose <archive>' for detailed analysis",
|
"Run 'dbbackup restore diagnose <archive>' for detailed analysis",
|
||||||
@@ -367,7 +368,7 @@ func (ec *ErrorCollector) generateRecommendations(report *RestoreErrorReport) []
|
|||||||
"Review the PostgreSQL/MySQL logs on the target server",
|
"Review the PostgreSQL/MySQL logs on the target server",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
return recs
|
return recs
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -378,56 +379,56 @@ func (ec *ErrorCollector) SaveReport(report *RestoreErrorReport, outputPath stri
|
|||||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||||
return fmt.Errorf("failed to create directory: %w", err)
|
return fmt.Errorf("failed to create directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Marshal to JSON with indentation
|
// Marshal to JSON with indentation
|
||||||
data, err := json.MarshalIndent(report, "", " ")
|
data, err := json.MarshalIndent(report, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to marshal report: %w", err)
|
return fmt.Errorf("failed to marshal report: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write file
|
// Write file
|
||||||
if err := os.WriteFile(outputPath, data, 0644); err != nil {
|
if err := os.WriteFile(outputPath, data, 0644); err != nil {
|
||||||
return fmt.Errorf("failed to write report: %w", err)
|
return fmt.Errorf("failed to write report: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrintReport prints a human-readable summary of the error report
|
// PrintReport prints a human-readable summary of the error report
|
||||||
func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
fmt.Println(strings.Repeat("═", 70))
|
fmt.Println(strings.Repeat("=", 70))
|
||||||
fmt.Println(" 🔴 RESTORE ERROR REPORT")
|
fmt.Println(" [ERROR] RESTORE ERROR REPORT")
|
||||||
fmt.Println(strings.Repeat("═", 70))
|
fmt.Println(strings.Repeat("=", 70))
|
||||||
|
|
||||||
fmt.Printf("\n📅 Timestamp: %s\n", report.Timestamp.Format("2006-01-02 15:04:05"))
|
fmt.Printf("\n[TIME] Timestamp: %s\n", report.Timestamp.Format("2006-01-02 15:04:05"))
|
||||||
fmt.Printf("📦 Archive: %s\n", filepath.Base(report.ArchivePath))
|
fmt.Printf("[FILE] Archive: %s\n", filepath.Base(report.ArchivePath))
|
||||||
fmt.Printf("📊 Format: %s\n", report.ArchiveFormat)
|
fmt.Printf("[FMT] Format: %s\n", report.ArchiveFormat)
|
||||||
fmt.Printf("🎯 Target DB: %s\n", report.TargetDB)
|
fmt.Printf("[TGT] Target DB: %s\n", report.TargetDB)
|
||||||
fmt.Printf("⚠️ Exit Code: %d\n", report.ExitCode)
|
fmt.Printf("[CODE] Exit Code: %d\n", report.ExitCode)
|
||||||
fmt.Printf("❌ Total Errors: %d\n", report.TotalErrors)
|
fmt.Printf("[ERR] Total Errors: %d\n", report.TotalErrors)
|
||||||
|
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("ERROR DETAILS:")
|
fmt.Println("ERROR DETAILS:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
fmt.Printf("\nType: %s\n", report.ErrorType)
|
fmt.Printf("\nType: %s\n", report.ErrorType)
|
||||||
fmt.Printf("Message: %s\n", report.ErrorMessage)
|
fmt.Printf("Message: %s\n", report.ErrorMessage)
|
||||||
if report.ErrorHint != "" {
|
if report.ErrorHint != "" {
|
||||||
fmt.Printf("Hint: %s\n", report.ErrorHint)
|
fmt.Printf("Hint: %s\n", report.ErrorHint)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show failure context
|
// Show failure context
|
||||||
if report.FailureContext != nil && report.FailureContext.FailedLine > 0 {
|
if report.FailureContext != nil && report.FailureContext.FailedLine > 0 {
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("FAILURE CONTEXT:")
|
fmt.Println("FAILURE CONTEXT:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
fmt.Printf("\nFailed at line: %d\n", report.FailureContext.FailedLine)
|
fmt.Printf("\nFailed at line: %d\n", report.FailureContext.FailedLine)
|
||||||
if report.FailureContext.InCopyBlock {
|
if report.FailureContext.InCopyBlock {
|
||||||
fmt.Printf("Inside COPY block for table: %s\n", report.FailureContext.CopyTableName)
|
fmt.Printf("Inside COPY block for table: %s\n", report.FailureContext.CopyTableName)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(report.FailureContext.SurroundingLines) > 0 {
|
if len(report.FailureContext.SurroundingLines) > 0 {
|
||||||
fmt.Println("\nSurrounding lines:")
|
fmt.Println("\nSurrounding lines:")
|
||||||
for _, line := range report.FailureContext.SurroundingLines {
|
for _, line := range report.FailureContext.SurroundingLines {
|
||||||
@@ -435,13 +436,13 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show first few errors
|
// Show first few errors
|
||||||
if len(report.FirstErrors) > 0 {
|
if len(report.FirstErrors) > 0 {
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("FIRST ERRORS:")
|
fmt.Println("FIRST ERRORS:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
for i, err := range report.FirstErrors {
|
for i, err := range report.FirstErrors {
|
||||||
if i >= 5 {
|
if i >= 5 {
|
||||||
fmt.Printf("... and %d more\n", len(report.FirstErrors)-5)
|
fmt.Printf("... and %d more\n", len(report.FirstErrors)-5)
|
||||||
@@ -450,18 +451,18 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
fmt.Printf(" %d. %s\n", i+1, truncateString(err, 100))
|
fmt.Printf(" %d. %s\n", i+1, truncateString(err, 100))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show diagnosis summary
|
// Show diagnosis summary
|
||||||
if report.DiagnosisResult != nil && !report.DiagnosisResult.IsValid {
|
if report.DiagnosisResult != nil && !report.DiagnosisResult.IsValid {
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("DIAGNOSIS:")
|
fmt.Println("DIAGNOSIS:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
if report.DiagnosisResult.IsTruncated {
|
if report.DiagnosisResult.IsTruncated {
|
||||||
fmt.Println(" ❌ File is TRUNCATED")
|
fmt.Println(" [FAIL] File is TRUNCATED")
|
||||||
}
|
}
|
||||||
if report.DiagnosisResult.IsCorrupted {
|
if report.DiagnosisResult.IsCorrupted {
|
||||||
fmt.Println(" ❌ File is CORRUPTED")
|
fmt.Println(" [FAIL] File is CORRUPTED")
|
||||||
}
|
}
|
||||||
for i, err := range report.DiagnosisResult.Errors {
|
for i, err := range report.DiagnosisResult.Errors {
|
||||||
if i >= 3 {
|
if i >= 3 {
|
||||||
@@ -470,21 +471,21 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
fmt.Printf(" • %s\n", err)
|
fmt.Printf(" • %s\n", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show recommendations
|
// Show recommendations
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("💡 RECOMMENDATIONS:")
|
fmt.Println("[HINT] RECOMMENDATIONS:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
for _, rec := range report.Recommendations {
|
for _, rec := range report.Recommendations {
|
||||||
fmt.Printf(" • %s\n", rec)
|
fmt.Printf(" - %s\n", rec)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show tool versions
|
// Show tool versions
|
||||||
fmt.Println("\n" + strings.Repeat("─", 70))
|
fmt.Println("\n" + strings.Repeat("-", 70))
|
||||||
fmt.Println("ENVIRONMENT:")
|
fmt.Println("ENVIRONMENT:")
|
||||||
fmt.Println(strings.Repeat("─", 70))
|
fmt.Println(strings.Repeat("-", 70))
|
||||||
|
|
||||||
fmt.Printf(" OS: %s/%s\n", report.OS, report.Arch)
|
fmt.Printf(" OS: %s/%s\n", report.OS, report.Arch)
|
||||||
fmt.Printf(" Go: %s\n", report.GoVersion)
|
fmt.Printf(" Go: %s\n", report.GoVersion)
|
||||||
if report.PgRestoreVersion != "" {
|
if report.PgRestoreVersion != "" {
|
||||||
@@ -493,15 +494,15 @@ func (ec *ErrorCollector) PrintReport(report *RestoreErrorReport) {
|
|||||||
if report.PsqlVersion != "" {
|
if report.PsqlVersion != "" {
|
||||||
fmt.Printf(" psql: %s\n", report.PsqlVersion)
|
fmt.Printf(" psql: %s\n", report.PsqlVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(strings.Repeat("═", 70))
|
fmt.Println(strings.Repeat("=", 70))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper functions
|
// Helper functions
|
||||||
|
|
||||||
func isErrorLine(line string) bool {
|
func isErrorLine(line string) bool {
|
||||||
return strings.Contains(line, "ERROR:") ||
|
return strings.Contains(line, "ERROR:") ||
|
||||||
strings.Contains(line, "FATAL:") ||
|
strings.Contains(line, "FATAL:") ||
|
||||||
strings.Contains(line, "error:") ||
|
strings.Contains(line, "error:") ||
|
||||||
strings.Contains(line, "PANIC:")
|
strings.Contains(line, "PANIC:")
|
||||||
}
|
}
|
||||||
@@ -556,7 +557,11 @@ func getDatabaseType(format ArchiveFormat) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getCommandVersion(cmd string, arg string) string {
|
func getCommandVersion(cmd string, arg string) string {
|
||||||
output, err := exec.Command(cmd, arg).CombinedOutput()
|
// Use timeout to prevent blocking if command hangs
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
output, err := exec.CommandContext(ctx, cmd, arg).CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
"dbbackup/internal/database"
|
"dbbackup/internal/database"
|
||||||
)
|
)
|
||||||
@@ -47,8 +48,13 @@ func ParsePostgreSQLVersion(versionStr string) (*VersionInfo, error) {
|
|||||||
|
|
||||||
// GetDumpFileVersion extracts the PostgreSQL version from a dump file
|
// GetDumpFileVersion extracts the PostgreSQL version from a dump file
|
||||||
// Uses pg_restore -l to read the dump metadata
|
// Uses pg_restore -l to read the dump metadata
|
||||||
|
// Uses a 30-second timeout to avoid blocking on large files
|
||||||
func GetDumpFileVersion(dumpPath string) (*VersionInfo, error) {
|
func GetDumpFileVersion(dumpPath string) (*VersionInfo, error) {
|
||||||
cmd := exec.Command("pg_restore", "-l", dumpPath)
|
// Use a timeout context to prevent blocking on very large dump files
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, "pg_restore", "-l", dumpPath)
|
||||||
output, err := cmd.CombinedOutput()
|
output, err := cmd.CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to read dump file metadata: %w (output: %s)", err, string(output))
|
return nil, fmt.Errorf("failed to read dump file metadata: %w (output: %s)", err, string(output))
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ func (pc *PrivilegeChecker) CheckAndWarn(allowRoot bool) error {
|
|||||||
isRoot, user := pc.isRunningAsRoot()
|
isRoot, user := pc.isRunningAsRoot()
|
||||||
|
|
||||||
if isRoot {
|
if isRoot {
|
||||||
pc.log.Warn("⚠️ Running with elevated privileges (root/Administrator)")
|
pc.log.Warn("[WARN] Running with elevated privileges (root/Administrator)")
|
||||||
pc.log.Warn("Security recommendation: Create a dedicated backup user with minimal privileges")
|
pc.log.Warn("Security recommendation: Create a dedicated backup user with minimal privileges")
|
||||||
|
|
||||||
if !allowRoot {
|
if !allowRoot {
|
||||||
|
|||||||
@@ -64,7 +64,7 @@ func (rc *ResourceChecker) ValidateResourcesForBackup(estimatedSize int64) error
|
|||||||
|
|
||||||
if len(warnings) > 0 {
|
if len(warnings) > 0 {
|
||||||
for _, warning := range warnings {
|
for _, warning := range warnings {
|
||||||
rc.log.Warn("⚠️ Resource constraint: " + warning)
|
rc.log.Warn("[WARN] Resource constraint: " + warning)
|
||||||
}
|
}
|
||||||
rc.log.Info("Continuing backup operation (warnings are informational)")
|
rc.log.Info("Continuing backup operation (warnings are informational)")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ func (rc *ResourceChecker) checkPlatformLimits() (*ResourceLimits, error) {
|
|||||||
rc.log.Debug("Resource limit: max open files", "limit", rLimit.Cur, "max", rLimit.Max)
|
rc.log.Debug("Resource limit: max open files", "limit", rLimit.Cur, "max", rLimit.Max)
|
||||||
|
|
||||||
if rLimit.Cur < 1024 {
|
if rLimit.Cur < 1024 {
|
||||||
rc.log.Warn("⚠️ Low file descriptor limit detected",
|
rc.log.Warn("[WARN] Low file descriptor limit detected",
|
||||||
"current", rLimit.Cur,
|
"current", rLimit.Cur,
|
||||||
"recommended", 4096,
|
"recommended", 4096,
|
||||||
"hint", "Increase with: ulimit -n 4096")
|
"hint", "Increase with: ulimit -n 4096")
|
||||||
|
|||||||
@@ -209,12 +209,12 @@ func (m ArchiveBrowserModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
|
|
||||||
// Validate selection based on mode
|
// Validate selection based on mode
|
||||||
if m.mode == "restore-cluster" && !selected.Format.IsClusterBackup() {
|
if m.mode == "restore-cluster" && !selected.Format.IsClusterBackup() {
|
||||||
m.message = errorStyle.Render("❌ Please select a cluster backup (.tar.gz)")
|
m.message = errorStyle.Render("[FAIL] Please select a cluster backup (.tar.gz)")
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.mode == "restore-single" && selected.Format.IsClusterBackup() {
|
if m.mode == "restore-single" && selected.Format.IsClusterBackup() {
|
||||||
m.message = errorStyle.Render("❌ Please select a single database backup")
|
m.message = errorStyle.Render("[FAIL] Please select a single database backup")
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -227,7 +227,7 @@ func (m ArchiveBrowserModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
// Show detailed info
|
// Show detailed info
|
||||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||||
selected := m.archives[m.cursor]
|
selected := m.archives[m.cursor]
|
||||||
m.message = fmt.Sprintf("📦 %s | Format: %s | Size: %s | Modified: %s",
|
m.message = fmt.Sprintf("[PKG] %s | Format: %s | Size: %s | Modified: %s",
|
||||||
selected.Name,
|
selected.Name,
|
||||||
selected.Format.String(),
|
selected.Format.String(),
|
||||||
formatSize(selected.Size),
|
formatSize(selected.Size),
|
||||||
@@ -251,13 +251,13 @@ func (m ArchiveBrowserModel) View() string {
|
|||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
// Header
|
// Header
|
||||||
title := "📦 Backup Archives"
|
title := "[PKG] Backup Archives"
|
||||||
if m.mode == "restore-single" {
|
if m.mode == "restore-single" {
|
||||||
title = "📦 Select Archive to Restore (Single Database)"
|
title = "[PKG] Select Archive to Restore (Single Database)"
|
||||||
} else if m.mode == "restore-cluster" {
|
} else if m.mode == "restore-cluster" {
|
||||||
title = "📦 Select Archive to Restore (Cluster)"
|
title = "[PKG] Select Archive to Restore (Cluster)"
|
||||||
} else if m.mode == "diagnose" {
|
} else if m.mode == "diagnose" {
|
||||||
title = "🔍 Select Archive to Diagnose"
|
title = "[SEARCH] Select Archive to Diagnose"
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString(titleStyle.Render(title))
|
s.WriteString(titleStyle.Render(title))
|
||||||
@@ -269,7 +269,7 @@ func (m ArchiveBrowserModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v", m.err)))
|
s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v", m.err)))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
||||||
return s.String()
|
return s.String()
|
||||||
@@ -293,7 +293,7 @@ func (m ArchiveBrowserModel) View() string {
|
|||||||
s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-40s %-25s %-12s %-20s",
|
s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-40s %-25s %-12s %-20s",
|
||||||
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(strings.Repeat("─", 100))
|
s.WriteString(strings.Repeat("-", 100))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
// Show archives (limit to visible area)
|
// Show archives (limit to visible area)
|
||||||
@@ -317,13 +317,13 @@ func (m ArchiveBrowserModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Color code based on validity and age
|
// Color code based on validity and age
|
||||||
statusIcon := "✓"
|
statusIcon := "[+]"
|
||||||
if !archive.Valid {
|
if !archive.Valid {
|
||||||
statusIcon = "✗"
|
statusIcon = "[-]"
|
||||||
style = archiveInvalidStyle
|
style = archiveInvalidStyle
|
||||||
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
||||||
style = archiveOldStyle
|
style = archiveOldStyle
|
||||||
statusIcon = "⚠"
|
statusIcon = "[WARN]"
|
||||||
}
|
}
|
||||||
|
|
||||||
filename := truncate(archive.Name, 38)
|
filename := truncate(archive.Name, 38)
|
||||||
@@ -351,7 +351,7 @@ func (m ArchiveBrowserModel) View() string {
|
|||||||
s.WriteString(infoStyle.Render(fmt.Sprintf("Total: %d archive(s) | Selected: %d/%d",
|
s.WriteString(infoStyle.Render(fmt.Sprintf("Total: %d archive(s) | Selected: %d/%d",
|
||||||
len(m.archives), m.cursor+1, len(m.archives))))
|
len(m.archives), m.cursor+1, len(m.archives))))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(infoStyle.Render("⌨️ ↑/↓: Navigate | Enter: Select | d: Diagnose | f: Filter | i: Info | Esc: Back"))
|
s.WriteString(infoStyle.Render("[KEY] ↑/↓: Navigate | Enter: Select | d: Diagnose | f: Filter | i: Info | Esc: Back"))
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -83,10 +83,10 @@ type backupCompleteMsg struct {
|
|||||||
|
|
||||||
func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, backupType, dbName string, ratio int) tea.Cmd {
|
func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, backupType, dbName string, ratio int) tea.Cmd {
|
||||||
return func() tea.Msg {
|
return func() tea.Msg {
|
||||||
// Use configurable cluster timeout (minutes) from config; default set in config.New()
|
// NO TIMEOUT for backup operations - a backup takes as long as it takes
|
||||||
// Use parent context to inherit cancellation from TUI
|
// Large databases can take many hours
|
||||||
clusterTimeout := time.Duration(cfg.ClusterTimeoutMinutes) * time.Minute
|
// Only manual cancellation (Ctrl+C) should stop the backup
|
||||||
ctx, cancel := context.WithTimeout(parentCtx, clusterTimeout)
|
ctx, cancel := context.WithCancel(parentCtx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
@@ -136,11 +136,11 @@ func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config,
|
|||||||
var result string
|
var result string
|
||||||
switch backupType {
|
switch backupType {
|
||||||
case "single":
|
case "single":
|
||||||
result = fmt.Sprintf("✓ Single database backup of '%s' completed successfully in %v", dbName, elapsed)
|
result = fmt.Sprintf("[+] Single database backup of '%s' completed successfully in %v", dbName, elapsed)
|
||||||
case "sample":
|
case "sample":
|
||||||
result = fmt.Sprintf("✓ Sample backup of '%s' (ratio: %d) completed successfully in %v", dbName, ratio, elapsed)
|
result = fmt.Sprintf("[+] Sample backup of '%s' (ratio: %d) completed successfully in %v", dbName, ratio, elapsed)
|
||||||
case "cluster":
|
case "cluster":
|
||||||
result = fmt.Sprintf("✓ Cluster backup completed successfully in %v", elapsed)
|
result = fmt.Sprintf("[+] Cluster backup completed successfully in %v", elapsed)
|
||||||
}
|
}
|
||||||
|
|
||||||
return backupCompleteMsg{
|
return backupCompleteMsg{
|
||||||
@@ -200,9 +200,9 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
m.err = msg.err
|
m.err = msg.err
|
||||||
m.result = msg.result
|
m.result = msg.result
|
||||||
if m.err == nil {
|
if m.err == nil {
|
||||||
m.status = "✅ Backup completed successfully!"
|
m.status = "[OK] Backup completed successfully!"
|
||||||
} else {
|
} else {
|
||||||
m.status = fmt.Sprintf("❌ Backup failed: %v", m.err)
|
m.status = fmt.Sprintf("[FAIL] Backup failed: %v", m.err)
|
||||||
}
|
}
|
||||||
// Auto-forward in debug/auto-confirm mode
|
// Auto-forward in debug/auto-confirm mode
|
||||||
if m.config.TUIAutoConfirm {
|
if m.config.TUIAutoConfirm {
|
||||||
@@ -216,7 +216,7 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
if !m.done && !m.cancelling {
|
if !m.done && !m.cancelling {
|
||||||
// User requested cancellation - cancel the context
|
// User requested cancellation - cancel the context
|
||||||
m.cancelling = true
|
m.cancelling = true
|
||||||
m.status = "⏹️ Cancelling backup... (please wait)"
|
m.status = "[STOP] Cancelling backup... (please wait)"
|
||||||
if m.cancel != nil {
|
if m.cancel != nil {
|
||||||
m.cancel()
|
m.cancel()
|
||||||
}
|
}
|
||||||
@@ -240,7 +240,7 @@ func (m BackupExecutionModel) View() string {
|
|||||||
|
|
||||||
// Clear screen with newlines and render header
|
// Clear screen with newlines and render header
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
header := titleStyle.Render("🔄 Backup Execution")
|
header := titleStyle.Render("[EXEC] Backup Execution")
|
||||||
s.WriteString(header)
|
s.WriteString(header)
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
@@ -261,13 +261,13 @@ func (m BackupExecutionModel) View() string {
|
|||||||
s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
|
s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
|
s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
|
||||||
s.WriteString("\n ⌨️ Press Ctrl+C or ESC to cancel\n")
|
s.WriteString("\n [KEY] Press Ctrl+C or ESC to cancel\n")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(fmt.Sprintf(" %s\n\n", m.status))
|
s.WriteString(fmt.Sprintf(" %s\n\n", m.status))
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(fmt.Sprintf(" ❌ Error: %v\n", m.err))
|
s.WriteString(fmt.Sprintf(" [FAIL] Error: %v\n", m.err))
|
||||||
} else if m.result != "" {
|
} else if m.result != "" {
|
||||||
// Parse and display result cleanly
|
// Parse and display result cleanly
|
||||||
lines := strings.Split(m.result, "\n")
|
lines := strings.Split(m.result, "\n")
|
||||||
@@ -278,7 +278,7 @@ func (m BackupExecutionModel) View() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
s.WriteString("\n ⌨️ Press Enter or ESC to return to menu\n")
|
s.WriteString("\n [KEY] Press Enter or ESC to return to menu\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
|
|||||||
@@ -86,7 +86,7 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
// Verify archive
|
// Verify archive
|
||||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||||
selected := m.archives[m.cursor]
|
selected := m.archives[m.cursor]
|
||||||
m.message = fmt.Sprintf("🔍 Verifying %s...", selected.Name)
|
m.message = fmt.Sprintf("[SEARCH] Verifying %s...", selected.Name)
|
||||||
// In real implementation, would run verification
|
// In real implementation, would run verification
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,16 +96,16 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
selected := m.archives[m.cursor]
|
selected := m.archives[m.cursor]
|
||||||
archivePath := selected.Path
|
archivePath := selected.Path
|
||||||
confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
|
confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
|
||||||
"🗑️ Delete Archive",
|
"[DELETE] Delete Archive",
|
||||||
fmt.Sprintf("Delete archive '%s'? This cannot be undone.", selected.Name),
|
fmt.Sprintf("Delete archive '%s'? This cannot be undone.", selected.Name),
|
||||||
func() (tea.Model, tea.Cmd) {
|
func() (tea.Model, tea.Cmd) {
|
||||||
// Delete the archive
|
// Delete the archive
|
||||||
err := deleteArchive(archivePath)
|
err := deleteArchive(archivePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
m.err = fmt.Errorf("failed to delete archive: %v", err)
|
m.err = fmt.Errorf("failed to delete archive: %v", err)
|
||||||
m.message = fmt.Sprintf("❌ Failed to delete: %v", err)
|
m.message = fmt.Sprintf("[FAIL] Failed to delete: %v", err)
|
||||||
} else {
|
} else {
|
||||||
m.message = fmt.Sprintf("✅ Deleted: %s", selected.Name)
|
m.message = fmt.Sprintf("[OK] Deleted: %s", selected.Name)
|
||||||
}
|
}
|
||||||
// Refresh the archive list
|
// Refresh the archive list
|
||||||
m.loading = true
|
m.loading = true
|
||||||
@@ -118,7 +118,7 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
// Show info
|
// Show info
|
||||||
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
if len(m.archives) > 0 && m.cursor < len(m.archives) {
|
||||||
selected := m.archives[m.cursor]
|
selected := m.archives[m.cursor]
|
||||||
m.message = fmt.Sprintf("📦 %s | %s | %s | Modified: %s",
|
m.message = fmt.Sprintf("[PKG] %s | %s | %s | Modified: %s",
|
||||||
selected.Name,
|
selected.Name,
|
||||||
selected.Format.String(),
|
selected.Format.String(),
|
||||||
formatSize(selected.Size),
|
formatSize(selected.Size),
|
||||||
@@ -152,7 +152,7 @@ func (m BackupManagerModel) View() string {
|
|||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
// Title
|
// Title
|
||||||
s.WriteString(titleStyle.Render("🗄️ Backup Archive Manager"))
|
s.WriteString(titleStyle.Render("[DB] Backup Archive Manager"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
if m.loading {
|
if m.loading {
|
||||||
@@ -161,7 +161,7 @@ func (m BackupManagerModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v", m.err)))
|
s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v", m.err)))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
||||||
return s.String()
|
return s.String()
|
||||||
@@ -184,7 +184,7 @@ func (m BackupManagerModel) View() string {
|
|||||||
s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-35s %-25s %-12s %-20s",
|
s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf("%-35s %-25s %-12s %-20s",
|
||||||
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(strings.Repeat("─", 95))
|
s.WriteString(strings.Repeat("-", 95))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
// Show archives (limit to visible area)
|
// Show archives (limit to visible area)
|
||||||
@@ -208,12 +208,12 @@ func (m BackupManagerModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Status icon
|
// Status icon
|
||||||
statusIcon := "✓"
|
statusIcon := "[+]"
|
||||||
if !archive.Valid {
|
if !archive.Valid {
|
||||||
statusIcon = "✗"
|
statusIcon = "[-]"
|
||||||
style = archiveInvalidStyle
|
style = archiveInvalidStyle
|
||||||
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
||||||
statusIcon = "⚠"
|
statusIcon = "[WARN]"
|
||||||
}
|
}
|
||||||
|
|
||||||
filename := truncate(archive.Name, 33)
|
filename := truncate(archive.Name, 33)
|
||||||
@@ -240,7 +240,7 @@ func (m BackupManagerModel) View() string {
|
|||||||
|
|
||||||
s.WriteString(infoStyle.Render(fmt.Sprintf("Selected: %d/%d", m.cursor+1, len(m.archives))))
|
s.WriteString(infoStyle.Render(fmt.Sprintf("Selected: %d/%d", m.cursor+1, len(m.archives))))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(infoStyle.Render("⌨️ ↑/↓: Navigate | r: Restore | v: Verify | d: Delete | i: Info | R: Refresh | Esc: Back"))
|
s.WriteString(infoStyle.Render("[KEY] ↑/↓: Navigate | r: Restore | v: Verify | d: Delete | i: Info | R: Refresh | Esc: Back"))
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -67,7 +67,6 @@ func (m ConfirmationModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
switch msg := msg.(type) {
|
switch msg := msg.(type) {
|
||||||
case autoConfirmMsg:
|
case autoConfirmMsg:
|
||||||
// Auto-confirm triggered
|
// Auto-confirm triggered
|
||||||
m.confirmed = true
|
|
||||||
if m.onConfirm != nil {
|
if m.onConfirm != nil {
|
||||||
return m.onConfirm()
|
return m.onConfirm()
|
||||||
}
|
}
|
||||||
@@ -95,7 +94,6 @@ func (m ConfirmationModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
|
|
||||||
case "enter", "y":
|
case "enter", "y":
|
||||||
if msg.String() == "y" || m.cursor == 0 {
|
if msg.String() == "y" || m.cursor == 0 {
|
||||||
m.confirmed = true
|
|
||||||
// Execute the onConfirm callback if provided
|
// Execute the onConfirm callback if provided
|
||||||
if m.onConfirm != nil {
|
if m.onConfirm != nil {
|
||||||
return m.onConfirm()
|
return m.onConfirm()
|
||||||
@@ -131,7 +129,7 @@ func (m ConfirmationModel) View() string {
|
|||||||
s.WriteString(" ")
|
s.WriteString(" ")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString("\n\n⌨️ ←/→: Select • Enter/y: Confirm • n/ESC: Cancel\n")
|
s.WriteString("\n\n[KEYS] <-/->: Select | Enter/y: Confirm | n/ESC: Cancel\n")
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -53,7 +53,8 @@ type databaseListMsg struct {
|
|||||||
|
|
||||||
func fetchDatabases(cfg *config.Config, log logger.Logger) tea.Cmd {
|
func fetchDatabases(cfg *config.Config, log logger.Logger) tea.Cmd {
|
||||||
return func() tea.Msg {
|
return func() tea.Msg {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
// 60 seconds for database listing - busy servers may be slow
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
dbClient, err := database.New(cfg, log)
|
dbClient, err := database.New(cfg, log)
|
||||||
@@ -108,7 +109,7 @@ func (m DatabaseSelectorModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
return executor, executor.Init()
|
return executor, executor.Init()
|
||||||
}
|
}
|
||||||
inputModel := NewInputModel(m.config, m.logger, m,
|
inputModel := NewInputModel(m.config, m.logger, m,
|
||||||
"📊 Sample Ratio",
|
"[STATS] Sample Ratio",
|
||||||
"Enter sample ratio (1-100):",
|
"Enter sample ratio (1-100):",
|
||||||
"10",
|
"10",
|
||||||
ValidateInt(1, 100))
|
ValidateInt(1, 100))
|
||||||
@@ -151,7 +152,7 @@ func (m DatabaseSelectorModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
// If sample backup, ask for ratio first
|
// If sample backup, ask for ratio first
|
||||||
if m.backupType == "sample" {
|
if m.backupType == "sample" {
|
||||||
inputModel := NewInputModel(m.config, m.logger, m,
|
inputModel := NewInputModel(m.config, m.logger, m,
|
||||||
"📊 Sample Ratio",
|
"[STATS] Sample Ratio",
|
||||||
"Enter sample ratio (1-100):",
|
"Enter sample ratio (1-100):",
|
||||||
"10",
|
"10",
|
||||||
ValidateInt(1, 100))
|
ValidateInt(1, 100))
|
||||||
@@ -175,12 +176,12 @@ func (m DatabaseSelectorModel) View() string {
|
|||||||
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
||||||
|
|
||||||
if m.loading {
|
if m.loading {
|
||||||
s.WriteString("⏳ Loading databases...\n")
|
s.WriteString("[WAIT] Loading databases...\n")
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(fmt.Sprintf("❌ Error: %v\n", m.err))
|
s.WriteString(fmt.Sprintf("[FAIL] Error: %v\n", m.err))
|
||||||
s.WriteString("\nPress ESC to go back\n")
|
s.WriteString("\nPress ESC to go back\n")
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
@@ -202,7 +203,7 @@ func (m DatabaseSelectorModel) View() string {
|
|||||||
s.WriteString(fmt.Sprintf("\n%s\n", m.message))
|
s.WriteString(fmt.Sprintf("\n%s\n", m.message))
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString("\n⌨️ ↑/↓: Navigate • Enter: Select • ESC: Back • q: Quit\n")
|
s.WriteString("\n[KEYS] Up/Down: Navigate | Enter: Select | ESC: Back | q: Quit\n")
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -160,7 +160,7 @@ func (m DiagnoseViewModel) View() string {
|
|||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
// Header
|
// Header
|
||||||
s.WriteString(titleStyle.Render("🔍 Backup Diagnosis"))
|
s.WriteString(titleStyle.Render("[SEARCH] Backup Diagnosis"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
// Archive info
|
// Archive info
|
||||||
@@ -175,14 +175,14 @@ func (m DiagnoseViewModel) View() string {
|
|||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
if m.running {
|
if m.running {
|
||||||
s.WriteString(infoStyle.Render("⏳ " + m.progress))
|
s.WriteString(infoStyle.Render("[WAIT] " + m.progress))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(diagnoseInfoStyle.Render("This may take a while for large archives..."))
|
s.WriteString(diagnoseInfoStyle.Render("This may take a while for large archives..."))
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Diagnosis failed: %v", m.err)))
|
s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Diagnosis failed: %v", m.err)))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(infoStyle.Render("Press Enter or Esc to go back"))
|
s.WriteString(infoStyle.Render("Press Enter or Esc to go back"))
|
||||||
return s.String()
|
return s.String()
|
||||||
@@ -205,72 +205,72 @@ func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) st
|
|||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
// Status
|
// Status
|
||||||
s.WriteString(strings.Repeat("─", 60))
|
s.WriteString(strings.Repeat("-", 60))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
if result.IsValid {
|
if result.IsValid {
|
||||||
s.WriteString(diagnosePassStyle.Render("✅ STATUS: VALID"))
|
s.WriteString(diagnosePassStyle.Render("[OK] STATUS: VALID"))
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(diagnoseFailStyle.Render("❌ STATUS: INVALID"))
|
s.WriteString(diagnoseFailStyle.Render("[FAIL] STATUS: INVALID"))
|
||||||
}
|
}
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
if result.IsTruncated {
|
if result.IsTruncated {
|
||||||
s.WriteString(diagnoseFailStyle.Render("⚠️ TRUNCATED: File appears incomplete"))
|
s.WriteString(diagnoseFailStyle.Render("[WARN] TRUNCATED: File appears incomplete"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.IsCorrupted {
|
if result.IsCorrupted {
|
||||||
s.WriteString(diagnoseFailStyle.Render("⚠️ CORRUPTED: File structure is damaged"))
|
s.WriteString(diagnoseFailStyle.Render("[WARN] CORRUPTED: File structure is damaged"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString(strings.Repeat("─", 60))
|
s.WriteString(strings.Repeat("-", 60))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
// Details
|
// Details
|
||||||
if result.Details != nil {
|
if result.Details != nil {
|
||||||
s.WriteString(diagnoseHeaderStyle.Render("📊 DETAILS:"))
|
s.WriteString(diagnoseHeaderStyle.Render("[STATS] DETAILS:"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
if result.Details.HasPGDMPSignature {
|
if result.Details.HasPGDMPSignature {
|
||||||
s.WriteString(diagnosePassStyle.Render(" ✓ "))
|
s.WriteString(diagnosePassStyle.Render(" [+] "))
|
||||||
s.WriteString("Has PGDMP signature (custom format)\n")
|
s.WriteString("Has PGDMP signature (custom format)\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Details.HasSQLHeader {
|
if result.Details.HasSQLHeader {
|
||||||
s.WriteString(diagnosePassStyle.Render(" ✓ "))
|
s.WriteString(diagnosePassStyle.Render(" [+] "))
|
||||||
s.WriteString("Has PostgreSQL SQL header\n")
|
s.WriteString("Has PostgreSQL SQL header\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Details.GzipValid {
|
if result.Details.GzipValid {
|
||||||
s.WriteString(diagnosePassStyle.Render(" ✓ "))
|
s.WriteString(diagnosePassStyle.Render(" [+] "))
|
||||||
s.WriteString("Gzip compression valid\n")
|
s.WriteString("Gzip compression valid\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Details.PgRestoreListable {
|
if result.Details.PgRestoreListable {
|
||||||
s.WriteString(diagnosePassStyle.Render(" ✓ "))
|
s.WriteString(diagnosePassStyle.Render(" [+] "))
|
||||||
s.WriteString(fmt.Sprintf("pg_restore can list contents (%d tables)\n", result.Details.TableCount))
|
s.WriteString(fmt.Sprintf("pg_restore can list contents (%d tables)\n", result.Details.TableCount))
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Details.CopyBlockCount > 0 {
|
if result.Details.CopyBlockCount > 0 {
|
||||||
s.WriteString(diagnoseInfoStyle.Render(" • "))
|
s.WriteString(diagnoseInfoStyle.Render(" - "))
|
||||||
s.WriteString(fmt.Sprintf("Contains %d COPY blocks\n", result.Details.CopyBlockCount))
|
s.WriteString(fmt.Sprintf("Contains %d COPY blocks\n", result.Details.CopyBlockCount))
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Details.UnterminatedCopy {
|
if result.Details.UnterminatedCopy {
|
||||||
s.WriteString(diagnoseFailStyle.Render(" ✗ "))
|
s.WriteString(diagnoseFailStyle.Render(" [-] "))
|
||||||
s.WriteString(fmt.Sprintf("Unterminated COPY block: %s (line %d)\n",
|
s.WriteString(fmt.Sprintf("Unterminated COPY block: %s (line %d)\n",
|
||||||
result.Details.LastCopyTable, result.Details.LastCopyLineNumber))
|
result.Details.LastCopyTable, result.Details.LastCopyLineNumber))
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Details.ProperlyTerminated {
|
if result.Details.ProperlyTerminated {
|
||||||
s.WriteString(diagnosePassStyle.Render(" ✓ "))
|
s.WriteString(diagnosePassStyle.Render(" [+] "))
|
||||||
s.WriteString("All COPY blocks properly terminated\n")
|
s.WriteString("All COPY blocks properly terminated\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Details.ExpandedSize > 0 {
|
if result.Details.ExpandedSize > 0 {
|
||||||
s.WriteString(diagnoseInfoStyle.Render(" • "))
|
s.WriteString(diagnoseInfoStyle.Render(" - "))
|
||||||
s.WriteString(fmt.Sprintf("Expanded size: %s (ratio: %.1fx)\n",
|
s.WriteString(fmt.Sprintf("Expanded size: %s (ratio: %.1fx)\n",
|
||||||
formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio))
|
formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio))
|
||||||
}
|
}
|
||||||
@@ -279,14 +279,14 @@ func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) st
|
|||||||
// Errors
|
// Errors
|
||||||
if len(result.Errors) > 0 {
|
if len(result.Errors) > 0 {
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(diagnoseFailStyle.Render("❌ ERRORS:"))
|
s.WriteString(diagnoseFailStyle.Render("[FAIL] ERRORS:"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
for i, e := range result.Errors {
|
for i, e := range result.Errors {
|
||||||
if i >= 5 {
|
if i >= 5 {
|
||||||
s.WriteString(diagnoseInfoStyle.Render(fmt.Sprintf(" ... and %d more\n", len(result.Errors)-5)))
|
s.WriteString(diagnoseInfoStyle.Render(fmt.Sprintf(" ... and %d more\n", len(result.Errors)-5)))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
s.WriteString(diagnoseFailStyle.Render(" • "))
|
s.WriteString(diagnoseFailStyle.Render(" - "))
|
||||||
s.WriteString(truncate(e, 70))
|
s.WriteString(truncate(e, 70))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
@@ -295,14 +295,14 @@ func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) st
|
|||||||
// Warnings
|
// Warnings
|
||||||
if len(result.Warnings) > 0 {
|
if len(result.Warnings) > 0 {
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(diagnoseWarnStyle.Render("⚠️ WARNINGS:"))
|
s.WriteString(diagnoseWarnStyle.Render("[WARN] WARNINGS:"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
for i, w := range result.Warnings {
|
for i, w := range result.Warnings {
|
||||||
if i >= 3 {
|
if i >= 3 {
|
||||||
s.WriteString(diagnoseInfoStyle.Render(fmt.Sprintf(" ... and %d more\n", len(result.Warnings)-3)))
|
s.WriteString(diagnoseInfoStyle.Render(fmt.Sprintf(" ... and %d more\n", len(result.Warnings)-3)))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
s.WriteString(diagnoseWarnStyle.Render(" • "))
|
s.WriteString(diagnoseWarnStyle.Render(" - "))
|
||||||
s.WriteString(truncate(w, 70))
|
s.WriteString(truncate(w, 70))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
@@ -311,7 +311,7 @@ func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) st
|
|||||||
// Recommendations
|
// Recommendations
|
||||||
if !result.IsValid {
|
if !result.IsValid {
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(diagnoseHeaderStyle.Render("💡 RECOMMENDATIONS:"))
|
s.WriteString(diagnoseHeaderStyle.Render("[HINT] RECOMMENDATIONS:"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
if result.IsTruncated {
|
if result.IsTruncated {
|
||||||
s.WriteString(" 1. Re-run the backup process for this database\n")
|
s.WriteString(" 1. Re-run the backup process for this database\n")
|
||||||
@@ -341,17 +341,17 @@ func (m DiagnoseViewModel) renderClusterResults() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString(strings.Repeat("─", 60))
|
s.WriteString(strings.Repeat("-", 60))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("📊 CLUSTER SUMMARY: %d databases\n", len(m.results))))
|
s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("[STATS] CLUSTER SUMMARY: %d databases\n", len(m.results))))
|
||||||
s.WriteString(strings.Repeat("─", 60))
|
s.WriteString(strings.Repeat("-", 60))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
if invalidCount == 0 {
|
if invalidCount == 0 {
|
||||||
s.WriteString(diagnosePassStyle.Render("✅ All dumps are valid"))
|
s.WriteString(diagnosePassStyle.Render("[OK] All dumps are valid"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(diagnoseFailStyle.Render(fmt.Sprintf("❌ %d/%d dumps have issues", invalidCount, len(m.results))))
|
s.WriteString(diagnoseFailStyle.Render(fmt.Sprintf("[FAIL] %d/%d dumps have issues", invalidCount, len(m.results))))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -378,13 +378,13 @@ func (m DiagnoseViewModel) renderClusterResults() string {
|
|||||||
|
|
||||||
var status string
|
var status string
|
||||||
if r.IsValid {
|
if r.IsValid {
|
||||||
status = diagnosePassStyle.Render("✓")
|
status = diagnosePassStyle.Render("[+]")
|
||||||
} else if r.IsTruncated {
|
} else if r.IsTruncated {
|
||||||
status = diagnoseFailStyle.Render("✗ TRUNCATED")
|
status = diagnoseFailStyle.Render("[-] TRUNCATED")
|
||||||
} else if r.IsCorrupted {
|
} else if r.IsCorrupted {
|
||||||
status = diagnoseFailStyle.Render("✗ CORRUPTED")
|
status = diagnoseFailStyle.Render("[-] CORRUPTED")
|
||||||
} else {
|
} else {
|
||||||
status = diagnoseFailStyle.Render("✗ INVALID")
|
status = diagnoseFailStyle.Render("[-] INVALID")
|
||||||
}
|
}
|
||||||
|
|
||||||
line := fmt.Sprintf("%s %s %-35s %s",
|
line := fmt.Sprintf("%s %s %-35s %s",
|
||||||
@@ -405,7 +405,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
|
|||||||
if m.cursor < len(m.results) {
|
if m.cursor < len(m.results) {
|
||||||
selected := m.results[m.cursor]
|
selected := m.results[m.cursor]
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(strings.Repeat("─", 60))
|
s.WriteString(strings.Repeat("-", 60))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(diagnoseHeaderStyle.Render("Selected: " + selected.FileName))
|
s.WriteString(diagnoseHeaderStyle.Render("Selected: " + selected.FileName))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
@@ -413,7 +413,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
|
|||||||
// Show condensed details for selected
|
// Show condensed details for selected
|
||||||
if selected.Details != nil {
|
if selected.Details != nil {
|
||||||
if selected.Details.UnterminatedCopy {
|
if selected.Details.UnterminatedCopy {
|
||||||
s.WriteString(diagnoseFailStyle.Render(" ✗ Unterminated COPY: "))
|
s.WriteString(diagnoseFailStyle.Render(" [-] Unterminated COPY: "))
|
||||||
s.WriteString(selected.Details.LastCopyTable)
|
s.WriteString(selected.Details.LastCopyTable)
|
||||||
s.WriteString(fmt.Sprintf(" (line %d)\n", selected.Details.LastCopyLineNumber))
|
s.WriteString(fmt.Sprintf(" (line %d)\n", selected.Details.LastCopyLineNumber))
|
||||||
}
|
}
|
||||||
@@ -429,7 +429,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
|
|||||||
if i >= 2 {
|
if i >= 2 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
s.WriteString(diagnoseFailStyle.Render(" • "))
|
s.WriteString(diagnoseFailStyle.Render(" - "))
|
||||||
s.WriteString(truncate(e, 55))
|
s.WriteString(truncate(e, 55))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -208,7 +208,7 @@ func (dp *DirectoryPicker) View() string {
|
|||||||
if dp.allowFiles {
|
if dp.allowFiles {
|
||||||
pickerType = "File/Directory"
|
pickerType = "File/Directory"
|
||||||
}
|
}
|
||||||
header := fmt.Sprintf("📁 %s Picker - %s", pickerType, dp.currentPath)
|
header := fmt.Sprintf("[DIR] %s Picker - %s", pickerType, dp.currentPath)
|
||||||
content.WriteString(dp.styles.Header.Render(header))
|
content.WriteString(dp.styles.Header.Render(header))
|
||||||
content.WriteString("\n\n")
|
content.WriteString("\n\n")
|
||||||
|
|
||||||
@@ -216,13 +216,13 @@ func (dp *DirectoryPicker) View() string {
|
|||||||
for i, item := range dp.items {
|
for i, item := range dp.items {
|
||||||
var prefix string
|
var prefix string
|
||||||
if item.Name == ".." {
|
if item.Name == ".." {
|
||||||
prefix = "⬆️ "
|
prefix = "[UP] "
|
||||||
} else if item.Name == "Error reading directory" {
|
} else if item.Name == "Error reading directory" {
|
||||||
prefix = "❌ "
|
prefix = "[X] "
|
||||||
} else if item.IsDir {
|
} else if item.IsDir {
|
||||||
prefix = "📁 "
|
prefix = "[DIR] "
|
||||||
} else {
|
} else {
|
||||||
prefix = "📄 "
|
prefix = "[FILE] "
|
||||||
}
|
}
|
||||||
|
|
||||||
line := prefix + item.Name
|
line := prefix + item.Name
|
||||||
@@ -235,9 +235,9 @@ func (dp *DirectoryPicker) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Help text
|
// Help text
|
||||||
help := "\n↑/↓: Navigate • Enter: Open/Select File • s: Select Directory • q/Esc: Cancel"
|
help := "\nUp/Down: Navigate | Enter: Open/Select File | s: Select Directory | q/Esc: Cancel"
|
||||||
if !dp.allowFiles {
|
if !dp.allowFiles {
|
||||||
help = "\n↑/↓: Navigate • Enter: Open • s: Select Directory • q/Esc: Cancel"
|
help = "\nUp/Down: Navigate | Enter: Open | s: Select Directory | q/Esc: Cancel"
|
||||||
}
|
}
|
||||||
content.WriteString(dp.styles.Help.Render(help))
|
content.WriteString(dp.styles.Help.Render(help))
|
||||||
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package tui
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -59,7 +59,7 @@ func loadHistory(cfg *config.Config) []HistoryEntry {
|
|||||||
var entries []HistoryEntry
|
var entries []HistoryEntry
|
||||||
|
|
||||||
// Read backup files from backup directory
|
// Read backup files from backup directory
|
||||||
files, err := ioutil.ReadDir(cfg.BackupDir)
|
files, err := os.ReadDir(cfg.BackupDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return entries
|
return entries
|
||||||
}
|
}
|
||||||
@@ -74,6 +74,12 @@ func loadHistory(cfg *config.Config) []HistoryEntry {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get file info for ModTime
|
||||||
|
info, err := file.Info()
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
var backupType string
|
var backupType string
|
||||||
var database string
|
var database string
|
||||||
|
|
||||||
@@ -97,8 +103,8 @@ func loadHistory(cfg *config.Config) []HistoryEntry {
|
|||||||
entries = append(entries, HistoryEntry{
|
entries = append(entries, HistoryEntry{
|
||||||
Type: backupType,
|
Type: backupType,
|
||||||
Database: database,
|
Database: database,
|
||||||
Timestamp: file.ModTime(),
|
Timestamp: info.ModTime(),
|
||||||
Status: "✅ Completed",
|
Status: "[OK] Completed",
|
||||||
Filename: name,
|
Filename: name,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -185,11 +191,11 @@ func (m HistoryViewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
func (m HistoryViewModel) View() string {
|
func (m HistoryViewModel) View() string {
|
||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
header := titleStyle.Render("📜 Operation History")
|
header := titleStyle.Render("[HISTORY] Operation History")
|
||||||
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
||||||
|
|
||||||
if len(m.history) == 0 {
|
if len(m.history) == 0 {
|
||||||
s.WriteString("📭 No backup history found\n\n")
|
s.WriteString("[EMPTY] No backup history found\n\n")
|
||||||
} else {
|
} else {
|
||||||
maxVisible := 15 // Show max 15 items at once
|
maxVisible := 15 // Show max 15 items at once
|
||||||
|
|
||||||
@@ -205,7 +211,7 @@ func (m HistoryViewModel) View() string {
|
|||||||
|
|
||||||
// Show scroll indicators
|
// Show scroll indicators
|
||||||
if start > 0 {
|
if start > 0 {
|
||||||
s.WriteString(" ▲ More entries above...\n")
|
s.WriteString(" [^] More entries above...\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Display only visible entries
|
// Display only visible entries
|
||||||
@@ -227,13 +233,13 @@ func (m HistoryViewModel) View() string {
|
|||||||
|
|
||||||
// Show scroll indicator if more entries below
|
// Show scroll indicator if more entries below
|
||||||
if end < len(m.history) {
|
if end < len(m.history) {
|
||||||
s.WriteString(fmt.Sprintf(" ▼ %d more entries below...\n", len(m.history)-end))
|
s.WriteString(fmt.Sprintf(" [v] %d more entries below...\n", len(m.history)-end))
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString("⌨️ ↑/↓: Navigate • PgUp/PgDn: Jump • Home/End: First/Last • ESC: Back • q: Quit\n")
|
s.WriteString("[KEYS] Up/Down: Navigate - PgUp/PgDn: Jump - Home/End: First/Last - ESC: Back - q: Quit\n")
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -137,10 +137,10 @@ func (m InputModel) View() string {
|
|||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v\n\n", m.err)))
|
s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v\n\n", m.err)))
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString("⌨️ Type value • Enter: Confirm • ESC: Cancel\n")
|
s.WriteString("[KEYS] Type value | Enter: Confirm | ESC: Cancel\n")
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -89,12 +89,12 @@ func NewMenuModel(cfg *config.Config, log logger.Logger) *MenuModel {
|
|||||||
"Single Database Backup",
|
"Single Database Backup",
|
||||||
"Sample Database Backup (with ratio)",
|
"Sample Database Backup (with ratio)",
|
||||||
"Cluster Backup (all databases)",
|
"Cluster Backup (all databases)",
|
||||||
"────────────────────────────────",
|
"--------------------------------",
|
||||||
"Restore Single Database",
|
"Restore Single Database",
|
||||||
"Restore Cluster Backup",
|
"Restore Cluster Backup",
|
||||||
"Diagnose Backup File",
|
"Diagnose Backup File",
|
||||||
"List & Manage Backups",
|
"List & Manage Backups",
|
||||||
"────────────────────────────────",
|
"--------------------------------",
|
||||||
"View Active Operations",
|
"View Active Operations",
|
||||||
"Show Operation History",
|
"Show Operation History",
|
||||||
"Database Status & Health Check",
|
"Database Status & Health Check",
|
||||||
@@ -177,7 +177,7 @@ func (m *MenuModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
case 12: // Settings
|
case 12: // Settings
|
||||||
return m.handleSettings()
|
return m.handleSettings()
|
||||||
case 13: // Clear History
|
case 13: // Clear History
|
||||||
m.message = "🗑️ History cleared"
|
m.message = "[DEL] History cleared"
|
||||||
case 14: // Quit
|
case 14: // Quit
|
||||||
if m.cancel != nil {
|
if m.cancel != nil {
|
||||||
m.cancel()
|
m.cancel()
|
||||||
@@ -262,7 +262,7 @@ func (m *MenuModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
case 12: // Settings
|
case 12: // Settings
|
||||||
return m.handleSettings()
|
return m.handleSettings()
|
||||||
case 13: // Clear History
|
case 13: // Clear History
|
||||||
m.message = "🗑️ History cleared"
|
m.message = "[DEL] History cleared"
|
||||||
case 14: // Quit
|
case 14: // Quit
|
||||||
if m.cancel != nil {
|
if m.cancel != nil {
|
||||||
m.cancel()
|
m.cancel()
|
||||||
@@ -285,7 +285,7 @@ func (m *MenuModel) View() string {
|
|||||||
var s string
|
var s string
|
||||||
|
|
||||||
// Header
|
// Header
|
||||||
header := titleStyle.Render("🗄️ Database Backup Tool - Interactive Menu")
|
header := titleStyle.Render("[DB] Database Backup Tool - Interactive Menu")
|
||||||
s += fmt.Sprintf("\n%s\n\n", header)
|
s += fmt.Sprintf("\n%s\n\n", header)
|
||||||
|
|
||||||
if len(m.dbTypes) > 0 {
|
if len(m.dbTypes) > 0 {
|
||||||
@@ -299,7 +299,7 @@ func (m *MenuModel) View() string {
|
|||||||
}
|
}
|
||||||
selector := fmt.Sprintf("Target Engine: %s", strings.Join(options, menuStyle.Render(" | ")))
|
selector := fmt.Sprintf("Target Engine: %s", strings.Join(options, menuStyle.Render(" | ")))
|
||||||
s += dbSelectorLabelStyle.Render(selector) + "\n"
|
s += dbSelectorLabelStyle.Render(selector) + "\n"
|
||||||
hint := infoStyle.Render("Switch with ←/→ or t • Cluster backup requires PostgreSQL")
|
hint := infoStyle.Render("Switch with <-/-> or t | Cluster backup requires PostgreSQL")
|
||||||
s += hint + "\n"
|
s += hint + "\n"
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -326,7 +326,7 @@ func (m *MenuModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Footer
|
// Footer
|
||||||
footer := infoStyle.Render("\n⌨️ Press ↑/↓ to navigate • Enter to select • q to quit")
|
footer := infoStyle.Render("\n[KEYS] Press Up/Down to navigate | Enter to select | q to quit")
|
||||||
s += footer
|
s += footer
|
||||||
|
|
||||||
return s
|
return s
|
||||||
@@ -334,20 +334,20 @@ func (m *MenuModel) View() string {
|
|||||||
|
|
||||||
// handleSingleBackup opens database selector for single backup
|
// handleSingleBackup opens database selector for single backup
|
||||||
func (m *MenuModel) handleSingleBackup() (tea.Model, tea.Cmd) {
|
func (m *MenuModel) handleSingleBackup() (tea.Model, tea.Cmd) {
|
||||||
selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "🗄️ Single Database Backup", "single")
|
selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[DB] Single Database Backup", "single")
|
||||||
return selector, selector.Init()
|
return selector, selector.Init()
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleSampleBackup opens database selector for sample backup
|
// handleSampleBackup opens database selector for sample backup
|
||||||
func (m *MenuModel) handleSampleBackup() (tea.Model, tea.Cmd) {
|
func (m *MenuModel) handleSampleBackup() (tea.Model, tea.Cmd) {
|
||||||
selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "📊 Sample Database Backup", "sample")
|
selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[STATS] Sample Database Backup", "sample")
|
||||||
return selector, selector.Init()
|
return selector, selector.Init()
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleClusterBackup shows confirmation and executes cluster backup
|
// handleClusterBackup shows confirmation and executes cluster backup
|
||||||
func (m *MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
|
func (m *MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
|
||||||
if !m.config.IsPostgreSQL() {
|
if !m.config.IsPostgreSQL() {
|
||||||
m.message = errorStyle.Render("❌ Cluster backup is available only for PostgreSQL targets")
|
m.message = errorStyle.Render("[FAIL] Cluster backup is available only for PostgreSQL targets")
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
// Skip confirmation in auto-confirm mode
|
// Skip confirmation in auto-confirm mode
|
||||||
@@ -356,7 +356,7 @@ func (m *MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
|
|||||||
return executor, executor.Init()
|
return executor, executor.Init()
|
||||||
}
|
}
|
||||||
confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
|
confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
|
||||||
"🗄️ Cluster Backup",
|
"[DB] Cluster Backup",
|
||||||
"This will backup ALL databases in the cluster. Continue?",
|
"This will backup ALL databases in the cluster. Continue?",
|
||||||
func() (tea.Model, tea.Cmd) {
|
func() (tea.Model, tea.Cmd) {
|
||||||
executor := NewBackupExecution(m.config, m.logger, m, m.ctx, "cluster", "", 0)
|
executor := NewBackupExecution(m.config, m.logger, m, m.ctx, "cluster", "", 0)
|
||||||
@@ -399,7 +399,7 @@ func (m *MenuModel) handleRestoreSingle() (tea.Model, tea.Cmd) {
|
|||||||
// handleRestoreCluster opens archive browser for cluster restore
|
// handleRestoreCluster opens archive browser for cluster restore
|
||||||
func (m *MenuModel) handleRestoreCluster() (tea.Model, tea.Cmd) {
|
func (m *MenuModel) handleRestoreCluster() (tea.Model, tea.Cmd) {
|
||||||
if !m.config.IsPostgreSQL() {
|
if !m.config.IsPostgreSQL() {
|
||||||
m.message = errorStyle.Render("❌ Cluster restore is available only for PostgreSQL")
|
m.message = errorStyle.Render("[FAIL] Cluster restore is available only for PostgreSQL")
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
browser := NewArchiveBrowser(m.config, m.logger, m, m.ctx, "restore-cluster")
|
browser := NewArchiveBrowser(m.config, m.logger, m, m.ctx, "restore-cluster")
|
||||||
@@ -428,7 +428,7 @@ func (m *MenuModel) applyDatabaseSelection() {
|
|||||||
|
|
||||||
selection := m.dbTypes[m.dbTypeCursor]
|
selection := m.dbTypes[m.dbTypeCursor]
|
||||||
if err := m.config.SetDatabaseType(selection.value); err != nil {
|
if err := m.config.SetDatabaseType(selection.value); err != nil {
|
||||||
m.message = errorStyle.Render(fmt.Sprintf("❌ %v", err))
|
m.message = errorStyle.Render(fmt.Sprintf("[FAIL] %v", err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -437,7 +437,7 @@ func (m *MenuModel) applyDatabaseSelection() {
|
|||||||
m.config.Port = m.config.GetDefaultPort()
|
m.config.Port = m.config.GetDefaultPort()
|
||||||
}
|
}
|
||||||
|
|
||||||
m.message = successStyle.Render(fmt.Sprintf("🔀 Target database set to %s", m.config.DisplayDatabaseType()))
|
m.message = successStyle.Render(fmt.Sprintf("[SWITCH] Target database set to %s", m.config.DisplayDatabaseType()))
|
||||||
if m.logger != nil {
|
if m.logger != nil {
|
||||||
m.logger.Info("updated target database type", "type", m.config.DatabaseType, "port", m.config.Port)
|
m.logger.Info("updated target database type", "type", m.config.DatabaseType, "port", m.config.Port)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -49,14 +49,14 @@ func (m OperationsViewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
func (m OperationsViewModel) View() string {
|
func (m OperationsViewModel) View() string {
|
||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
header := titleStyle.Render("📊 Active Operations")
|
header := titleStyle.Render("[STATS] Active Operations")
|
||||||
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
||||||
|
|
||||||
s.WriteString("Currently running operations:\n\n")
|
s.WriteString("Currently running operations:\n\n")
|
||||||
s.WriteString(infoStyle.Render("📭 No active operations"))
|
s.WriteString(infoStyle.Render("[NONE] No active operations"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
s.WriteString("⌨️ Press any key to return to menu\n")
|
s.WriteString("[KEYS] Press any key to return to menu\n")
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -110,10 +111,10 @@ type restoreCompleteMsg struct {
|
|||||||
|
|
||||||
func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string, saveDebugLog bool) tea.Cmd {
|
func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string, saveDebugLog bool) tea.Cmd {
|
||||||
return func() tea.Msg {
|
return func() tea.Msg {
|
||||||
// Use configurable cluster timeout (minutes) from config; default set in config.New()
|
// NO TIMEOUT for restore operations - a restore takes as long as it takes
|
||||||
// Use parent context to inherit cancellation from TUI
|
// Large databases with large objects can take many hours
|
||||||
restoreTimeout := time.Duration(cfg.ClusterTimeoutMinutes) * time.Minute
|
// Only manual cancellation (Ctrl+C) should stop the restore
|
||||||
ctx, cancel := context.WithTimeout(parentCtx, restoreTimeout)
|
ctx, cancel := context.WithCancel(parentCtx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
@@ -137,8 +138,8 @@ func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config
|
|||||||
// This matches how cluster restore works - uses CLI tools, not database connections
|
// This matches how cluster restore works - uses CLI tools, not database connections
|
||||||
droppedCount := 0
|
droppedCount := 0
|
||||||
for _, dbName := range existingDBs {
|
for _, dbName := range existingDBs {
|
||||||
// Create timeout context for each database drop (30 seconds per DB)
|
// Create timeout context for each database drop (5 minutes per DB - large DBs take time)
|
||||||
dropCtx, dropCancel := context.WithTimeout(ctx, 30*time.Second)
|
dropCtx, dropCancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||||
if err := dropDatabaseCLI(dropCtx, cfg, dbName); err != nil {
|
if err := dropDatabaseCLI(dropCtx, cfg, dbName); err != nil {
|
||||||
log.Warn("Failed to drop database", "name", dbName, "error", err)
|
log.Warn("Failed to drop database", "name", dbName, "error", err)
|
||||||
// Continue with other databases
|
// Continue with other databases
|
||||||
@@ -157,8 +158,9 @@ func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config
|
|||||||
|
|
||||||
// Enable debug logging if requested
|
// Enable debug logging if requested
|
||||||
if saveDebugLog {
|
if saveDebugLog {
|
||||||
// Generate debug log path based on archive name and timestamp
|
// Generate debug log path using configured WorkDir
|
||||||
debugLogPath := fmt.Sprintf("/tmp/dbbackup-restore-debug-%s.json", time.Now().Format("20060102-150405"))
|
workDir := cfg.GetEffectiveWorkDir()
|
||||||
|
debugLogPath := filepath.Join(workDir, fmt.Sprintf("dbbackup-restore-debug-%s.json", time.Now().Format("20060102-150405")))
|
||||||
engine.SetDebugLogPath(debugLogPath)
|
engine.SetDebugLogPath(debugLogPath)
|
||||||
log.Info("Debug logging enabled", "path", debugLogPath)
|
log.Info("Debug logging enabled", "path", debugLogPath)
|
||||||
}
|
}
|
||||||
@@ -283,7 +285,7 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
if !m.done && !m.cancelling {
|
if !m.done && !m.cancelling {
|
||||||
// User requested cancellation - cancel the context
|
// User requested cancellation - cancel the context
|
||||||
m.cancelling = true
|
m.cancelling = true
|
||||||
m.status = "⏹️ Cancelling restore... (please wait)"
|
m.status = "[STOP] Cancelling restore... (please wait)"
|
||||||
m.phase = "Cancelling"
|
m.phase = "Cancelling"
|
||||||
if m.cancel != nil {
|
if m.cancel != nil {
|
||||||
m.cancel()
|
m.cancel()
|
||||||
@@ -295,7 +297,7 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
case "q":
|
case "q":
|
||||||
if !m.done && !m.cancelling {
|
if !m.done && !m.cancelling {
|
||||||
m.cancelling = true
|
m.cancelling = true
|
||||||
m.status = "⏹️ Cancelling restore... (please wait)"
|
m.status = "[STOP] Cancelling restore... (please wait)"
|
||||||
m.phase = "Cancelling"
|
m.phase = "Cancelling"
|
||||||
if m.cancel != nil {
|
if m.cancel != nil {
|
||||||
m.cancel()
|
m.cancel()
|
||||||
@@ -319,9 +321,9 @@ func (m RestoreExecutionModel) View() string {
|
|||||||
s.Grow(512) // Pre-allocate estimated capacity for better performance
|
s.Grow(512) // Pre-allocate estimated capacity for better performance
|
||||||
|
|
||||||
// Title
|
// Title
|
||||||
title := "💾 Restoring Database"
|
title := "[RESTORE] Restoring Database"
|
||||||
if m.restoreType == "restore-cluster" {
|
if m.restoreType == "restore-cluster" {
|
||||||
title = "💾 Restoring Cluster"
|
title = "[RESTORE] Restoring Cluster"
|
||||||
}
|
}
|
||||||
s.WriteString(titleStyle.Render(title))
|
s.WriteString(titleStyle.Render(title))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
@@ -336,12 +338,12 @@ func (m RestoreExecutionModel) View() string {
|
|||||||
if m.done {
|
if m.done {
|
||||||
// Show result
|
// Show result
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(errorStyle.Render("❌ Restore Failed"))
|
s.WriteString(errorStyle.Render("[FAIL] Restore Failed"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(errorStyle.Render(fmt.Sprintf("Error: %v", m.err)))
|
s.WriteString(errorStyle.Render(fmt.Sprintf("Error: %v", m.err)))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(successStyle.Render("✅ Restore Completed Successfully"))
|
s.WriteString(successStyle.Render("[OK] Restore Completed Successfully"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(successStyle.Render(m.result))
|
s.WriteString(successStyle.Render(m.result))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
@@ -349,7 +351,7 @@ func (m RestoreExecutionModel) View() string {
|
|||||||
|
|
||||||
s.WriteString(fmt.Sprintf("\nElapsed Time: %s\n", formatDuration(m.elapsed)))
|
s.WriteString(fmt.Sprintf("\nElapsed Time: %s\n", formatDuration(m.elapsed)))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(infoStyle.Render("⌨️ Press Enter to continue"))
|
s.WriteString(infoStyle.Render("[KEYS] Press Enter to continue"))
|
||||||
} else {
|
} else {
|
||||||
// Show progress
|
// Show progress
|
||||||
s.WriteString(fmt.Sprintf("Phase: %s\n", m.phase))
|
s.WriteString(fmt.Sprintf("Phase: %s\n", m.phase))
|
||||||
@@ -371,7 +373,7 @@ func (m RestoreExecutionModel) View() string {
|
|||||||
// Elapsed time
|
// Elapsed time
|
||||||
s.WriteString(fmt.Sprintf("Elapsed: %s\n", formatDuration(m.elapsed)))
|
s.WriteString(fmt.Sprintf("Elapsed: %s\n", formatDuration(m.elapsed)))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(infoStyle.Render("⌨️ Press Ctrl+C to cancel"))
|
s.WriteString(infoStyle.Render("[KEYS] Press Ctrl+C to cancel"))
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
|
|||||||
@@ -106,7 +106,8 @@ type safetyCheckCompleteMsg struct {
|
|||||||
|
|
||||||
func runSafetyChecks(cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string) tea.Cmd {
|
func runSafetyChecks(cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string) tea.Cmd {
|
||||||
return func() tea.Msg {
|
return func() tea.Msg {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
// 10 minutes for safety checks - large archives can take a long time to diagnose
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
safety := restore.NewSafety(cfg, log)
|
safety := restore.NewSafety(cfg, log)
|
||||||
@@ -263,7 +264,7 @@ func (m RestorePreviewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
// Toggle cluster cleanup
|
// Toggle cluster cleanup
|
||||||
m.cleanClusterFirst = !m.cleanClusterFirst
|
m.cleanClusterFirst = !m.cleanClusterFirst
|
||||||
if m.cleanClusterFirst {
|
if m.cleanClusterFirst {
|
||||||
m.message = checkWarningStyle.Render(fmt.Sprintf("⚠️ Will drop %d existing database(s) before restore", m.existingDBCount))
|
m.message = checkWarningStyle.Render(fmt.Sprintf("[WARN] Will drop %d existing database(s) before restore", m.existingDBCount))
|
||||||
} else {
|
} else {
|
||||||
m.message = fmt.Sprintf("Clean cluster first: disabled")
|
m.message = fmt.Sprintf("Clean cluster first: disabled")
|
||||||
}
|
}
|
||||||
@@ -277,7 +278,7 @@ func (m RestorePreviewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
// Toggle debug log saving
|
// Toggle debug log saving
|
||||||
m.saveDebugLog = !m.saveDebugLog
|
m.saveDebugLog = !m.saveDebugLog
|
||||||
if m.saveDebugLog {
|
if m.saveDebugLog {
|
||||||
m.message = infoStyle.Render("📋 Debug log: enabled (will save detailed report on failure)")
|
m.message = infoStyle.Render("[DEBUG] Debug log: enabled (will save detailed report on failure)")
|
||||||
} else {
|
} else {
|
||||||
m.message = "Debug log: disabled"
|
m.message = "Debug log: disabled"
|
||||||
}
|
}
|
||||||
@@ -287,7 +288,7 @@ func (m RestorePreviewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
if m.workDir == "" {
|
if m.workDir == "" {
|
||||||
// Set to backup directory as default alternative
|
// Set to backup directory as default alternative
|
||||||
m.workDir = m.config.BackupDir
|
m.workDir = m.config.BackupDir
|
||||||
m.message = infoStyle.Render(fmt.Sprintf("📁 Work directory set to: %s", m.workDir))
|
m.message = infoStyle.Render(fmt.Sprintf("[DIR] Work directory set to: %s", m.workDir))
|
||||||
} else {
|
} else {
|
||||||
// Clear work directory (use system temp)
|
// Clear work directory (use system temp)
|
||||||
m.workDir = ""
|
m.workDir = ""
|
||||||
@@ -301,7 +302,7 @@ func (m RestorePreviewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !m.canProceed {
|
if !m.canProceed {
|
||||||
m.message = errorStyle.Render("❌ Cannot proceed - critical safety checks failed")
|
m.message = errorStyle.Render("[FAIL] Cannot proceed - critical safety checks failed")
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -318,15 +319,15 @@ func (m RestorePreviewModel) View() string {
|
|||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
// Title
|
// Title
|
||||||
title := "🔍 Restore Preview"
|
title := "Restore Preview"
|
||||||
if m.mode == "restore-cluster" {
|
if m.mode == "restore-cluster" {
|
||||||
title = "🔍 Cluster Restore Preview"
|
title = "Cluster Restore Preview"
|
||||||
}
|
}
|
||||||
s.WriteString(titleStyle.Render(title))
|
s.WriteString(titleStyle.Render(title))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
// Archive Information
|
// Archive Information
|
||||||
s.WriteString(archiveHeaderStyle.Render("📦 Archive Information"))
|
s.WriteString(archiveHeaderStyle.Render("[ARCHIVE] Information"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(fmt.Sprintf(" File: %s\n", m.archive.Name))
|
s.WriteString(fmt.Sprintf(" File: %s\n", m.archive.Name))
|
||||||
s.WriteString(fmt.Sprintf(" Format: %s\n", m.archive.Format.String()))
|
s.WriteString(fmt.Sprintf(" Format: %s\n", m.archive.Format.String()))
|
||||||
@@ -339,25 +340,25 @@ func (m RestorePreviewModel) View() string {
|
|||||||
|
|
||||||
// Target Information
|
// Target Information
|
||||||
if m.mode == "restore-single" {
|
if m.mode == "restore-single" {
|
||||||
s.WriteString(archiveHeaderStyle.Render("🎯 Target Information"))
|
s.WriteString(archiveHeaderStyle.Render("[TARGET] Information"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(fmt.Sprintf(" Database: %s\n", m.targetDB))
|
s.WriteString(fmt.Sprintf(" Database: %s\n", m.targetDB))
|
||||||
s.WriteString(fmt.Sprintf(" Host: %s:%d\n", m.config.Host, m.config.Port))
|
s.WriteString(fmt.Sprintf(" Host: %s:%d\n", m.config.Host, m.config.Port))
|
||||||
|
|
||||||
cleanIcon := "✗"
|
cleanIcon := "[N]"
|
||||||
if m.cleanFirst {
|
if m.cleanFirst {
|
||||||
cleanIcon = "✓"
|
cleanIcon = "[Y]"
|
||||||
}
|
}
|
||||||
s.WriteString(fmt.Sprintf(" Clean First: %s %v\n", cleanIcon, m.cleanFirst))
|
s.WriteString(fmt.Sprintf(" Clean First: %s %v\n", cleanIcon, m.cleanFirst))
|
||||||
|
|
||||||
createIcon := "✗"
|
createIcon := "[N]"
|
||||||
if m.createIfMissing {
|
if m.createIfMissing {
|
||||||
createIcon = "✓"
|
createIcon = "[Y]"
|
||||||
}
|
}
|
||||||
s.WriteString(fmt.Sprintf(" Create If Missing: %s %v\n", createIcon, m.createIfMissing))
|
s.WriteString(fmt.Sprintf(" Create If Missing: %s %v\n", createIcon, m.createIfMissing))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
} else if m.mode == "restore-cluster" {
|
} else if m.mode == "restore-cluster" {
|
||||||
s.WriteString(archiveHeaderStyle.Render("🎯 Cluster Restore Options"))
|
s.WriteString(archiveHeaderStyle.Render("[CLUSTER] Restore Options"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(fmt.Sprintf(" Host: %s:%d\n", m.config.Host, m.config.Port))
|
s.WriteString(fmt.Sprintf(" Host: %s:%d\n", m.config.Host, m.config.Port))
|
||||||
|
|
||||||
@@ -375,10 +376,10 @@ func (m RestorePreviewModel) View() string {
|
|||||||
s.WriteString(fmt.Sprintf(" - %s\n", db))
|
s.WriteString(fmt.Sprintf(" - %s\n", db))
|
||||||
}
|
}
|
||||||
|
|
||||||
cleanIcon := "✗"
|
cleanIcon := "[N]"
|
||||||
cleanStyle := infoStyle
|
cleanStyle := infoStyle
|
||||||
if m.cleanClusterFirst {
|
if m.cleanClusterFirst {
|
||||||
cleanIcon = "✓"
|
cleanIcon = "[Y]"
|
||||||
cleanStyle = checkWarningStyle
|
cleanStyle = checkWarningStyle
|
||||||
}
|
}
|
||||||
s.WriteString(cleanStyle.Render(fmt.Sprintf(" Clean All First: %s %v (press 'c' to toggle)\n", cleanIcon, m.cleanClusterFirst)))
|
s.WriteString(cleanStyle.Render(fmt.Sprintf(" Clean All First: %s %v (press 'c' to toggle)\n", cleanIcon, m.cleanClusterFirst)))
|
||||||
@@ -389,7 +390,7 @@ func (m RestorePreviewModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Safety Checks
|
// Safety Checks
|
||||||
s.WriteString(archiveHeaderStyle.Render("🛡️ Safety Checks"))
|
s.WriteString(archiveHeaderStyle.Render("[SAFETY] Checks"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
if m.checking {
|
if m.checking {
|
||||||
@@ -397,21 +398,21 @@ func (m RestorePreviewModel) View() string {
|
|||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
} else {
|
} else {
|
||||||
for _, check := range m.safetyChecks {
|
for _, check := range m.safetyChecks {
|
||||||
icon := "○"
|
icon := "[ ]"
|
||||||
style := checkPendingStyle
|
style := checkPendingStyle
|
||||||
|
|
||||||
switch check.Status {
|
switch check.Status {
|
||||||
case "passed":
|
case "passed":
|
||||||
icon = "✓"
|
icon = "[+]"
|
||||||
style = checkPassedStyle
|
style = checkPassedStyle
|
||||||
case "failed":
|
case "failed":
|
||||||
icon = "✗"
|
icon = "[-]"
|
||||||
style = checkFailedStyle
|
style = checkFailedStyle
|
||||||
case "warning":
|
case "warning":
|
||||||
icon = "⚠"
|
icon = "[!]"
|
||||||
style = checkWarningStyle
|
style = checkWarningStyle
|
||||||
case "checking":
|
case "checking":
|
||||||
icon = "⟳"
|
icon = "[~]"
|
||||||
style = checkPendingStyle
|
style = checkPendingStyle
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -427,13 +428,13 @@ func (m RestorePreviewModel) View() string {
|
|||||||
|
|
||||||
// Warnings
|
// Warnings
|
||||||
if m.cleanFirst {
|
if m.cleanFirst {
|
||||||
s.WriteString(checkWarningStyle.Render("⚠️ Warning: Clean-first enabled"))
|
s.WriteString(checkWarningStyle.Render("[WARN] Warning: Clean-first enabled"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(infoStyle.Render(" All existing data in target database will be dropped!"))
|
s.WriteString(infoStyle.Render(" All existing data in target database will be dropped!"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
}
|
}
|
||||||
if m.cleanClusterFirst && m.existingDBCount > 0 {
|
if m.cleanClusterFirst && m.existingDBCount > 0 {
|
||||||
s.WriteString(checkWarningStyle.Render("🔥 WARNING: Cluster cleanup enabled"))
|
s.WriteString(checkWarningStyle.Render("[DANGER] WARNING: Cluster cleanup enabled"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(checkWarningStyle.Render(fmt.Sprintf(" %d existing database(s) will be DROPPED before restore!", m.existingDBCount)))
|
s.WriteString(checkWarningStyle.Render(fmt.Sprintf(" %d existing database(s) will be DROPPED before restore!", m.existingDBCount)))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
@@ -442,36 +443,36 @@ func (m RestorePreviewModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Advanced Options
|
// Advanced Options
|
||||||
s.WriteString(archiveHeaderStyle.Render("⚙️ Advanced Options"))
|
s.WriteString(archiveHeaderStyle.Render("[OPTIONS] Advanced"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
// Work directory option
|
// Work directory option
|
||||||
workDirIcon := "✗"
|
workDirIcon := "[-]"
|
||||||
workDirStyle := infoStyle
|
workDirStyle := infoStyle
|
||||||
workDirValue := "(system temp)"
|
workDirValue := "(system temp)"
|
||||||
if m.workDir != "" {
|
if m.workDir != "" {
|
||||||
workDirIcon = "✓"
|
workDirIcon = "[+]"
|
||||||
workDirStyle = checkPassedStyle
|
workDirStyle = checkPassedStyle
|
||||||
workDirValue = m.workDir
|
workDirValue = m.workDir
|
||||||
}
|
}
|
||||||
s.WriteString(workDirStyle.Render(fmt.Sprintf(" %s Work Dir: %s (press 'w' to toggle)", workDirIcon, workDirValue)))
|
s.WriteString(workDirStyle.Render(fmt.Sprintf(" %s Work Dir: %s (press 'w' to toggle)", workDirIcon, workDirValue)))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
if m.workDir == "" {
|
if m.workDir == "" {
|
||||||
s.WriteString(infoStyle.Render(" ⚠️ Large archives need more space than /tmp may have"))
|
s.WriteString(infoStyle.Render(" [WARN] Large archives need more space than /tmp may have"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Debug log option
|
// Debug log option
|
||||||
debugIcon := "✗"
|
debugIcon := "[-]"
|
||||||
debugStyle := infoStyle
|
debugStyle := infoStyle
|
||||||
if m.saveDebugLog {
|
if m.saveDebugLog {
|
||||||
debugIcon = "✓"
|
debugIcon = "[+]"
|
||||||
debugStyle = checkPassedStyle
|
debugStyle = checkPassedStyle
|
||||||
}
|
}
|
||||||
s.WriteString(debugStyle.Render(fmt.Sprintf(" %s Debug Log: %v (press 'd' to toggle)", debugIcon, m.saveDebugLog)))
|
s.WriteString(debugStyle.Render(fmt.Sprintf(" %s Debug Log: %v (press 'd' to toggle)", debugIcon, m.saveDebugLog)))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
if m.saveDebugLog {
|
if m.saveDebugLog {
|
||||||
s.WriteString(infoStyle.Render(" Saves detailed error report to /tmp on failure"))
|
s.WriteString(infoStyle.Render(fmt.Sprintf(" Saves detailed error report to %s on failure", m.config.GetEffectiveWorkDir())))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
}
|
}
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
@@ -484,25 +485,25 @@ func (m RestorePreviewModel) View() string {
|
|||||||
|
|
||||||
// Footer
|
// Footer
|
||||||
if m.checking {
|
if m.checking {
|
||||||
s.WriteString(infoStyle.Render("⌨️ Please wait..."))
|
s.WriteString(infoStyle.Render("Please wait..."))
|
||||||
} else if m.canProceed {
|
} else if m.canProceed {
|
||||||
s.WriteString(successStyle.Render("✅ Ready to restore"))
|
s.WriteString(successStyle.Render("[OK] Ready to restore"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
if m.mode == "restore-single" {
|
if m.mode == "restore-single" {
|
||||||
s.WriteString(infoStyle.Render("⌨️ t: Clean-first | c: Create | w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
s.WriteString(infoStyle.Render("t: Clean-first | c: Create | w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
||||||
} else if m.mode == "restore-cluster" {
|
} else if m.mode == "restore-cluster" {
|
||||||
if m.existingDBCount > 0 {
|
if m.existingDBCount > 0 {
|
||||||
s.WriteString(infoStyle.Render("⌨️ c: Cleanup | w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
s.WriteString(infoStyle.Render("c: Cleanup | w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(infoStyle.Render("⌨️ w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
s.WriteString(infoStyle.Render("w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(infoStyle.Render("⌨️ w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
s.WriteString(infoStyle.Render("w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(errorStyle.Render("❌ Cannot proceed - please fix errors above"))
|
s.WriteString(errorStyle.Render("[FAIL] Cannot proceed - please fix errors above"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(infoStyle.Render("⌨️ Esc: Go back"))
|
s.WriteString(infoStyle.Render("Esc: Go back"))
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
|
|||||||
@@ -459,9 +459,9 @@ func (m SettingsModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
if m.cursor < len(m.settings) {
|
if m.cursor < len(m.settings) {
|
||||||
setting := m.settings[m.cursor]
|
setting := m.settings[m.cursor]
|
||||||
if err := setting.Update(m.config, selectedPath); err != nil {
|
if err := setting.Update(m.config, selectedPath); err != nil {
|
||||||
m.message = "❌ Error: " + err.Error()
|
m.message = "[FAIL] Error: " + err.Error()
|
||||||
} else {
|
} else {
|
||||||
m.message = "✅ Directory updated: " + selectedPath
|
m.message = "[OK] Directory updated: " + selectedPath
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m.browsingDir = false
|
m.browsingDir = false
|
||||||
@@ -482,7 +482,6 @@ func (m SettingsModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
|
|
||||||
switch msg.String() {
|
switch msg.String() {
|
||||||
case "ctrl+c", "q", "esc":
|
case "ctrl+c", "q", "esc":
|
||||||
m.quitting = true
|
|
||||||
return m.parent, nil
|
return m.parent, nil
|
||||||
|
|
||||||
case "up", "k":
|
case "up", "k":
|
||||||
@@ -501,9 +500,9 @@ func (m SettingsModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
currentSetting := m.settings[m.cursor]
|
currentSetting := m.settings[m.cursor]
|
||||||
if currentSetting.Type == "selector" {
|
if currentSetting.Type == "selector" {
|
||||||
if err := currentSetting.Update(m.config, ""); err != nil {
|
if err := currentSetting.Update(m.config, ""); err != nil {
|
||||||
m.message = errorStyle.Render(fmt.Sprintf("❌ %s", err.Error()))
|
m.message = errorStyle.Render(fmt.Sprintf("[FAIL] %s", err.Error()))
|
||||||
} else {
|
} else {
|
||||||
m.message = successStyle.Render(fmt.Sprintf("✅ Updated %s", currentSetting.DisplayName))
|
m.message = successStyle.Render(fmt.Sprintf("[OK] Updated %s", currentSetting.DisplayName))
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
@@ -516,11 +515,11 @@ func (m SettingsModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
if m.settings[m.cursor].Type == "path" {
|
if m.settings[m.cursor].Type == "path" {
|
||||||
return m.openDirectoryBrowser()
|
return m.openDirectoryBrowser()
|
||||||
} else {
|
} else {
|
||||||
m.message = "❌ Tab key only works on directory path fields"
|
m.message = "[FAIL] Tab key only works on directory path fields"
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
m.message = "❌ Invalid selection"
|
m.message = "[FAIL] Invalid selection"
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -598,18 +597,18 @@ func (m SettingsModel) saveEditedValue() (tea.Model, tea.Cmd) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if setting == nil {
|
if setting == nil {
|
||||||
m.message = errorStyle.Render("❌ Setting not found")
|
m.message = errorStyle.Render("[FAIL] Setting not found")
|
||||||
m.editing = false
|
m.editing = false
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the configuration
|
// Update the configuration
|
||||||
if err := setting.Update(m.config, m.editingValue); err != nil {
|
if err := setting.Update(m.config, m.editingValue); err != nil {
|
||||||
m.message = errorStyle.Render(fmt.Sprintf("❌ %s", err.Error()))
|
m.message = errorStyle.Render(fmt.Sprintf("[FAIL] %s", err.Error()))
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m.message = successStyle.Render(fmt.Sprintf("✅ Updated %s", setting.DisplayName))
|
m.message = successStyle.Render(fmt.Sprintf("[OK] Updated %s", setting.DisplayName))
|
||||||
m.editing = false
|
m.editing = false
|
||||||
m.editingField = ""
|
m.editingField = ""
|
||||||
m.editingValue = ""
|
m.editingValue = ""
|
||||||
@@ -629,7 +628,7 @@ func (m SettingsModel) resetToDefaults() (tea.Model, tea.Cmd) {
|
|||||||
newConfig.DatabaseType = m.config.DatabaseType
|
newConfig.DatabaseType = m.config.DatabaseType
|
||||||
|
|
||||||
*m.config = *newConfig
|
*m.config = *newConfig
|
||||||
m.message = successStyle.Render("✅ Settings reset to defaults")
|
m.message = successStyle.Render("[OK] Settings reset to defaults")
|
||||||
|
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
@@ -637,19 +636,19 @@ func (m SettingsModel) resetToDefaults() (tea.Model, tea.Cmd) {
|
|||||||
// saveSettings validates and saves current settings
|
// saveSettings validates and saves current settings
|
||||||
func (m SettingsModel) saveSettings() (tea.Model, tea.Cmd) {
|
func (m SettingsModel) saveSettings() (tea.Model, tea.Cmd) {
|
||||||
if err := m.config.Validate(); err != nil {
|
if err := m.config.Validate(); err != nil {
|
||||||
m.message = errorStyle.Render(fmt.Sprintf("❌ Validation failed: %s", err.Error()))
|
m.message = errorStyle.Render(fmt.Sprintf("[FAIL] Validation failed: %s", err.Error()))
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Optimize CPU settings if auto-detect is enabled
|
// Optimize CPU settings if auto-detect is enabled
|
||||||
if m.config.AutoDetectCores {
|
if m.config.AutoDetectCores {
|
||||||
if err := m.config.OptimizeForCPU(); err != nil {
|
if err := m.config.OptimizeForCPU(); err != nil {
|
||||||
m.message = errorStyle.Render(fmt.Sprintf("❌ CPU optimization failed: %s", err.Error()))
|
m.message = errorStyle.Render(fmt.Sprintf("[FAIL] CPU optimization failed: %s", err.Error()))
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m.message = successStyle.Render("✅ Settings validated and saved")
|
m.message = successStyle.Render("[OK] Settings validated and saved")
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -672,11 +671,11 @@ func (m SettingsModel) cycleDatabaseType() (tea.Model, tea.Cmd) {
|
|||||||
|
|
||||||
// Update config
|
// Update config
|
||||||
if err := m.config.SetDatabaseType(newType); err != nil {
|
if err := m.config.SetDatabaseType(newType); err != nil {
|
||||||
m.message = errorStyle.Render(fmt.Sprintf("❌ Failed to set database type: %s", err.Error()))
|
m.message = errorStyle.Render(fmt.Sprintf("[FAIL] Failed to set database type: %s", err.Error()))
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m.message = successStyle.Render(fmt.Sprintf("✅ Database type set to %s", m.config.DisplayDatabaseType()))
|
m.message = successStyle.Render(fmt.Sprintf("[OK] Database type set to %s", m.config.DisplayDatabaseType()))
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -689,7 +688,7 @@ func (m SettingsModel) View() string {
|
|||||||
var b strings.Builder
|
var b strings.Builder
|
||||||
|
|
||||||
// Header
|
// Header
|
||||||
header := titleStyle.Render("⚙️ Configuration Settings")
|
header := titleStyle.Render("[CFG] Configuration Settings")
|
||||||
b.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
b.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
||||||
|
|
||||||
// Settings list
|
// Settings list
|
||||||
@@ -711,7 +710,7 @@ func (m SettingsModel) View() string {
|
|||||||
}
|
}
|
||||||
line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, editValue)
|
line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, editValue)
|
||||||
b.WriteString(selectedStyle.Render(line))
|
b.WriteString(selectedStyle.Render(line))
|
||||||
b.WriteString(" ✏️")
|
b.WriteString(" [EDIT]")
|
||||||
} else {
|
} else {
|
||||||
line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, displayValue)
|
line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, displayValue)
|
||||||
b.WriteString(selectedStyle.Render(line))
|
b.WriteString(selectedStyle.Render(line))
|
||||||
@@ -748,7 +747,7 @@ func (m SettingsModel) View() string {
|
|||||||
// Current configuration summary
|
// Current configuration summary
|
||||||
if !m.editing {
|
if !m.editing {
|
||||||
b.WriteString("\n")
|
b.WriteString("\n")
|
||||||
b.WriteString(infoStyle.Render("📋 Current Configuration:"))
|
b.WriteString(infoStyle.Render("[LOG] Current Configuration:"))
|
||||||
b.WriteString("\n")
|
b.WriteString("\n")
|
||||||
|
|
||||||
summary := []string{
|
summary := []string{
|
||||||
@@ -776,16 +775,16 @@ func (m SettingsModel) View() string {
|
|||||||
// Footer with instructions
|
// Footer with instructions
|
||||||
var footer string
|
var footer string
|
||||||
if m.editing {
|
if m.editing {
|
||||||
footer = infoStyle.Render("\n⌨️ Type new value • Enter to save • Esc to cancel")
|
footer = infoStyle.Render("\n[KEYS] Type new value | Enter to save | Esc to cancel")
|
||||||
} else {
|
} else {
|
||||||
if m.browsingDir {
|
if m.browsingDir {
|
||||||
footer = infoStyle.Render("\n⌨️ ↑/↓ navigate directories • Enter open • Space select • Tab/Esc back to settings")
|
footer = infoStyle.Render("\n[KEYS] Up/Down navigate directories | Enter open | Space select | Tab/Esc back to settings")
|
||||||
} else {
|
} else {
|
||||||
// Show different help based on current selection
|
// Show different help based on current selection
|
||||||
if m.cursor >= 0 && m.cursor < len(m.settings) && m.settings[m.cursor].Type == "path" {
|
if m.cursor >= 0 && m.cursor < len(m.settings) && m.settings[m.cursor].Type == "path" {
|
||||||
footer = infoStyle.Render("\n⌨️ ↑/↓ navigate • Enter edit • Tab browse directories • 's' save • 'r' reset • 'q' menu")
|
footer = infoStyle.Render("\n[KEYS] Up/Down navigate | Enter edit | Tab browse directories | 's' save | 'r' reset | 'q' menu")
|
||||||
} else {
|
} else {
|
||||||
footer = infoStyle.Render("\n⌨️ ↑/↓ navigate • Enter edit • 's' save • 'r' reset • 'q' menu • Tab=dirs on path fields only")
|
footer = infoStyle.Render("\n[KEYS] Up/Down navigate | Enter edit | 's' save | 'r' reset | 'q' menu | Tab=dirs on path fields only")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -802,7 +801,7 @@ func (m SettingsModel) openDirectoryBrowser() (tea.Model, tea.Cmd) {
|
|||||||
setting := m.settings[m.cursor]
|
setting := m.settings[m.cursor]
|
||||||
currentValue := setting.Value(m.config)
|
currentValue := setting.Value(m.config)
|
||||||
if currentValue == "" {
|
if currentValue == "" {
|
||||||
currentValue = "/tmp"
|
currentValue = m.config.GetEffectiveWorkDir()
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.dirBrowser == nil {
|
if m.dirBrowser == nil {
|
||||||
|
|||||||
@@ -70,7 +70,8 @@ type statusMsg struct {
|
|||||||
|
|
||||||
func fetchStatus(cfg *config.Config, log logger.Logger) tea.Cmd {
|
func fetchStatus(cfg *config.Config, log logger.Logger) tea.Cmd {
|
||||||
return func() tea.Msg {
|
return func() tea.Msg {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
// 30 seconds for status check - slow networks or SSL negotiation
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
dbClient, err := database.New(cfg, log)
|
dbClient, err := database.New(cfg, log)
|
||||||
@@ -159,25 +160,25 @@ func (m StatusViewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
func (m StatusViewModel) View() string {
|
func (m StatusViewModel) View() string {
|
||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
|
|
||||||
header := titleStyle.Render("📊 Database Status & Health Check")
|
header := titleStyle.Render("[STATS] Database Status & Health Check")
|
||||||
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
s.WriteString(fmt.Sprintf("\n%s\n\n", header))
|
||||||
|
|
||||||
if m.loading {
|
if m.loading {
|
||||||
spinner := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
|
spinner := []string{"-", "\\", "|", "/"}
|
||||||
frame := int(time.Now().UnixMilli()/100) % len(spinner)
|
frame := int(time.Now().UnixMilli()/100) % len(spinner)
|
||||||
s.WriteString(fmt.Sprintf("%s Loading status information...\n", spinner[frame]))
|
s.WriteString(fmt.Sprintf("%s Loading status information...\n", spinner[frame]))
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(errorStyle.Render(fmt.Sprintf("❌ Error: %v\n", m.err)))
|
s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v\n", m.err)))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
} else {
|
} else {
|
||||||
s.WriteString("Connection Status:\n")
|
s.WriteString("Connection Status:\n")
|
||||||
if m.connected {
|
if m.connected {
|
||||||
s.WriteString(successStyle.Render(" ✓ Connected\n"))
|
s.WriteString(successStyle.Render(" [+] Connected\n"))
|
||||||
} else {
|
} else {
|
||||||
s.WriteString(errorStyle.Render(" ✗ Disconnected\n"))
|
s.WriteString(errorStyle.Render(" [-] Disconnected\n"))
|
||||||
}
|
}
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
@@ -192,9 +193,9 @@ func (m StatusViewModel) View() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(successStyle.Render("✓ All systems operational\n"))
|
s.WriteString(successStyle.Render("[+] All systems operational\n"))
|
||||||
}
|
}
|
||||||
|
|
||||||
s.WriteString("\n⌨️ Press any key to return to menu\n")
|
s.WriteString("\n[KEYS] Press any key to return to menu\n")
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -99,8 +99,8 @@ func (pm *PITRManager) EnablePITR(ctx context.Context, archiveDir string) error
|
|||||||
return fmt.Errorf("failed to update postgresql.conf: %w", err)
|
return fmt.Errorf("failed to update postgresql.conf: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
pm.log.Info("✅ PITR configuration updated successfully")
|
pm.log.Info("[OK] PITR configuration updated successfully")
|
||||||
pm.log.Warn("⚠️ PostgreSQL restart required for changes to take effect")
|
pm.log.Warn("[WARN] PostgreSQL restart required for changes to take effect")
|
||||||
pm.log.Info("To restart PostgreSQL:")
|
pm.log.Info("To restart PostgreSQL:")
|
||||||
pm.log.Info(" sudo systemctl restart postgresql")
|
pm.log.Info(" sudo systemctl restart postgresql")
|
||||||
pm.log.Info(" OR: sudo pg_ctlcluster <version> <cluster> restart")
|
pm.log.Info(" OR: sudo pg_ctlcluster <version> <cluster> restart")
|
||||||
@@ -132,8 +132,8 @@ func (pm *PITRManager) DisablePITR(ctx context.Context) error {
|
|||||||
return fmt.Errorf("failed to update postgresql.conf: %w", err)
|
return fmt.Errorf("failed to update postgresql.conf: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
pm.log.Info("✅ PITR disabled successfully")
|
pm.log.Info("[OK] PITR disabled successfully")
|
||||||
pm.log.Warn("⚠️ PostgreSQL restart required")
|
pm.log.Warn("[WARN] PostgreSQL restart required")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -361,7 +361,7 @@ func (tm *TimelineManager) FormatTimelineTree(history *TimelineHistory) string {
|
|||||||
|
|
||||||
var sb strings.Builder
|
var sb strings.Builder
|
||||||
sb.WriteString("Timeline Branching Structure:\n")
|
sb.WriteString("Timeline Branching Structure:\n")
|
||||||
sb.WriteString("═════════════════════════════\n\n")
|
sb.WriteString("=============================\n\n")
|
||||||
|
|
||||||
// Build tree recursively
|
// Build tree recursively
|
||||||
tm.formatTimelineNode(&sb, history, 1, 0, "")
|
tm.formatTimelineNode(&sb, history, 1, 0, "")
|
||||||
@@ -378,9 +378,9 @@ func (tm *TimelineManager) formatTimelineNode(sb *strings.Builder, history *Time
|
|||||||
|
|
||||||
// Format current node
|
// Format current node
|
||||||
indent := strings.Repeat(" ", depth)
|
indent := strings.Repeat(" ", depth)
|
||||||
marker := "├─"
|
marker := "+-"
|
||||||
if depth == 0 {
|
if depth == 0 {
|
||||||
marker = "●"
|
marker = "*"
|
||||||
}
|
}
|
||||||
|
|
||||||
sb.WriteString(fmt.Sprintf("%s%s Timeline %d", indent, marker, tl.TimelineID))
|
sb.WriteString(fmt.Sprintf("%s%s Timeline %d", indent, marker, tl.TimelineID))
|
||||||
|
|||||||
4
main.go
4
main.go
@@ -16,7 +16,7 @@ import (
|
|||||||
|
|
||||||
// Build information (set by ldflags)
|
// Build information (set by ldflags)
|
||||||
var (
|
var (
|
||||||
version = "3.40.0"
|
version = "3.42.10"
|
||||||
buildTime = "unknown"
|
buildTime = "unknown"
|
||||||
gitCommit = "unknown"
|
gitCommit = "unknown"
|
||||||
)
|
)
|
||||||
@@ -52,7 +52,7 @@ func main() {
|
|||||||
if metrics.GlobalMetrics != nil {
|
if metrics.GlobalMetrics != nil {
|
||||||
avgs := metrics.GlobalMetrics.GetAverages()
|
avgs := metrics.GlobalMetrics.GetAverages()
|
||||||
if ops, ok := avgs["total_operations"].(int); ok && ops > 0 {
|
if ops, ok := avgs["total_operations"].(int); ok && ops > 0 {
|
||||||
fmt.Printf("\n📊 Session Summary: %d operations, %.1f%% success rate\n",
|
fmt.Printf("\n[INFO] Session Summary: %d operations, %.1f%% success rate\n",
|
||||||
ops, avgs["success_rate"])
|
ops, avgs["success_rate"])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
171
scripts/remove_all_unicode.sh
Executable file
171
scripts/remove_all_unicode.sh
Executable file
@@ -0,0 +1,171 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# COMPLETE emoji/Unicode removal - Replace ALL non-ASCII with ASCII equivalents
|
||||||
|
# Date: January 8, 2026
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
echo "[INFO] Starting COMPLETE Unicode->ASCII replacement..."
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Create backup
|
||||||
|
BACKUP_DIR="backup_unicode_removal_$(date +%Y%m%d_%H%M%S)"
|
||||||
|
mkdir -p "$BACKUP_DIR"
|
||||||
|
echo "[INFO] Creating backup in $BACKUP_DIR..."
|
||||||
|
find . -name "*.go" -type f -not -path "*/vendor/*" -not -path "*/.git/*" -exec bash -c 'mkdir -p "$1/$(dirname "$2")" && cp "$2" "$1/$2"' -- "$BACKUP_DIR" {} \;
|
||||||
|
echo "[OK] Backup created"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Find all affected files
|
||||||
|
echo "[SEARCH] Finding files with Unicode..."
|
||||||
|
FILES=$(find . -name "*.go" -type f -not -path "*/vendor/*" -not -path "*/.git/*")
|
||||||
|
|
||||||
|
PROCESSED=0
|
||||||
|
TOTAL=$(echo "$FILES" | wc -l)
|
||||||
|
|
||||||
|
for file in $FILES; do
|
||||||
|
PROCESSED=$((PROCESSED + 1))
|
||||||
|
|
||||||
|
if ! grep -qP '[\x{80}-\x{FFFF}]' "$file" 2>/dev/null; then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "[$PROCESSED/$TOTAL] Processing: $file"
|
||||||
|
|
||||||
|
# Create temp file for atomic replacements
|
||||||
|
TMPFILE="${file}.tmp"
|
||||||
|
cp "$file" "$TMPFILE"
|
||||||
|
|
||||||
|
# Box drawing / decorative (used in TUI borders)
|
||||||
|
sed -i 's/─/-/g' "$TMPFILE"
|
||||||
|
sed -i 's/━/-/g' "$TMPFILE"
|
||||||
|
sed -i 's/│/|/g' "$TMPFILE"
|
||||||
|
sed -i 's/║/|/g' "$TMPFILE"
|
||||||
|
sed -i 's/├/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/└/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/╔/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/╗/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/╚/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/╝/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/╠/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/╣/+/g' "$TMPFILE"
|
||||||
|
sed -i 's/═/=/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Status symbols
|
||||||
|
sed -i 's/✅/[OK]/g' "$TMPFILE"
|
||||||
|
sed -i 's/❌/[FAIL]/g' "$TMPFILE"
|
||||||
|
sed -i 's/✓/[+]/g' "$TMPFILE"
|
||||||
|
sed -i 's/✗/[-]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⚠️/[WARN]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⚠/[!]/g' "$TMPFILE"
|
||||||
|
sed -i 's/❓/[?]/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Arrows
|
||||||
|
sed -i 's/←/</g' "$TMPFILE"
|
||||||
|
sed -i 's/→/>/g' "$TMPFILE"
|
||||||
|
sed -i 's/↑/^/g' "$TMPFILE"
|
||||||
|
sed -i 's/↓/v/g' "$TMPFILE"
|
||||||
|
sed -i 's/▲/^/g' "$TMPFILE"
|
||||||
|
sed -i 's/▼/v/g' "$TMPFILE"
|
||||||
|
sed -i 's/▶/>/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Shapes
|
||||||
|
sed -i 's/●/*\*/g' "$TMPFILE"
|
||||||
|
sed -i 's/○/o/g' "$TMPFILE"
|
||||||
|
sed -i 's/⚪/o/g' "$TMPFILE"
|
||||||
|
sed -i 's/•/-/g' "$TMPFILE"
|
||||||
|
sed -i 's/█/#/g' "$TMPFILE"
|
||||||
|
sed -i 's/▎/|/g' "$TMPFILE"
|
||||||
|
sed -i 's/░/./g' "$TMPFILE"
|
||||||
|
sed -i 's/➖/-/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Emojis - Info/Data
|
||||||
|
sed -i 's/📊/[INFO]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📋/[LIST]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📁/[DIR]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📦/[PKG]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📜/[LOG]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📭/[EMPTY]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📝/[NOTE]/g' "$TMPFILE"
|
||||||
|
sed -i 's/💡/[TIP]/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Emojis - Actions/Objects
|
||||||
|
sed -i 's/🎯/[TARGET]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🛡️/[SECURE]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🔒/[LOCK]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🔓/[UNLOCK]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🔍/[SEARCH]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🔀/[SWITCH]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🔥/[FIRE]/g' "$TMPFILE"
|
||||||
|
sed -i 's/💾/[SAVE]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🗄️/[DB]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🗄/[DB]/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Emojis - Time/Status
|
||||||
|
sed -i 's/⏱️/[TIME]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⏱/[TIME]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⏳/[WAIT]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⏪/[REW]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⏹️/[STOP]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⏹/[STOP]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⟳/[SYNC]/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Emojis - Cloud
|
||||||
|
sed -i 's/☁️/[CLOUD]/g' "$TMPFILE"
|
||||||
|
sed -i 's/☁/[CLOUD]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📤/[UPLOAD]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📥/[DOWNLOAD]/g' "$TMPFILE"
|
||||||
|
sed -i 's/🗑️/[DELETE]/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Emojis - Misc
|
||||||
|
sed -i 's/📈/[UP]/g' "$TMPFILE"
|
||||||
|
sed -i 's/📉/[DOWN]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⌨️/[KEY]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⌨/[KEY]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⚙️/[CONFIG]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⚙/[CONFIG]/g' "$TMPFILE"
|
||||||
|
sed -i 's/✏️/[EDIT]/g' "$TMPFILE"
|
||||||
|
sed -i 's/✏/[EDIT]/g' "$TMPFILE"
|
||||||
|
sed -i 's/⚡/[FAST]/g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Spinner characters (braille patterns for loading animations)
|
||||||
|
sed -i 's/⠋/|/g' "$TMPFILE"
|
||||||
|
sed -i 's/⠙/\//g' "$TMPFILE"
|
||||||
|
sed -i 's/⠹/-/g' "$TMPFILE"
|
||||||
|
sed -i 's/⠸/\\/g' "$TMPFILE"
|
||||||
|
sed -i 's/⠼/|/g' "$TMPFILE"
|
||||||
|
sed -i 's/⠴/\//g' "$TMPFILE"
|
||||||
|
sed -i 's/⠦/-/g' "$TMPFILE"
|
||||||
|
sed -i 's/⠧/\\/g' "$TMPFILE"
|
||||||
|
sed -i 's/⠇/|/g' "$TMPFILE"
|
||||||
|
sed -i 's/⠏/\//g' "$TMPFILE"
|
||||||
|
|
||||||
|
# Move temp file over original
|
||||||
|
mv "$TMPFILE" "$file"
|
||||||
|
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[OK] Replacement complete!"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Verify
|
||||||
|
REMAINING=$(grep -roP '[\x{80}-\x{FFFF}]' --include="*.go" . 2>/dev/null | wc -l || echo "0")
|
||||||
|
|
||||||
|
echo "[INFO] Unicode characters remaining: $REMAINING"
|
||||||
|
if [ "$REMAINING" -gt 0 ]; then
|
||||||
|
echo "[WARN] Some Unicode still exists (might be in comments or safe locations)"
|
||||||
|
echo "[INFO] Unique remaining characters:"
|
||||||
|
grep -roP '[\x{80}-\x{FFFF}]' --include="*.go" . 2>/dev/null | grep -oP '[\x{80}-\x{FFFF}]' | sort -u | head -20
|
||||||
|
else
|
||||||
|
echo "[OK] All Unicode characters replaced with ASCII!"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[INFO] Backup: $BACKUP_DIR"
|
||||||
|
echo "[INFO] To restore: cp -r $BACKUP_DIR/* ."
|
||||||
|
echo ""
|
||||||
|
echo "[INFO] Next steps:"
|
||||||
|
echo " 1. go build"
|
||||||
|
echo " 2. go test ./..."
|
||||||
|
echo " 3. Test TUI: ./dbbackup"
|
||||||
|
echo " 4. Commit: git add . && git commit -m 'v3.42.11: Replace all Unicode with ASCII'"
|
||||||
|
echo ""
|
||||||
130
scripts/remove_emojis.sh
Executable file
130
scripts/remove_emojis.sh
Executable file
@@ -0,0 +1,130 @@
|
|||||||
|
#!/bin/bash
# Remove ALL emojis/unicode symbols from Go code and replace with ASCII
# Date: January 8, 2026
# Issue: 638 lines contain Unicode emojis causing display issues
#
# Strategy: find affected *.go files, back them up, then rewrite each file
# in a single sed pass that maps every known emoji to an ASCII tag.

set -euo pipefail

# Emoji/symbol ranges we detect and count.
# NOTE: \x{2600}-\x{26FF} is a subset of \x{2300}-\x{27BF}; kept for parity
# with the original detection pattern.
readonly EMOJI_RE='[\x{1F000}-\x{1FFFF}]|[\x{2300}-\x{27BF}]|[\x{2600}-\x{26FF}]'

#######################################
# Count emoji occurrences across all non-vendored Go files.
# Outputs: total match count to stdout (always a single integer).
#######################################
count_emojis() {
  # grep exits 1 when nothing matches; tolerate that (and pipefail) so wc's
  # clean "0" is the only output -- the old `|| echo "0"` fallback produced
  # "0\n0" under pipefail and broke the arithmetic below.
  find . -name "*.go" -type f -not -path "*/vendor/*" -print0 \
    | xargs -0 grep -oP "$EMOJI_RE" 2>/dev/null \
    | wc -l || true
}

echo "[INFO] Starting emoji removal process..."
echo ""

# Find all Go files with emojis (expanded emoji list).
# NUL-delimited handoff + mapfile keeps filenames with spaces intact.
echo "[SEARCH] Finding affected files..."
mapfile -t files < <(
  find . -name "*.go" -type f -not -path "*/vendor/*" -not -path "*/.git/*" -print0 \
    | xargs -0 grep -l -P "$EMOJI_RE" 2>/dev/null || true
)

if (( ${#files[@]} == 0 )); then
  echo "[WARN] No files with emojis found!"
  exit 0
fi

filecount=${#files[@]}
echo "[INFO] Found $filecount files containing emojis"
echo ""

# Count total emojis before the rewrite.
before=$(count_emojis)
echo "[INFO] Total emojis found: $before"
echo ""

# Create backup (mirrors each file's directory layout under $backup_dir).
backup_dir="backup_before_emoji_removal_$(date +%Y%m%d_%H%M%S)"
mkdir -p "$backup_dir"
echo "[INFO] Creating backup in $backup_dir..."
for file in "${files[@]}"; do
  mkdir -p "$backup_dir/$(dirname "$file")"
  cp -- "$file" "$backup_dir/$file"
done
echo "[OK] Backup created"
echo ""

# All substitutions in one sed invocation per file (the original ran ~30
# separate sed+mv passes per file). Ordering matters: forms carrying a
# variant selector (e.g. WARN, CLOUD, KEY, DB, CONFIG, EDIT) must be
# replaced before their bare code points.
sed_exprs=(
  # Status indicators
  -e 's/✅/[OK]/g'      -e 's/❌/[FAIL]/g'
  -e 's/✓/[+]/g'        -e 's/✗/[-]/g'
  # Warning symbols
  -e 's/⚠️/[WARN]/g'    -e 's/⚠/[!]/g'
  # Info/Data symbols
  -e 's/📊/[INFO]/g'    -e 's/📋/[LIST]/g'
  -e 's/📁/[DIR]/g'     -e 's/📦/[PKG]/g'
  # Target/Security
  -e 's/🎯/[TARGET]/g'  -e 's/🛡️/[SECURE]/g'
  -e 's/🔒/[LOCK]/g'    -e 's/🔓/[UNLOCK]/g'
  # Actions
  -e 's/🔍/[SEARCH]/g'  -e 's/⏱️/[TIME]/g'
  # Cloud operations
  -e 's/☁️/[CLOUD]/g'   -e 's/☁/[CLOUD]/g'
  -e 's/📤/[UPLOAD]/g'  -e 's/📥/[DOWNLOAD]/g'
  -e 's/🗑️/[DELETE]/g'
  # Other
  -e 's/📈/[UP]/g'      -e 's/📉/[DOWN]/g'
  # Additional emojis found
  -e 's/⌨️/[KEY]/g'     -e 's/⌨/[KEY]/g'
  -e 's/🗄️/[DB]/g'      -e 's/🗄/[DB]/g'
  -e 's/⚙️/[CONFIG]/g'  -e 's/⚙/[CONFIG]/g'
  -e 's/✏️/[EDIT]/g'    -e 's/✏/[EDIT]/g'
)

echo "[INFO] Replacing emojis with ASCII equivalents..."
processed=0

for file in "${files[@]}"; do
  processed=$((processed + 1))
  echo "[$processed/$filecount] Processing: $file"

  # Write to a temp file, then atomically replace the original.
  tmpfile="${file}.tmp"
  sed "${sed_exprs[@]}" -- "$file" > "$tmpfile" && mv -- "$tmpfile" "$file"
done

echo ""
echo "[OK] Replacement complete!"
echo ""

# Count remaining emojis.
after=$(count_emojis)

echo "[INFO] Emojis before: $before"
echo "[INFO] Emojis after: $after"
echo "[INFO] Emojis removed: $((before - after))"
echo ""

if [ "$after" -gt 0 ]; then
  echo "[WARN] $after emojis still remaining!"
  echo "[INFO] Listing remaining emojis:"
  # `|| true`: head closing the pipe makes grep exit 141 (SIGPIPE), which
  # would otherwise abort the script here under `set -e -o pipefail`.
  find . -name "*.go" -type f -not -path "*/vendor/*" -print0 \
    | xargs -0 grep -nP "$EMOJI_RE" 2>/dev/null | head -20 || true
else
  echo "[OK] All emojis successfully removed!"
fi

echo ""
echo "[INFO] Backup location: $backup_dir"
echo "[INFO] To restore: cp -r $backup_dir/* ."
echo ""
echo "[INFO] Next steps:"
echo " 1. Build: go build"
echo " 2. Test: go test ./..."
echo " 3. Manual testing: ./dbbackup status"
echo " 4. If OK, commit: git add . && git commit -m 'Replace emojis with ASCII'"
echo " 5. If broken, restore: cp -r $backup_dir/* ."
echo ""
echo "[OK] Emoji removal script completed!"
|
||||||
Reference in New Issue
Block a user