Compare commits


2 Commits

Author SHA1 Message Date
ec33959e3e v3.42.18: Unify archive verification - backup manager uses same checks as restore
All checks were successful
CI/CD / Test (push) Successful in 1m13s
CI/CD / Lint (push) Successful in 1m22s
CI/CD / Build & Release (push) Successful in 3m12s
- verifyArchiveCmd now uses restore.Safety and restore.Diagnoser
- Same validation logic in backup manager verify and restore safety checks
- No more discrepancy between verify showing valid and restore failing
2026-01-08 12:10:45 +01:00
92402f0fdb v3.42.17: Fix systemd service templates - remove invalid --config flag
All checks were successful
CI/CD / Test (push) Successful in 1m15s
CI/CD / Lint (push) Successful in 1m21s
CI/CD / Build & Release (push) Successful in 3m12s
- Service templates now use WorkingDirectory for config loading
- Config is read from .dbbackup.conf in /var/lib/dbbackup
- Updated SYSTEMD.md documentation to match actual CLI
- Removed non-existent --config flag from ExecStart
2026-01-08 11:57:16 +01:00
5 changed files with 82 additions and 86 deletions

View File

@@ -116,8 +116,9 @@ sudo chmod 755 /usr/local/bin/dbbackup
 ### Step 2: Create Configuration
 ```bash
-# Main configuration
-sudo tee /etc/dbbackup/dbbackup.conf << 'EOF'
+# Main configuration in working directory (where service runs from)
+# dbbackup reads .dbbackup.conf from WorkingDirectory
+sudo tee /var/lib/dbbackup/.dbbackup.conf << 'EOF'
 # DBBackup Configuration
 db-type=postgres
 host=localhost
@@ -128,6 +129,8 @@ compression=6
 retention-days=30
 min-backups=7
 EOF
+sudo chown dbbackup:dbbackup /var/lib/dbbackup/.dbbackup.conf
+sudo chmod 600 /var/lib/dbbackup/.dbbackup.conf
 
 # Instance credentials (secure permissions)
 sudo tee /etc/dbbackup/env.d/cluster.conf << 'EOF'
@@ -157,13 +160,15 @@ Group=dbbackup
 # Load configuration
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf
 
-# Working directory
+# Working directory (config is loaded from .dbbackup.conf here)
 WorkingDirectory=/var/lib/dbbackup
 
-# Execute backup
+# Execute backup (reads .dbbackup.conf from WorkingDirectory)
 ExecStart=/usr/local/bin/dbbackup backup cluster \
-    --config /etc/dbbackup/dbbackup.conf \
     --backup-dir /var/lib/dbbackup/backups \
+    --host localhost \
+    --port 5432 \
+    --user postgres \
     --allow-root
 
 # Security hardening
@@ -443,12 +448,12 @@ sudo systemctl status dbbackup-cluster.service
 # View detailed error
 sudo journalctl -u dbbackup-cluster.service -n 50 --no-pager
 
-# Test manually as dbbackup user
-sudo -u dbbackup /usr/local/bin/dbbackup backup cluster --config /etc/dbbackup/dbbackup.conf
+# Test manually as dbbackup user (run from working directory with .dbbackup.conf)
+cd /var/lib/dbbackup && sudo -u dbbackup /usr/local/bin/dbbackup backup cluster
 
 # Check permissions
 ls -la /var/lib/dbbackup/
-ls -la /etc/dbbackup/
+ls -la /var/lib/dbbackup/.dbbackup.conf
 ```
 
 ### Permission Denied
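
The SYSTEMD.md changes above all follow from one behavior: without a `--config` flag, dbbackup resolves its settings from `.dbbackup.conf` in the unit's WorkingDirectory. The loader itself is not part of this diff, so the sketch below only illustrates that lookup pattern for a flat key=value file like the one created in Step 2; the helper name `loadWorkingDirConfig` and the parsing details are assumptions, not dbbackup's actual code.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// loadWorkingDirConfig reads key=value pairs from .dbbackup.conf in the
// current working directory (the systemd WorkingDirectory when the binary
// runs as a service). Hypothetical helper for illustration only.
func loadWorkingDirConfig() (map[string]string, error) {
	wd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	path := filepath.Join(wd, ".dbbackup.conf")

	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("no config in working directory %s: %w", wd, err)
	}
	defer f.Close()

	conf := make(map[string]string)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip blank lines and comments
		}
		if key, value, ok := strings.Cut(line, "="); ok {
			conf[strings.TrimSpace(key)] = strings.TrimSpace(value)
		}
	}
	return conf, scanner.Err()
}

func main() {
	conf, err := loadWorkingDirConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("loaded %d settings: %v\n", len(conf), conf)
}
```

Run from `/var/lib/dbbackup` (as the troubleshooting snippet does with `cd`), this resolves the same file the service unit sees, which is why the manual test and the unit no longer diverge.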

View File

@@ -4,8 +4,8 @@ This directory contains pre-compiled binaries for the DB Backup Tool across mult
 ## Build Information
 - **Version**: 3.42.10
-- **Build Time**: 2026-01-08_09:54:02_UTC
-- **Git Commit**: 83ad62b
+- **Build Time**: 2026-01-08_10:59:00_UTC
+- **Git Commit**: 92402f0
 
 ## Recent Updates (v1.1.0)
 - ✅ Fixed TUI progress display with line-by-line output

View File

@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
# Environment
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution - cluster backup (all databases)
-ExecStart={{.BinaryPath}} backup cluster --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup cluster --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export

View File

@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
# Environment
 EnvironmentFile=-/etc/dbbackup/env.d/%i.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution
-ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export
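
Both unit files above are Go text/template sources; the installer fills in `{{.BinaryPath}}`, `{{.BackupType}}`, `{{.BackupDir}}` and `{{.TimeoutSeconds}}` when it writes the final .service files. The generator is not part of this diff, so the snippet below is only a minimal rendering sketch: the `templateData` struct and the sample values are assumptions chosen to mirror the placeholders, not the project's real installer.

```go
package main

import (
	"os"
	"text/template"
)

// templateData is a hypothetical stand-in for the values substituted
// into the .service templates shown above.
type templateData struct {
	BinaryPath     string
	BackupType     string
	BackupDir      string
	TimeoutSeconds int
}

const execSection = `WorkingDirectory=/var/lib/dbbackup
ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --backup-dir {{.BackupDir}}
TimeoutStartSec={{.TimeoutSeconds}}
`

func main() {
	tmpl := template.Must(template.New("unit").Parse(execSection))
	data := templateData{
		BinaryPath:     "/usr/local/bin/dbbackup", // sample values, not taken from the repo
		BackupType:     "single",
		BackupDir:      "/var/lib/dbbackup/backups",
		TimeoutSeconds: 3600,
	}
	// Writes the rendered [Service] lines to stdout.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```

Note that `%i` is not a template directive, so it survives rendering untouched and systemd still substitutes the instance name at start time.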

View File

@@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"os" "os"
"os/exec"
"strings" "strings"
"time" "time"
@@ -12,6 +11,7 @@ import (
"dbbackup/internal/config" "dbbackup/internal/config"
"dbbackup/internal/logger" "dbbackup/internal/logger"
"dbbackup/internal/restore"
) )
// OperationState represents the current operation state // OperationState represents the current operation state
@@ -349,85 +349,70 @@ func (m BackupManagerModel) View() string {
	return s.String()
 }
 
-// verifyArchiveCmd runs actual archive verification
+// verifyArchiveCmd runs the SAME verification as restore safety checks
+// This ensures consistency between backup manager verify and restore preview
 func verifyArchiveCmd(archive ArchiveInfo) tea.Cmd {
 	return func() tea.Msg {
-		// Determine verification method based on format
-		var valid bool
-		var details string
-		var err error
-
-		switch {
-		case strings.HasSuffix(archive.Path, ".tar.gz") || strings.HasSuffix(archive.Path, ".tgz"):
-			// Verify tar.gz archive
-			cmd := exec.Command("tar", "-tzf", archive.Path)
-			output, cmdErr := cmd.CombinedOutput()
-			if cmdErr != nil {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "Archive corrupt or incomplete"}
-			}
-			lines := strings.Split(string(output), "\n")
-			fileCount := 0
-			for _, l := range lines {
-				if l != "" {
-					fileCount++
-				}
-			}
-			valid = true
-			details = fmt.Sprintf("%d files in archive", fileCount)
-		case strings.HasSuffix(archive.Path, ".dump") || strings.HasSuffix(archive.Path, ".sql"):
-			// Verify PostgreSQL dump with pg_restore --list
-			cmd := exec.Command("pg_restore", "--list", archive.Path)
-			output, cmdErr := cmd.CombinedOutput()
-			if cmdErr != nil {
-				// Try as plain SQL
-				if strings.HasSuffix(archive.Path, ".sql") {
-					// Just check file is readable and has content
-					fi, statErr := os.Stat(archive.Path)
-					if statErr == nil && fi.Size() > 0 {
-						valid = true
-						details = "Plain SQL file readable"
-					} else {
-						return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "File empty or unreadable"}
-					}
-				} else {
-					return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "pg_restore cannot read dump"}
-				}
-			} else {
-				lines := strings.Split(string(output), "\n")
-				objectCount := 0
-				for _, l := range lines {
-					if l != "" && !strings.HasPrefix(l, ";") {
-						objectCount++
-					}
-				}
-				valid = true
-				details = fmt.Sprintf("%d objects in dump", objectCount)
-			}
-		case strings.HasSuffix(archive.Path, ".sql.gz"):
-			// Verify gzipped SQL
-			cmd := exec.Command("gzip", "-t", archive.Path)
-			if cmdErr := cmd.Run(); cmdErr != nil {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "Gzip archive corrupt"}
-			}
-			valid = true
-			details = "Gzip integrity OK"
-		default:
-			// Unknown format - just check file exists and has size
-			fi, statErr := os.Stat(archive.Path)
-			if statErr != nil {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: statErr, details: "Cannot access file"}
-			}
-			if fi.Size() == 0 {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "File is empty"}
-			}
-			valid = true
-			details = "File exists and has content"
-		}
-
-		return verifyResultMsg{archive: archive.Name, valid: valid, err: err, details: details}
+		var issues []string
+
+		// 1. Run the same archive integrity check as restore
+		safety := restore.NewSafety(nil, nil) // Doesn't need config/log for validation
+		if err := safety.ValidateArchive(archive.Path); err != nil {
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     nil,
+				details: fmt.Sprintf("Archive integrity: %v", err),
+			}
+		}
+
+		// 2. Run the same deep diagnosis as restore
+		diagnoser := restore.NewDiagnoser(nil, false)
+		diagResult, diagErr := diagnoser.DiagnoseFile(archive.Path)
+		if diagErr != nil {
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     diagErr,
+				details: "Cannot diagnose archive",
+			}
+		}
+
+		if !diagResult.IsValid {
+			// Collect error details
+			if diagResult.IsTruncated {
+				issues = append(issues, "TRUNCATED")
+			}
+			if diagResult.IsCorrupted {
+				issues = append(issues, "CORRUPTED")
+			}
+			if len(diagResult.Errors) > 0 {
+				issues = append(issues, diagResult.Errors[0])
+			}
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     nil,
+				details: strings.Join(issues, "; "),
+			}
+		}
+
+		// Build success details
+		details := "Verified"
+		if diagResult.Details != nil {
+			if diagResult.Details.TableCount > 0 {
+				details = fmt.Sprintf("%d databases in archive", diagResult.Details.TableCount)
+			} else if diagResult.Details.PgRestoreListable {
+				details = "pg_restore verified"
+			}
+		}
+
+		// Add any warnings
+		if len(diagResult.Warnings) > 0 {
+			details += fmt.Sprintf(" [%d warnings]", len(diagResult.Warnings))
+		}
+
+		return verifyResultMsg{archive: archive.Name, valid: true, err: nil, details: details}
 	}
 }
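
The net effect of v3.42.18 is that any caller can reuse the same two-step check the TUI verify and the restore preflight now share. The sketch below is a hypothetical standalone wrapper around exactly the calls shown in the diff (`restore.NewSafety`, `ValidateArchive`, `NewDiagnoser`, `DiagnoseFile`); it only builds inside the dbbackup module, since `internal/restore` is an internal package, and the helper name is illustrative.

```go
package main

import (
	"fmt"
	"os"

	"dbbackup/internal/restore"
)

// checkArchive runs the same two checks that backup-manager verify and
// the restore safety path now share. Hypothetical helper for illustration.
func checkArchive(path string) error {
	// 1. Basic archive integrity, identical to the restore safety check.
	safety := restore.NewSafety(nil, nil)
	if err := safety.ValidateArchive(path); err != nil {
		return fmt.Errorf("archive integrity: %w", err)
	}

	// 2. Deep diagnosis, identical to the restore diagnoser.
	diagnoser := restore.NewDiagnoser(nil, false)
	result, err := diagnoser.DiagnoseFile(path)
	if err != nil {
		return fmt.Errorf("diagnose: %w", err)
	}
	if !result.IsValid {
		return fmt.Errorf("invalid archive (truncated=%v, corrupted=%v)",
			result.IsTruncated, result.IsCorrupted)
	}
	return nil
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: checkarchive <path>")
		os.Exit(1)
	}
	if err := checkArchive(os.Args[1]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("archive verified")
}
```

Because both code paths call into the same package, an archive that passes verification here is the same archive the restore preview will accept, which is exactly the discrepancy the commit message calls out.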