Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | ec33959e3e |  |
|  | 92402f0fdb |  |

SYSTEMD.md (21 changed lines)
````diff
@@ -116,8 +116,9 @@ sudo chmod 755 /usr/local/bin/dbbackup
 ### Step 2: Create Configuration
 
 ```bash
-# Main configuration
-sudo tee /etc/dbbackup/dbbackup.conf << 'EOF'
+# Main configuration in working directory (where service runs from)
+# dbbackup reads .dbbackup.conf from WorkingDirectory
+sudo tee /var/lib/dbbackup/.dbbackup.conf << 'EOF'
 # DBBackup Configuration
 db-type=postgres
 host=localhost
````
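One detail worth calling out for anyone retyping this step: the heredoc delimiter is quoted (`'EOF'`), so the shell performs no expansion on the body and the config values land in the file verbatim. A minimal sketch of the pattern, using a throwaway path rather than the real config:

```bash
# Quoted 'EOF' writes the body literally; an unquoted EOF delimiter
# would expand $HOME before writing.
tee /tmp/heredoc-demo.conf > /dev/null << 'EOF'
value=$HOME is written literally here
EOF
cat /tmp/heredoc-demo.conf   # -> value=$HOME is written literally here
```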
```diff
@@ -128,6 +129,8 @@ compression=6
 retention-days=30
 min-backups=7
 EOF
+sudo chown dbbackup:dbbackup /var/lib/dbbackup/.dbbackup.conf
+sudo chmod 600 /var/lib/dbbackup/.dbbackup.conf
 
 # Instance credentials (secure permissions)
 sudo tee /etc/dbbackup/env.d/cluster.conf << 'EOF'
```
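Since the unit runs as the dbbackup user and now reads the config from its working directory, a quick sanity check after Step 2 might look like this (a sketch; the `stat` format string assumes GNU coreutils):

```bash
# Expect: dbbackup:dbbackup 600
stat -c '%U:%G %a' /var/lib/dbbackup/.dbbackup.conf

# Confirm the service user can actually read it from the working directory
cd /var/lib/dbbackup && sudo -u dbbackup head -n 3 .dbbackup.conf
```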
```diff
@@ -157,13 +160,15 @@ Group=dbbackup
 # Load configuration
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf
 
-# Working directory
+# Working directory (config is loaded from .dbbackup.conf here)
 WorkingDirectory=/var/lib/dbbackup
 
-# Execute backup
+# Execute backup (reads .dbbackup.conf from WorkingDirectory)
 ExecStart=/usr/local/bin/dbbackup backup cluster \
-    --config /etc/dbbackup/dbbackup.conf \
     --backup-dir /var/lib/dbbackup/backups \
+    --host localhost \
+    --port 5432 \
+    --user postgres \
     --allow-root
 
 # Security hardening
```
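Because ExecStart changed, the edit only takes effect after a daemon reload. A cheap verification pass along these lines, assuming the unit file sits at /etc/systemd/system/dbbackup-cluster.service:

```bash
sudo systemctl daemon-reload
# Static lint of the edited unit file
systemd-analyze verify /etc/systemd/system/dbbackup-cluster.service
# One-shot run, then follow the log
sudo systemctl start dbbackup-cluster.service
sudo journalctl -u dbbackup-cluster.service -f
```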
````diff
@@ -443,12 +448,12 @@ sudo systemctl status dbbackup-cluster.service
 # View detailed error
 sudo journalctl -u dbbackup-cluster.service -n 50 --no-pager
 
-# Test manually as dbbackup user
-sudo -u dbbackup /usr/local/bin/dbbackup backup cluster --config /etc/dbbackup/dbbackup.conf
+# Test manually as dbbackup user (run from working directory with .dbbackup.conf)
+cd /var/lib/dbbackup && sudo -u dbbackup /usr/local/bin/dbbackup backup cluster
 
 # Check permissions
 ls -la /var/lib/dbbackup/
-ls -la /etc/dbbackup/
+ls -la /var/lib/dbbackup/.dbbackup.conf
 ```
 
 ### Permission Denied
````
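For the permission checks above, walking the full path as seen by the service user often pinpoints the failing component faster than individual `ls` calls; `namei` from util-linux does this in one shot (a sketch):

```bash
# Shows owner and mode for every component of the path; any directory the
# dbbackup user cannot traverse will stand out in the listing
namei -l /var/lib/dbbackup/.dbbackup.conf
```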
```diff
@@ -4,8 +4,8 @@ This directory contains pre-compiled binaries for the DB Backup Tool across mult
 
 ## Build Information
 - **Version**: 3.42.10
-- **Build Time**: 2026-01-08_09:54:02_UTC
-- **Git Commit**: 83ad62b
+- **Build Time**: 2026-01-08_10:59:00_UTC
+- **Git Commit**: 92402f0
 
 ## Recent Updates (v1.1.0)
 - ✅ Fixed TUI progress display with line-by-line output
```
```diff
@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
 # Environment
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution - cluster backup (all databases)
-ExecStart={{.BinaryPath}} backup cluster --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup cluster --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export
```
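Since the generated cluster unit now omits `--config` entirely, one way to confirm a regenerated unit picked up the template change (unit name as used elsewhere in this compare; adjust to the installed name):

```bash
# Inspect the installed unit as systemd sees it
systemctl cat dbbackup-cluster.service | grep ExecStart
# Expected shape, with paths filled in by the template:
# ExecStart=/usr/local/bin/dbbackup backup cluster --backup-dir /var/lib/dbbackup/backups
```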
```diff
@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
 # Environment
 EnvironmentFile=-/etc/dbbackup/env.d/%i.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution
-ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export
```
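In this per-database template, `%i` is the systemd instance specifier: starting an instance substitutes the instance name into both the EnvironmentFile and ExecStart lines. The template unit's installed name is not shown in this compare, so the name below is purely illustrative:

```bash
# 'dbbackup@' is a hypothetical template unit name for illustration.
# Starting the 'sales' instance reads /etc/dbbackup/env.d/sales.conf and
# runs: <BinaryPath> backup <BackupType> sales --backup-dir <BackupDir>
sudo systemctl start 'dbbackup@sales.service'
```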
```diff
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"os"
-	"os/exec"
 	"strings"
 	"time"
 
```
```diff
@@ -12,6 +11,7 @@ import (
 
 	"dbbackup/internal/config"
 	"dbbackup/internal/logger"
+	"dbbackup/internal/restore"
 )
 
 // OperationState represents the current operation state
```
```diff
@@ -349,85 +349,70 @@ func (m BackupManagerModel) View() string {
 	return s.String()
 }
 
-// verifyArchiveCmd runs actual archive verification
+// verifyArchiveCmd runs the SAME verification as restore safety checks
+// This ensures consistency between backup manager verify and restore preview
 func verifyArchiveCmd(archive ArchiveInfo) tea.Cmd {
 	return func() tea.Msg {
-		// Determine verification method based on format
-		var valid bool
-		var details string
-		var err error
+		var issues []string
 
-		switch {
-		case strings.HasSuffix(archive.Path, ".tar.gz") || strings.HasSuffix(archive.Path, ".tgz"):
-			// Verify tar.gz archive
-			cmd := exec.Command("tar", "-tzf", archive.Path)
-			output, cmdErr := cmd.CombinedOutput()
-			if cmdErr != nil {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "Archive corrupt or incomplete"}
-			}
-			lines := strings.Split(string(output), "\n")
-			fileCount := 0
-			for _, l := range lines {
-				if l != "" {
-					fileCount++
-				}
-			}
-			valid = true
-			details = fmt.Sprintf("%d files in archive", fileCount)
+		// 1. Run the same archive integrity check as restore
+		safety := restore.NewSafety(nil, nil) // Doesn't need config/log for validation
+		if err := safety.ValidateArchive(archive.Path); err != nil {
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     nil,
+				details: fmt.Sprintf("Archive integrity: %v", err),
+			}
+		}
 
-		case strings.HasSuffix(archive.Path, ".dump") || strings.HasSuffix(archive.Path, ".sql"):
-			// Verify PostgreSQL dump with pg_restore --list
-			cmd := exec.Command("pg_restore", "--list", archive.Path)
-			output, cmdErr := cmd.CombinedOutput()
-			if cmdErr != nil {
-				// Try as plain SQL
-				if strings.HasSuffix(archive.Path, ".sql") {
-					// Just check file is readable and has content
-					fi, statErr := os.Stat(archive.Path)
-					if statErr == nil && fi.Size() > 0 {
-						valid = true
-						details = "Plain SQL file readable"
-					} else {
-						return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "File empty or unreadable"}
-					}
-				} else {
-					return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "pg_restore cannot read dump"}
-				}
-			} else {
-				lines := strings.Split(string(output), "\n")
-				objectCount := 0
-				for _, l := range lines {
-					if l != "" && !strings.HasPrefix(l, ";") {
-						objectCount++
-					}
-				}
-				valid = true
-				details = fmt.Sprintf("%d objects in dump", objectCount)
-			}
+		// 2. Run the same deep diagnosis as restore
+		diagnoser := restore.NewDiagnoser(nil, false)
+		diagResult, diagErr := diagnoser.DiagnoseFile(archive.Path)
+		if diagErr != nil {
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     diagErr,
+				details: "Cannot diagnose archive",
+			}
+		}
 
-		case strings.HasSuffix(archive.Path, ".sql.gz"):
-			// Verify gzipped SQL
-			cmd := exec.Command("gzip", "-t", archive.Path)
-			if cmdErr := cmd.Run(); cmdErr != nil {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "Gzip archive corrupt"}
-			}
-			valid = true
-			details = "Gzip integrity OK"
+		if !diagResult.IsValid {
+			// Collect error details
+			if diagResult.IsTruncated {
+				issues = append(issues, "TRUNCATED")
+			}
+			if diagResult.IsCorrupted {
+				issues = append(issues, "CORRUPTED")
+			}
+			if len(diagResult.Errors) > 0 {
+				issues = append(issues, diagResult.Errors[0])
+			}
+			return verifyResultMsg{
+				archive: archive.Name,
+				valid:   false,
+				err:     nil,
+				details: strings.Join(issues, "; "),
+			}
+		}
 
-		default:
-			// Unknown format - just check file exists and has size
-			fi, statErr := os.Stat(archive.Path)
-			if statErr != nil {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: statErr, details: "Cannot access file"}
-			}
-			if fi.Size() == 0 {
-				return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "File is empty"}
-			}
-			valid = true
-			details = "File exists and has content"
-		}
+		// Build success details
+		details := "Verified"
+		if diagResult.Details != nil {
+			if diagResult.Details.TableCount > 0 {
+				details = fmt.Sprintf("%d databases in archive", diagResult.Details.TableCount)
+			} else if diagResult.Details.PgRestoreListable {
+				details = "pg_restore verified"
+			}
+		}
 
-		return verifyResultMsg{archive: archive.Name, valid: valid, err: err, details: details}
+		// Add any warnings
+		if len(diagResult.Warnings) > 0 {
+			details += fmt.Sprintf(" [%d warnings]", len(diagResult.Warnings))
+		}
+
+		return verifyResultMsg{archive: archive.Name, valid: true, err: nil, details: details}
 	}
 }
```
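The branches deleted above shelled out to tar, pg_restore, and gzip. Those same commands remain the quickest manual spot-checks for an archive, independent of the TUI; a sketch of the equivalents (filenames illustrative):

```bash
# tar.gz: list contents; the line count mirrors the removed fileCount logic
tar -tzf backup.tar.gz | wc -l

# custom-format dump: count listable objects, excluding ';' comment lines
pg_restore --list backup.dump | grep -cv '^;'

# gzipped SQL: integrity test only
gzip -t backup.sql.gz && echo "Gzip integrity OK"
```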