feat: Add enterprise DBA features for production reliability

New features implemented:

1. Backup Catalog (internal/catalog/)
   - SQLite-based backup tracking
   - Gap detection and RPO monitoring
   - Search and statistics
   - Filesystem sync

2. DR Drill Testing (internal/drill/)
   - Automated restore testing in Docker containers
   - Database validation with custom queries
   - Catalog integration for drill-tested status

3. Smart Notifications (internal/notify/)
   - Event batching with configurable intervals
   - Time-based escalation policies
   - HTML/text/Slack templates

4. Compliance Reports (internal/report/)
   - SOC2, GDPR, HIPAA, PCI-DSS, ISO27001 frameworks
   - Evidence collection from catalog
   - JSON, Markdown, HTML output formats

5. RTO/RPO Calculator (internal/rto/)
   - Recovery objective analysis
   - RTO breakdown by phase
   - Recommendations for improvement

6. Replica-Aware Backup (internal/replica/)
   - Topology detection for PostgreSQL/MySQL
   - Automatic replica selection
   - Configurable selection strategies

7. Parallel Table Backup (internal/parallel/)
   - Concurrent table dumps
   - Worker pool with progress tracking
   - Large table optimization

8. MySQL/MariaDB PITR (internal/pitr/)
   - Binary log parsing and replay
   - Point-in-time recovery support
   - Transaction filtering

CLI commands added: catalog, drill, report, rto
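
Example invocations (taken from the new commands' help text):

  dbbackup catalog sync /backups
  dbbackup drill run /backups/mydb.dump.gz --database mydb --type postgresql
  dbbackup report generate --type soc2 --days 90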

All changes support the goal: reliable 3 AM database recovery.
commit f69bfe7071 (parent d0d83b61ef)
2025-12-13 20:28:55 +01:00
34 changed files with 13469 additions and 41 deletions

cmd/catalog.go (new file, 725 lines)

@@ -0,0 +1,725 @@
package cmd
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"dbbackup/internal/catalog"
"github.com/spf13/cobra"
)
var (
catalogDBPath string
catalogFormat string
catalogLimit int
catalogDatabase string
catalogStartDate string
catalogEndDate string
catalogInterval string
catalogVerbose bool
)
// catalogCmd represents the catalog command group
var catalogCmd = &cobra.Command{
Use: "catalog",
Short: "Backup catalog management",
Long: `Manage the backup catalog - a SQLite database tracking all backups.
The catalog provides:
- Searchable history of all backups
- Gap detection for backup schedules
- Statistics and reporting
- Integration with DR drill testing
Examples:
# Sync backups from a directory
dbbackup catalog sync /backups
# List all backups
dbbackup catalog list
# Show catalog statistics
dbbackup catalog stats
# Detect gaps in backup schedule
dbbackup catalog gaps mydb --interval 24h
# Search backups
dbbackup catalog search --database mydb --after 2024-01-01`,
}
// catalogSyncCmd syncs backups from directory
var catalogSyncCmd = &cobra.Command{
Use: "sync [directory]",
Short: "Sync backups from directory into catalog",
Long: `Scan a directory for backup files and import them into the catalog.
This command:
- Finds all .meta.json files
- Imports backup metadata into SQLite catalog
- Detects removed backups
- Updates changed entries
Examples:
# Sync from backup directory
dbbackup catalog sync /backups
# Sync with verbose output
dbbackup catalog sync /backups --verbose`,
Args: cobra.MinimumNArgs(1),
RunE: runCatalogSync,
}
// catalogListCmd lists backups
var catalogListCmd = &cobra.Command{
Use: "list",
Short: "List backups in catalog",
Long: `List all backups in the catalog with optional filtering.
Examples:
# List all backups
dbbackup catalog list
# List backups for specific database
dbbackup catalog list --database mydb
# List last 10 backups
dbbackup catalog list --limit 10
# Output as JSON
dbbackup catalog list --format json`,
RunE: runCatalogList,
}
// catalogStatsCmd shows statistics
var catalogStatsCmd = &cobra.Command{
Use: "stats",
Short: "Show catalog statistics",
Long: `Display comprehensive backup statistics.
Shows:
- Total backup count and size
- Backups by database
- Backups by type and status
- Verification and drill test coverage
Examples:
# Show overall stats
dbbackup catalog stats
# Stats for specific database
dbbackup catalog stats --database mydb
# Output as JSON
dbbackup catalog stats --format json`,
RunE: runCatalogStats,
}
// catalogGapsCmd detects schedule gaps
var catalogGapsCmd = &cobra.Command{
Use: "gaps [database]",
Short: "Detect gaps in backup schedule",
Long: `Analyze backup history and detect schedule gaps.
This helps identify:
- Missed backups
- Schedule irregularities
- RPO violations
Examples:
# Check all databases for gaps (24h expected interval)
dbbackup catalog gaps
# Check specific database with custom interval
dbbackup catalog gaps mydb --interval 6h
# Check gaps in date range
dbbackup catalog gaps --after 2024-01-01 --before 2024-02-01`,
RunE: runCatalogGaps,
}
// catalogSearchCmd searches backups
var catalogSearchCmd = &cobra.Command{
Use: "search",
Short: "Search backups in catalog",
Long: `Search for backups matching specific criteria.
Examples:
# Search by database name (supports wildcards)
dbbackup catalog search --database "prod*"
# Search by date range
dbbackup catalog search --after 2024-01-01 --before 2024-02-01
# Search verified backups only
dbbackup catalog search --verified
# Search encrypted backups
dbbackup catalog search --encrypted`,
RunE: runCatalogSearch,
}
// catalogInfoCmd shows entry details
var catalogInfoCmd = &cobra.Command{
Use: "info [backup-path]",
Short: "Show detailed info for a backup",
Long: `Display detailed information about a specific backup.
Examples:
# Show info by path
dbbackup catalog info /backups/mydb_20240115.dump.gz`,
Args: cobra.ExactArgs(1),
RunE: runCatalogInfo,
}
func init() {
rootCmd.AddCommand(catalogCmd)
// Default catalog path
defaultCatalogPath := filepath.Join(getDefaultConfigDir(), "catalog.db")
// Global catalog flags
catalogCmd.PersistentFlags().StringVar(&catalogDBPath, "catalog-db", defaultCatalogPath,
"Path to catalog SQLite database")
catalogCmd.PersistentFlags().StringVar(&catalogFormat, "format", "table",
"Output format: table, json, csv")
// Add subcommands
catalogCmd.AddCommand(catalogSyncCmd)
catalogCmd.AddCommand(catalogListCmd)
catalogCmd.AddCommand(catalogStatsCmd)
catalogCmd.AddCommand(catalogGapsCmd)
catalogCmd.AddCommand(catalogSearchCmd)
catalogCmd.AddCommand(catalogInfoCmd)
// Sync flags
catalogSyncCmd.Flags().BoolVarP(&catalogVerbose, "verbose", "v", false, "Show detailed output")
// List flags
catalogListCmd.Flags().IntVar(&catalogLimit, "limit", 50, "Maximum entries to show")
catalogListCmd.Flags().StringVar(&catalogDatabase, "database", "", "Filter by database name")
// Stats flags
catalogStatsCmd.Flags().StringVar(&catalogDatabase, "database", "", "Show stats for specific database")
// Gaps flags
catalogGapsCmd.Flags().StringVar(&catalogInterval, "interval", "24h", "Expected backup interval")
catalogGapsCmd.Flags().StringVar(&catalogStartDate, "after", "", "Start date (YYYY-MM-DD)")
catalogGapsCmd.Flags().StringVar(&catalogEndDate, "before", "", "End date (YYYY-MM-DD)")
// Search flags
catalogSearchCmd.Flags().StringVar(&catalogDatabase, "database", "", "Filter by database name (supports wildcards)")
catalogSearchCmd.Flags().StringVar(&catalogStartDate, "after", "", "Backups after date (YYYY-MM-DD)")
catalogSearchCmd.Flags().StringVar(&catalogEndDate, "before", "", "Backups before date (YYYY-MM-DD)")
catalogSearchCmd.Flags().IntVar(&catalogLimit, "limit", 100, "Maximum results")
catalogSearchCmd.Flags().Bool("verified", false, "Only verified backups")
catalogSearchCmd.Flags().Bool("encrypted", false, "Only encrypted backups")
catalogSearchCmd.Flags().Bool("drill-tested", false, "Only drill-tested backups")
}
func getDefaultConfigDir() string {
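// If the home directory cannot be resolved, home is "" and the catalog
// falls back to the relative path ".dbbackup".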
home, _ := os.UserHomeDir()
return filepath.Join(home, ".dbbackup")
}
func openCatalog() (*catalog.SQLiteCatalog, error) {
return catalog.NewSQLiteCatalog(catalogDBPath)
}
func runCatalogSync(cmd *cobra.Command, args []string) error {
dir := args[0]
// Validate directory
info, err := os.Stat(dir)
if err != nil {
return fmt.Errorf("directory not found: %s", dir)
}
if !info.IsDir() {
return fmt.Errorf("not a directory: %s", dir)
}
absDir, _ := filepath.Abs(dir)
cat, err := openCatalog()
if err != nil {
return err
}
defer cat.Close()
fmt.Printf("📁 Syncing backups from: %s\n", absDir)
fmt.Printf("📊 Catalog database: %s\n\n", catalogDBPath)
ctx := context.Background()
result, err := cat.SyncFromDirectory(ctx, absDir)
if err != nil {
return err
}
// Update last sync time
cat.SetLastSync(ctx)
// Show results
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf(" Sync Results\n")
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf(" ✅ Added: %d\n", result.Added)
fmt.Printf(" 🔄 Updated: %d\n", result.Updated)
fmt.Printf(" 🗑️ Removed: %d\n", result.Removed)
if result.Errors > 0 {
fmt.Printf(" ❌ Errors: %d\n", result.Errors)
}
fmt.Printf(" ⏱️ Duration: %.2fs\n", result.Duration)
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
// Show details if verbose
if catalogVerbose && len(result.Details) > 0 {
fmt.Printf("\nDetails:\n")
for _, detail := range result.Details {
fmt.Printf(" %s\n", detail)
}
}
return nil
}
func runCatalogList(cmd *cobra.Command, args []string) error {
cat, err := openCatalog()
if err != nil {
return err
}
defer cat.Close()
ctx := context.Background()
query := &catalog.SearchQuery{
Database: catalogDatabase,
Limit: catalogLimit,
OrderBy: "created_at",
OrderDesc: true,
}
entries, err := cat.Search(ctx, query)
if err != nil {
return err
}
if len(entries) == 0 {
fmt.Println("No backups in catalog. Run 'dbbackup catalog sync <directory>' to import backups.")
return nil
}
if catalogFormat == "json" {
data, _ := json.MarshalIndent(entries, "", " ")
fmt.Println(string(data))
return nil
}
// Table format
fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",
"DATABASE", "TYPE", "SIZE", "CREATED", "STATUS", "PATH")
fmt.Println(strings.Repeat("─", 120))
for _, entry := range entries {
dbName := truncateString(entry.Database, 28)
backupPath := truncateString(filepath.Base(entry.BackupPath), 40)
status := string(entry.Status)
if entry.VerifyValid != nil && *entry.VerifyValid {
status = "✓ verified"
}
if entry.DrillSuccess != nil && *entry.DrillSuccess {
status = "✓ tested"
}
fmt.Printf("%-30s %-12s %-10s %-20s %-10s %s\n",
dbName,
entry.DatabaseType,
catalog.FormatSize(entry.SizeBytes),
entry.CreatedAt.Format("2006-01-02 15:04"),
status,
backupPath,
)
}
fmt.Printf("\nShowing %d of %d total backups\n", len(entries), len(entries))
return nil
}
func runCatalogStats(cmd *cobra.Command, args []string) error {
cat, err := openCatalog()
if err != nil {
return err
}
defer cat.Close()
ctx := context.Background()
var stats *catalog.Stats
if catalogDatabase != "" {
stats, err = cat.StatsByDatabase(ctx, catalogDatabase)
} else {
stats, err = cat.Stats(ctx)
}
if err != nil {
return err
}
if catalogFormat == "json" {
data, _ := json.MarshalIndent(stats, "", " ")
fmt.Println(string(data))
return nil
}
// Table format
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
if catalogDatabase != "" {
fmt.Printf(" Catalog Statistics: %s\n", catalogDatabase)
} else {
fmt.Printf(" Catalog Statistics\n")
}
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
fmt.Printf("📊 Total Backups: %d\n", stats.TotalBackups)
fmt.Printf("💾 Total Size: %s\n", stats.TotalSizeHuman)
fmt.Printf("📏 Average Size: %s\n", catalog.FormatSize(stats.AvgSize))
fmt.Printf("⏱️ Average Duration: %.1fs\n", stats.AvgDuration)
fmt.Printf("✅ Verified: %d\n", stats.VerifiedCount)
fmt.Printf("🧪 Drill Tested: %d\n", stats.DrillTestedCount)
if stats.OldestBackup != nil {
fmt.Printf("📅 Oldest Backup: %s\n", stats.OldestBackup.Format("2006-01-02 15:04"))
}
if stats.NewestBackup != nil {
fmt.Printf("📅 Newest Backup: %s\n", stats.NewestBackup.Format("2006-01-02 15:04"))
}
if len(stats.ByDatabase) > 0 && catalogDatabase == "" {
fmt.Printf("\n📁 By Database:\n")
for db, count := range stats.ByDatabase {
fmt.Printf(" %-30s %d\n", db, count)
}
}
if len(stats.ByType) > 0 {
fmt.Printf("\n📦 By Type:\n")
for t, count := range stats.ByType {
fmt.Printf(" %-15s %d\n", t, count)
}
}
if len(stats.ByStatus) > 0 {
fmt.Printf("\n📋 By Status:\n")
for s, count := range stats.ByStatus {
fmt.Printf(" %-15s %d\n", s, count)
}
}
fmt.Printf("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
return nil
}
func runCatalogGaps(cmd *cobra.Command, args []string) error {
cat, err := openCatalog()
if err != nil {
return err
}
defer cat.Close()
ctx := context.Background()
// Parse interval
interval, err := time.ParseDuration(catalogInterval)
if err != nil {
return fmt.Errorf("invalid interval: %w", err)
}
config := &catalog.GapDetectionConfig{
ExpectedInterval: interval,
Tolerance: interval / 4, // 25% tolerance
RPOThreshold: interval * 2, // 2x interval = critical
}
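// Example: --interval 24h yields Tolerance=6h and RPOThreshold=48h, so
// (assuming DetectGaps flags spacing beyond interval+tolerance) backups
// more than ~30h apart count as gaps and anything past 48h is critical.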
// Parse date range
if catalogStartDate != "" {
t, err := time.Parse("2006-01-02", catalogStartDate)
if err != nil {
return fmt.Errorf("invalid start date: %w", err)
}
config.StartDate = &t
}
if catalogEndDate != "" {
t, err := time.Parse("2006-01-02", catalogEndDate)
if err != nil {
return fmt.Errorf("invalid end date: %w", err)
}
config.EndDate = &t
}
var allGaps map[string][]*catalog.Gap
if len(args) > 0 {
// Specific database
database := args[0]
gaps, err := cat.DetectGaps(ctx, database, config)
if err != nil {
return err
}
if len(gaps) > 0 {
allGaps = map[string][]*catalog.Gap{database: gaps}
}
} else {
// All databases
allGaps, err = cat.DetectAllGaps(ctx, config)
if err != nil {
return err
}
}
if catalogFormat == "json" {
data, _ := json.MarshalIndent(allGaps, "", " ")
fmt.Println(string(data))
return nil
}
if len(allGaps) == 0 {
fmt.Printf("✅ No backup gaps detected (expected interval: %s)\n", interval)
return nil
}
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf(" Backup Gaps Detected (expected interval: %s)\n", interval)
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
totalGaps := 0
criticalGaps := 0
for database, gaps := range allGaps {
fmt.Printf("📁 %s (%d gaps)\n", database, len(gaps))
for _, gap := range gaps {
totalGaps++
icon := ""
switch gap.Severity {
case catalog.SeverityWarning:
icon = "⚠️"
case catalog.SeverityCritical:
icon = "🚨"
criticalGaps++
}
fmt.Printf(" %s %s\n", icon, gap.Description)
fmt.Printf(" Gap: %s → %s (%s)\n",
gap.GapStart.Format("2006-01-02 15:04"),
gap.GapEnd.Format("2006-01-02 15:04"),
catalog.FormatDuration(gap.Duration))
fmt.Printf(" Expected at: %s\n", gap.ExpectedAt.Format("2006-01-02 15:04"))
}
fmt.Println()
}
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf("Total: %d gaps detected", totalGaps)
if criticalGaps > 0 {
fmt.Printf(" (%d critical)", criticalGaps)
}
fmt.Println()
return nil
}
func runCatalogSearch(cmd *cobra.Command, args []string) error {
cat, err := openCatalog()
if err != nil {
return err
}
defer cat.Close()
ctx := context.Background()
query := &catalog.SearchQuery{
Database: catalogDatabase,
Limit: catalogLimit,
OrderBy: "created_at",
OrderDesc: true,
}
// Parse date range
if catalogStartDate != "" {
t, err := time.Parse("2006-01-02", catalogStartDate)
if err != nil {
return fmt.Errorf("invalid start date: %w", err)
}
query.StartDate = &t
}
if catalogEndDate != "" {
t, err := time.Parse("2006-01-02", catalogEndDate)
if err != nil {
return fmt.Errorf("invalid end date: %w", err)
}
query.EndDate = &t
}
// Boolean filters
if verified, _ := cmd.Flags().GetBool("verified"); verified {
t := true
query.Verified = &t
}
if encrypted, _ := cmd.Flags().GetBool("encrypted"); encrypted {
t := true
query.Encrypted = &t
}
if drillTested, _ := cmd.Flags().GetBool("drill-tested"); drillTested {
t := true
query.DrillTested = &t
}
entries, err := cat.Search(ctx, query)
if err != nil {
return err
}
if len(entries) == 0 {
fmt.Println("No matching backups found.")
return nil
}
if catalogFormat == "json" {
data, _ := json.MarshalIndent(entries, "", " ")
fmt.Println(string(data))
return nil
}
fmt.Printf("Found %d matching backups:\n\n", len(entries))
for _, entry := range entries {
fmt.Printf("📁 %s\n", entry.Database)
fmt.Printf(" Path: %s\n", entry.BackupPath)
fmt.Printf(" Type: %s | Size: %s | Created: %s\n",
entry.DatabaseType,
catalog.FormatSize(entry.SizeBytes),
entry.CreatedAt.Format("2006-01-02 15:04:05"))
if entry.Encrypted {
fmt.Printf(" 🔒 Encrypted\n")
}
if entry.VerifyValid != nil && *entry.VerifyValid {
fmt.Printf(" ✅ Verified: %s\n", entry.VerifiedAt.Format("2006-01-02 15:04"))
}
if entry.DrillSuccess != nil && *entry.DrillSuccess {
fmt.Printf(" 🧪 Drill Tested: %s\n", entry.DrillTestedAt.Format("2006-01-02 15:04"))
}
fmt.Println()
}
return nil
}
func runCatalogInfo(cmd *cobra.Command, args []string) error {
backupPath := args[0]
cat, err := openCatalog()
if err != nil {
return err
}
defer cat.Close()
ctx := context.Background()
// Try absolute path
absPath, _ := filepath.Abs(backupPath)
entry, err := cat.GetByPath(ctx, absPath)
if err != nil {
return err
}
if entry == nil {
// Try as provided
entry, err = cat.GetByPath(ctx, backupPath)
if err != nil {
return err
}
}
if entry == nil {
return fmt.Errorf("backup not found in catalog: %s", backupPath)
}
if catalogFormat == "json" {
data, _ := json.MarshalIndent(entry, "", " ")
fmt.Println(string(data))
return nil
}
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf(" Backup Details\n")
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
fmt.Printf("📁 Database: %s\n", entry.Database)
fmt.Printf("🔧 Type: %s\n", entry.DatabaseType)
fmt.Printf("🖥️ Host: %s:%d\n", entry.Host, entry.Port)
fmt.Printf("📂 Path: %s\n", entry.BackupPath)
fmt.Printf("📦 Backup Type: %s\n", entry.BackupType)
fmt.Printf("💾 Size: %s (%d bytes)\n", catalog.FormatSize(entry.SizeBytes), entry.SizeBytes)
fmt.Printf("🔐 SHA256: %s\n", entry.SHA256)
fmt.Printf("📅 Created: %s\n", entry.CreatedAt.Format("2006-01-02 15:04:05 MST"))
fmt.Printf("⏱️ Duration: %.2fs\n", entry.Duration)
fmt.Printf("📋 Status: %s\n", entry.Status)
if entry.Compression != "" {
fmt.Printf("📦 Compression: %s\n", entry.Compression)
}
if entry.Encrypted {
fmt.Printf("🔒 Encrypted: yes\n")
}
if entry.CloudLocation != "" {
fmt.Printf("☁️ Cloud: %s\n", entry.CloudLocation)
}
if entry.RetentionPolicy != "" {
fmt.Printf("📆 Retention: %s\n", entry.RetentionPolicy)
}
fmt.Printf("\n📊 Verification:\n")
if entry.VerifiedAt != nil {
status := "❌ Failed"
if entry.VerifyValid != nil && *entry.VerifyValid {
status = "✅ Valid"
}
fmt.Printf(" Status: %s (checked %s)\n", status, entry.VerifiedAt.Format("2006-01-02 15:04"))
} else {
fmt.Printf(" Status: ⏳ Not verified\n")
}
fmt.Printf("\n🧪 DR Drill Test:\n")
if entry.DrillTestedAt != nil {
status := "❌ Failed"
if entry.DrillSuccess != nil && *entry.DrillSuccess {
status = "✅ Passed"
}
fmt.Printf(" Status: %s (tested %s)\n", status, entry.DrillTestedAt.Format("2006-01-02 15:04"))
} else {
fmt.Printf(" Status: ⏳ Not tested\n")
}
if len(entry.Metadata) > 0 {
fmt.Printf("\n📝 Additional Metadata:\n")
for k, v := range entry.Metadata {
fmt.Printf(" %s: %s\n", k, v)
}
}
fmt.Printf("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
return nil
}
func truncateString(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
if maxLen <= 3 {
// Guard against a negative slice bound when maxLen is tiny.
return s[:maxLen]
}
return s[:maxLen-3] + "..."
}
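
For reference, the catalog package can also be driven directly from Go. A minimal sketch, assuming only the API surface exercised above (NewSQLiteCatalog, SyncFromDirectory, Search, Close); paths and the example database name are placeholders, and since the package is internal this only compiles inside the dbbackup module:

	package main

	import (
		"context"
		"fmt"
		"log"

		"dbbackup/internal/catalog"
	)

	func main() {
		// Open (or create) the SQLite-backed catalog.
		cat, err := catalog.NewSQLiteCatalog("/home/user/.dbbackup/catalog.db")
		if err != nil {
			log.Fatal(err)
		}
		defer cat.Close()

		ctx := context.Background()

		// Import backup metadata found under /backups.
		result, err := cat.SyncFromDirectory(ctx, "/backups")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("added %d, updated %d\n", result.Added, result.Updated)

		// List the ten most recent backups for one database.
		entries, err := cat.Search(ctx, &catalog.SearchQuery{
			Database:  "mydb",
			Limit:     10,
			OrderBy:   "created_at",
			OrderDesc: true,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range entries {
			fmt.Println(e.BackupPath, catalog.FormatSize(e.SizeBytes))
		}
	}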

cmd/drill.go (new file, 500 lines)

@@ -0,0 +1,500 @@
package cmd
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"dbbackup/internal/catalog"
"dbbackup/internal/drill"
"github.com/spf13/cobra"
)
var (
drillBackupPath string
drillDatabaseName string
drillDatabaseType string
drillImage string
drillPort int
drillTimeout int
drillRTOTarget int
drillKeepContainer bool
drillOutputDir string
drillFormat string
drillVerbose bool
drillExpectedTables string
drillMinRows int64
drillQueries string
)
// drillCmd represents the drill command group
var drillCmd = &cobra.Command{
Use: "drill",
Short: "Disaster Recovery drill testing",
Long: `Run DR drills to verify backup restorability.
A DR drill:
1. Spins up a temporary Docker container
2. Restores the backup into the container
3. Runs validation queries
4. Generates a detailed report
5. Cleans up the container
This answers the critical question: "Can I restore this backup at 3 AM?"
Examples:
# Run a drill on a PostgreSQL backup
dbbackup drill run backup.dump.gz --database mydb --type postgresql
# Run with validation queries
dbbackup drill run backup.dump.gz --database mydb --type postgresql \
--validate "SELECT COUNT(*) FROM users" \
--min-rows 1000
# Quick test with minimal validation
dbbackup drill quick backup.dump.gz --database mydb
# List all drill containers
dbbackup drill list
# Cleanup old drill containers
dbbackup drill cleanup`,
}
// drillRunCmd runs a DR drill
var drillRunCmd = &cobra.Command{
Use: "run [backup-file]",
Short: "Run a DR drill on a backup",
Long: `Execute a complete DR drill on a backup file.
This will:
1. Pull the appropriate database Docker image
2. Start a temporary container
3. Restore the backup
4. Run validation queries
5. Calculate RTO metrics
6. Generate a report
Examples:
# Basic drill
dbbackup drill run /backups/mydb_20240115.dump.gz --database mydb --type postgresql
# With RTO target (5 minutes)
dbbackup drill run /backups/mydb.dump.gz --database mydb --type postgresql --rto 300
# With expected tables validation
dbbackup drill run /backups/mydb.dump.gz --database mydb --type postgresql \
--tables "users,orders,products"
# Keep container on failure for debugging
dbbackup drill run /backups/mydb.dump.gz --database mydb --type postgresql --keep`,
Args: cobra.ExactArgs(1),
RunE: runDrill,
}
// drillQuickCmd runs a quick test
var drillQuickCmd = &cobra.Command{
Use: "quick [backup-file]",
Short: "Quick restore test with minimal validation",
Long: `Run a quick DR test that only verifies the backup can be restored.
This is faster than a full drill but provides less validation.
Examples:
# Quick test a PostgreSQL backup
dbbackup drill quick /backups/mydb.dump.gz --database mydb --type postgresql
# Quick test a MySQL backup
dbbackup drill quick /backups/mydb.sql.gz --database mydb --type mysql`,
Args: cobra.ExactArgs(1),
RunE: runQuickDrill,
}
// drillListCmd lists drill containers
var drillListCmd = &cobra.Command{
Use: "list",
Short: "List DR drill containers",
Long: `List all Docker containers created by DR drills.
Shows containers that may still be running or stopped from previous drills.`,
RunE: runDrillList,
}
// drillCleanupCmd cleans up drill resources
var drillCleanupCmd = &cobra.Command{
Use: "cleanup [drill-id]",
Short: "Cleanup DR drill containers",
Long: `Remove containers created by DR drills.
If no drill ID is specified, removes all drill containers.
Examples:
# Cleanup all drill containers
dbbackup drill cleanup
# Cleanup specific drill
dbbackup drill cleanup drill_20240115_120000`,
RunE: runDrillCleanup,
}
// drillReportCmd shows a drill report
var drillReportCmd = &cobra.Command{
Use: "report [report-file]",
Short: "Display a DR drill report",
Long: `Display a previously saved DR drill report.
Examples:
# Show report
dbbackup drill report drill_20240115_120000_report.json
# Show as JSON
dbbackup drill report drill_20240115_120000_report.json --format json`,
Args: cobra.ExactArgs(1),
RunE: runDrillReport,
}
func init() {
rootCmd.AddCommand(drillCmd)
// Add subcommands
drillCmd.AddCommand(drillRunCmd)
drillCmd.AddCommand(drillQuickCmd)
drillCmd.AddCommand(drillListCmd)
drillCmd.AddCommand(drillCleanupCmd)
drillCmd.AddCommand(drillReportCmd)
// Run command flags
drillRunCmd.Flags().StringVar(&drillDatabaseName, "database", "", "Target database name (required)")
drillRunCmd.Flags().StringVar(&drillDatabaseType, "type", "", "Database type: postgresql, mysql, mariadb (required)")
drillRunCmd.Flags().StringVar(&drillImage, "image", "", "Docker image (default: auto-detect)")
drillRunCmd.Flags().IntVar(&drillPort, "port", 0, "Host port for container (default: 15432/13306)")
drillRunCmd.Flags().IntVar(&drillTimeout, "timeout", 60, "Container startup timeout in seconds")
drillRunCmd.Flags().IntVar(&drillRTOTarget, "rto", 300, "RTO target in seconds")
drillRunCmd.Flags().BoolVar(&drillKeepContainer, "keep", false, "Keep container after drill")
drillRunCmd.Flags().StringVar(&drillOutputDir, "output", "", "Output directory for reports")
drillRunCmd.Flags().StringVar(&drillFormat, "format", "table", "Output format: table, json")
drillRunCmd.Flags().BoolVarP(&drillVerbose, "verbose", "v", false, "Verbose output")
drillRunCmd.Flags().StringVar(&drillExpectedTables, "tables", "", "Expected tables (comma-separated)")
drillRunCmd.Flags().Int64Var(&drillMinRows, "min-rows", 0, "Minimum expected row count")
drillRunCmd.Flags().StringVar(&drillQueries, "validate", "", "Validation SQL query")
drillRunCmd.MarkFlagRequired("database")
drillRunCmd.MarkFlagRequired("type")
// Quick command flags
drillQuickCmd.Flags().StringVar(&drillDatabaseName, "database", "", "Target database name (required)")
drillQuickCmd.Flags().StringVar(&drillDatabaseType, "type", "", "Database type: postgresql, mysql, mariadb (required)")
drillQuickCmd.Flags().BoolVarP(&drillVerbose, "verbose", "v", false, "Verbose output")
drillQuickCmd.MarkFlagRequired("database")
drillQuickCmd.MarkFlagRequired("type")
// Report command flags
drillReportCmd.Flags().StringVar(&drillFormat, "format", "table", "Output format: table, json")
}
func runDrill(cmd *cobra.Command, args []string) error {
backupPath := args[0]
// Validate backup file exists
absPath, err := filepath.Abs(backupPath)
if err != nil {
return fmt.Errorf("invalid backup path: %w", err)
}
if _, err := os.Stat(absPath); err != nil {
return fmt.Errorf("backup file not found: %s", absPath)
}
// Build drill config
config := drill.DefaultConfig()
config.BackupPath = absPath
config.DatabaseName = drillDatabaseName
config.DatabaseType = drillDatabaseType
config.ContainerImage = drillImage
config.ContainerPort = drillPort
config.ContainerTimeout = drillTimeout
config.MaxRestoreSeconds = drillRTOTarget
config.CleanupOnExit = !drillKeepContainer
config.KeepOnFailure = true
config.OutputDir = drillOutputDir
config.Verbose = drillVerbose
// Parse expected tables
if drillExpectedTables != "" {
config.ExpectedTables = strings.Split(drillExpectedTables, ",")
for i := range config.ExpectedTables {
config.ExpectedTables[i] = strings.TrimSpace(config.ExpectedTables[i])
}
}
// Set minimum row count
config.MinRowCount = drillMinRows
// Add validation query if provided
if drillQueries != "" {
config.ValidationQueries = append(config.ValidationQueries, drill.ValidationQuery{
Name: "Custom Query",
Query: drillQueries,
MustSucceed: true,
})
}
// Create drill engine
engine := drill.NewEngine(log, drillVerbose)
// Run drill
ctx := cmd.Context()
result, err := engine.Run(ctx, config)
if err != nil {
return err
}
// Update catalog if available
updateCatalogWithDrillResult(ctx, absPath, result)
// Output result
if drillFormat == "json" {
data, _ := json.MarshalIndent(result, "", " ")
fmt.Println(string(data))
} else {
printDrillResult(result)
}
if !result.Success {
return fmt.Errorf("drill failed: %s", result.Message)
}
return nil
}
func runQuickDrill(cmd *cobra.Command, args []string) error {
backupPath := args[0]
absPath, err := filepath.Abs(backupPath)
if err != nil {
return fmt.Errorf("invalid backup path: %w", err)
}
if _, err := os.Stat(absPath); err != nil {
return fmt.Errorf("backup file not found: %s", absPath)
}
engine := drill.NewEngine(log, drillVerbose)
ctx := cmd.Context()
result, err := engine.QuickTest(ctx, absPath, drillDatabaseType, drillDatabaseName)
if err != nil {
return err
}
// Update catalog
updateCatalogWithDrillResult(ctx, absPath, result)
printDrillResult(result)
if !result.Success {
return fmt.Errorf("quick test failed: %s", result.Message)
}
return nil
}
func runDrillList(cmd *cobra.Command, args []string) error {
docker := drill.NewDockerManager(false)
ctx := cmd.Context()
containers, err := docker.ListDrillContainers(ctx)
if err != nil {
return err
}
if len(containers) == 0 {
fmt.Println("No drill containers found.")
return nil
}
fmt.Printf("%-15s %-40s %-20s %s\n", "ID", "NAME", "IMAGE", "STATUS")
fmt.Println(strings.Repeat("─", 100))
for _, c := range containers {
fmt.Printf("%-15s %-40s %-20s %s\n",
c.ID[:12],
truncateString(c.Name, 38),
truncateString(c.Image, 18),
c.Status,
)
}
return nil
}
func runDrillCleanup(cmd *cobra.Command, args []string) error {
drillID := ""
if len(args) > 0 {
drillID = args[0]
}
engine := drill.NewEngine(log, true)
ctx := cmd.Context()
if err := engine.Cleanup(ctx, drillID); err != nil {
return err
}
fmt.Println("✅ Cleanup completed")
return nil
}
func runDrillReport(cmd *cobra.Command, args []string) error {
reportPath := args[0]
result, err := drill.LoadResult(reportPath)
if err != nil {
return err
}
if drillFormat == "json" {
data, _ := json.MarshalIndent(result, "", " ")
fmt.Println(string(data))
} else {
printDrillResult(result)
}
return nil
}
func printDrillResult(result *drill.DrillResult) {
fmt.Printf("\n")
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf(" DR Drill Report: %s\n", result.DrillID)
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n")
status := "✅ PASSED"
if !result.Success {
status = "❌ FAILED"
} else if result.Status == drill.StatusPartial {
status = "⚠️ PARTIAL"
}
fmt.Printf("📋 Status: %s\n", status)
fmt.Printf("💾 Backup: %s\n", filepath.Base(result.BackupPath))
fmt.Printf("🗄️ Database: %s (%s)\n", result.DatabaseName, result.DatabaseType)
fmt.Printf("⏱️ Duration: %.2fs\n", result.Duration)
fmt.Printf("📅 Started: %s\n", result.StartTime.Format(time.RFC3339))
fmt.Printf("\n")
// Phases
fmt.Printf("📊 Phases:\n")
for _, phase := range result.Phases {
icon := "✅"
if phase.Status == "failed" {
icon = "❌"
} else if phase.Status == "running" {
icon = "🔄"
}
fmt.Printf(" %s %-20s (%.2fs) %s\n", icon, phase.Name, phase.Duration, phase.Message)
}
fmt.Printf("\n")
// Metrics
fmt.Printf("📈 Metrics:\n")
fmt.Printf(" Tables: %d\n", result.TableCount)
fmt.Printf(" Total Rows: %d\n", result.TotalRows)
fmt.Printf(" Restore Time: %.2fs\n", result.RestoreTime)
fmt.Printf(" Validation: %.2fs\n", result.ValidationTime)
if result.QueryTimeAvg > 0 {
fmt.Printf(" Avg Query Time: %.0fms\n", result.QueryTimeAvg)
}
fmt.Printf("\n")
// RTO
fmt.Printf("⏱️ RTO Analysis:\n")
rtoIcon := "✅"
if !result.RTOMet {
rtoIcon = "❌"
}
fmt.Printf(" Actual RTO: %.2fs\n", result.ActualRTO)
fmt.Printf(" Target RTO: %.0fs\n", result.TargetRTO)
fmt.Printf(" RTO Met: %s\n", rtoIcon)
fmt.Printf("\n")
// Validation results
if len(result.ValidationResults) > 0 {
fmt.Printf("🔍 Validation Queries:\n")
for _, vr := range result.ValidationResults {
icon := "✅"
if !vr.Success {
icon = "❌"
}
fmt.Printf(" %s %s: %s\n", icon, vr.Name, vr.Result)
if vr.Error != "" {
fmt.Printf(" Error: %s\n", vr.Error)
}
}
fmt.Printf("\n")
}
// Check results
if len(result.CheckResults) > 0 {
fmt.Printf("✓ Checks:\n")
for _, cr := range result.CheckResults {
icon := "✅"
if !cr.Success {
icon = "❌"
}
fmt.Printf(" %s %s\n", icon, cr.Message)
}
fmt.Printf("\n")
}
// Errors and warnings
if len(result.Errors) > 0 {
fmt.Printf("❌ Errors:\n")
for _, e := range result.Errors {
fmt.Printf(" • %s\n", e)
}
fmt.Printf("\n")
}
if len(result.Warnings) > 0 {
fmt.Printf("⚠️ Warnings:\n")
for _, w := range result.Warnings {
fmt.Printf(" • %s\n", w)
}
fmt.Printf("\n")
}
// Container info
if result.ContainerKept {
fmt.Printf("📦 Container kept: %s\n", result.ContainerID[:12])
fmt.Printf(" Connect with: docker exec -it %s bash\n", result.ContainerID[:12])
fmt.Printf("\n")
}
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
fmt.Printf(" %s\n", result.Message)
fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n")
}
func updateCatalogWithDrillResult(ctx context.Context, backupPath string, result *drill.DrillResult) {
// Try to update the catalog with drill results
cat, err := catalog.NewSQLiteCatalog(catalogDBPath)
if err != nil {
return // Catalog not available, skip
}
defer cat.Close()
entry, err := cat.GetByPath(ctx, backupPath)
if err != nil || entry == nil {
return // Entry not in catalog
}
// Update drill status
if err := cat.MarkDrillTested(ctx, entry.ID, result.Success); err != nil {
log.Debug("Failed to update catalog drill status", "error", err)
}
}
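
The drill engine can likewise be embedded. A minimal sketch using the calls that appear in this file; the logger package and its constructor are hypothetical, since the type behind the package-level log variable is not shown in this diff, and the internal import paths only resolve inside the dbbackup module:

	package main

	import (
		"context"
		"fmt"

		"dbbackup/internal/drill"
		"dbbackup/internal/logger" // hypothetical: the real logger package is not shown in this diff
	)

	func main() {
		cfg := drill.DefaultConfig()
		cfg.BackupPath = "/backups/mydb_20240115.dump.gz"
		cfg.DatabaseName = "mydb"
		cfg.DatabaseType = "postgresql"
		cfg.MaxRestoreSeconds = 300 // RTO target in seconds

		log := logger.New() // hypothetical constructor
		engine := drill.NewEngine(log, true)

		result, err := engine.Run(context.Background(), cfg)
		if err != nil {
			panic(err)
		}
		fmt.Println(result.Success, result.Message)
	}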


@@ -2,10 +2,15 @@ package cmd
import (
"context"
"database/sql"
"fmt"
"os"
"path/filepath"
"time"
"github.com/spf13/cobra"
"dbbackup/internal/pitr"
"dbbackup/internal/wal"
)
@@ -32,6 +37,14 @@ var (
pitrTargetImmediate bool
pitrRecoveryAction string
pitrWALSource string
// MySQL PITR flags
mysqlBinlogDir string
mysqlArchiveDir string
mysqlArchiveInterval string
mysqlRequireRowFormat bool
mysqlRequireGTID bool
mysqlWatchMode bool
)
// pitrCmd represents the pitr command group
@@ -183,21 +196,180 @@ Example:
RunE: runWALTimeline,
}
// ============================================================================
// MySQL/MariaDB Binlog Commands
// ============================================================================
// binlogCmd represents the binlog command group (MySQL equivalent of WAL)
var binlogCmd = &cobra.Command{
Use: "binlog",
Short: "Binary log operations for MySQL/MariaDB",
Long: `Manage MySQL/MariaDB binary log files for Point-in-Time Recovery.
Binary logs contain all changes made to the database and are essential
for Point-in-Time Recovery (PITR) with MySQL and MariaDB.
Commands:
list - List available binlog files
archive - Archive binlog files
watch - Watch for new binlog files and archive them
validate - Validate binlog chain integrity
position - Show current binlog position
`,
}
// binlogListCmd lists binary log files
var binlogListCmd = &cobra.Command{
Use: "list",
Short: "List binary log files",
Long: `List all available binary log files from the MySQL data directory
and/or the archive directory.
Shows: filename, size, timestamps, server_id, and format for each binlog.
Examples:
dbbackup binlog list --binlog-dir /var/lib/mysql
dbbackup binlog list --archive-dir /backups/binlog_archive
`,
RunE: runBinlogList,
}
// binlogArchiveCmd archives binary log files
var binlogArchiveCmd = &cobra.Command{
Use: "archive",
Short: "Archive binary log files",
Long: `Archive MySQL binary log files to a backup location.
This command copies completed binlog files (not the currently active one)
to the archive directory, optionally with compression and encryption.
Examples:
dbbackup binlog archive --binlog-dir /var/lib/mysql --archive-dir /backups/binlog
dbbackup binlog archive --compress --archive-dir /backups/binlog
`,
RunE: runBinlogArchive,
}
// binlogWatchCmd watches for new binlogs and archives them
var binlogWatchCmd = &cobra.Command{
Use: "watch",
Short: "Watch for new binlog files and archive them automatically",
Long: `Continuously monitor the binlog directory for new files and
archive them automatically when they are closed.
This runs as a background process and provides continuous binlog archiving
for PITR capability.
Example:
dbbackup binlog watch --binlog-dir /var/lib/mysql --archive-dir /backups/binlog --interval 30s
`,
RunE: runBinlogWatch,
}
// binlogValidateCmd validates binlog chain
var binlogValidateCmd = &cobra.Command{
Use: "validate",
Short: "Validate binlog chain integrity",
Long: `Check the binary log chain for gaps or inconsistencies.
Validates:
- Sequential numbering of binlog files
- No missing files in the chain
- Server ID consistency
- GTID continuity (if enabled)
Example:
dbbackup binlog validate --binlog-dir /var/lib/mysql
dbbackup binlog validate --archive-dir /backups/binlog
`,
RunE: runBinlogValidate,
}
// binlogPositionCmd shows current binlog position
var binlogPositionCmd = &cobra.Command{
Use: "position",
Short: "Show current binary log position",
Long: `Display the current MySQL binary log position.
This connects to MySQL and runs SHOW MASTER STATUS to get:
- Current binlog filename
- Current byte position
- Executed GTID set (if GTID mode is enabled)
Example:
dbbackup binlog position
`,
RunE: runBinlogPosition,
}
// mysqlPitrStatusCmd shows MySQL-specific PITR status
var mysqlPitrStatusCmd = &cobra.Command{
Use: "mysql-status",
Short: "Show MySQL/MariaDB PITR status",
Long: `Display MySQL/MariaDB-specific PITR configuration and status.
Shows:
- Binary log configuration (log_bin, binlog_format)
- GTID mode status
- Archive directory and statistics
- Current binlog position
- Recovery windows available
Example:
dbbackup pitr mysql-status
`,
RunE: runMySQLPITRStatus,
}
// mysqlPitrEnableCmd enables MySQL PITR
var mysqlPitrEnableCmd = &cobra.Command{
Use: "mysql-enable",
Short: "Enable PITR for MySQL/MariaDB",
Long: `Configure MySQL/MariaDB for Point-in-Time Recovery.
This validates MySQL settings and sets up binlog archiving:
- Checks binary logging is enabled (log_bin=ON)
- Validates binlog_format (ROW recommended)
- Creates archive directory
- Saves PITR configuration
Prerequisites in my.cnf:
[mysqld]
log_bin = mysql-bin
binlog_format = ROW
server_id = 1
Example:
dbbackup pitr mysql-enable --archive-dir /backups/binlog_archive
`,
RunE: runMySQLPITREnable,
}
func init() {
rootCmd.AddCommand(pitrCmd)
rootCmd.AddCommand(walCmd)
rootCmd.AddCommand(binlogCmd)
// PITR subcommands
pitrCmd.AddCommand(pitrEnableCmd)
pitrCmd.AddCommand(pitrDisableCmd)
pitrCmd.AddCommand(pitrStatusCmd)
pitrCmd.AddCommand(mysqlPitrStatusCmd)
pitrCmd.AddCommand(mysqlPitrEnableCmd)
// WAL subcommands
// WAL subcommands (PostgreSQL)
walCmd.AddCommand(walArchiveCmd)
walCmd.AddCommand(walListCmd)
walCmd.AddCommand(walCleanupCmd)
walCmd.AddCommand(walTimelineCmd)
// Binlog subcommands (MySQL/MariaDB)
binlogCmd.AddCommand(binlogListCmd)
binlogCmd.AddCommand(binlogArchiveCmd)
binlogCmd.AddCommand(binlogWatchCmd)
binlogCmd.AddCommand(binlogValidateCmd)
binlogCmd.AddCommand(binlogPositionCmd)
// PITR enable flags
pitrEnableCmd.Flags().StringVar(&pitrArchiveDir, "archive-dir", "/var/backups/wal_archive", "Directory to store WAL archives")
pitrEnableCmd.Flags().BoolVar(&pitrForce, "force", false, "Overwrite existing PITR configuration")
@@ -219,6 +391,33 @@ func init() {
// WAL timeline flags
walTimelineCmd.Flags().StringVar(&walArchiveDir, "archive-dir", "/var/backups/wal_archive", "WAL archive directory")
// MySQL binlog flags
binlogListCmd.Flags().StringVar(&mysqlBinlogDir, "binlog-dir", "/var/lib/mysql", "MySQL binary log directory")
binlogListCmd.Flags().StringVar(&mysqlArchiveDir, "archive-dir", "", "Binlog archive directory")
binlogArchiveCmd.Flags().StringVar(&mysqlBinlogDir, "binlog-dir", "/var/lib/mysql", "MySQL binary log directory")
binlogArchiveCmd.Flags().StringVar(&mysqlArchiveDir, "archive-dir", "/var/backups/binlog_archive", "Binlog archive directory")
binlogArchiveCmd.Flags().BoolVar(&walCompress, "compress", false, "Compress binlog files")
binlogArchiveCmd.Flags().BoolVar(&walEncrypt, "encrypt", false, "Encrypt binlog files")
binlogArchiveCmd.Flags().StringVar(&walEncryptionKeyFile, "encryption-key-file", "", "Path to encryption key file")
binlogArchiveCmd.MarkFlagRequired("archive-dir")
binlogWatchCmd.Flags().StringVar(&mysqlBinlogDir, "binlog-dir", "/var/lib/mysql", "MySQL binary log directory")
binlogWatchCmd.Flags().StringVar(&mysqlArchiveDir, "archive-dir", "/var/backups/binlog_archive", "Binlog archive directory")
binlogWatchCmd.Flags().StringVar(&mysqlArchiveInterval, "interval", "30s", "Check interval for new binlogs")
binlogWatchCmd.Flags().BoolVar(&walCompress, "compress", false, "Compress binlog files")
binlogWatchCmd.MarkFlagRequired("archive-dir")
binlogValidateCmd.Flags().StringVar(&mysqlBinlogDir, "binlog-dir", "/var/lib/mysql", "MySQL binary log directory")
binlogValidateCmd.Flags().StringVar(&mysqlArchiveDir, "archive-dir", "", "Binlog archive directory")
// MySQL PITR enable flags
mysqlPitrEnableCmd.Flags().StringVar(&mysqlArchiveDir, "archive-dir", "/var/backups/binlog_archive", "Binlog archive directory")
mysqlPitrEnableCmd.Flags().IntVar(&walRetentionDays, "retention-days", 7, "Days to keep archived binlogs")
mysqlPitrEnableCmd.Flags().BoolVar(&mysqlRequireRowFormat, "require-row-format", true, "Require ROW binlog format")
mysqlPitrEnableCmd.Flags().BoolVar(&mysqlRequireGTID, "require-gtid", false, "Require GTID mode enabled")
mysqlPitrEnableCmd.MarkFlagRequired("archive-dir")
}
// Command implementations
@@ -512,3 +711,614 @@ func formatWALSize(bytes int64) string {
}
return fmt.Sprintf("%.1f KB", float64(bytes)/float64(KB))
}
// ============================================================================
// MySQL/MariaDB Binlog Command Implementations
// ============================================================================
func runBinlogList(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if !cfg.IsMySQL() {
return fmt.Errorf("binlog commands are only supported for MySQL/MariaDB (detected: %s)", cfg.DisplayDatabaseType())
}
binlogDir := mysqlBinlogDir
if binlogDir == "" && mysqlArchiveDir != "" {
binlogDir = mysqlArchiveDir
}
if binlogDir == "" {
return fmt.Errorf("please specify --binlog-dir or --archive-dir")
}
bmConfig := pitr.BinlogManagerConfig{
BinlogDir: binlogDir,
ArchiveDir: mysqlArchiveDir,
}
bm, err := pitr.NewBinlogManager(bmConfig)
if err != nil {
return fmt.Errorf("initializing binlog manager: %w", err)
}
// List binlogs from source directory
binlogs, err := bm.DiscoverBinlogs(ctx)
if err != nil {
return fmt.Errorf("discovering binlogs: %w", err)
}
// Also list archived binlogs if archive dir is specified
var archived []pitr.BinlogArchiveInfo
if mysqlArchiveDir != "" {
archived, _ = bm.ListArchivedBinlogs(ctx)
}
if len(binlogs) == 0 && len(archived) == 0 {
fmt.Println("No binary log files found")
return nil
}
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Printf(" Binary Log Files (%s)\n", bm.ServerType())
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println()
if len(binlogs) > 0 {
fmt.Println("Source Directory:")
fmt.Printf("%-24s %10s %-19s %-19s %s\n", "Filename", "Size", "Start Time", "End Time", "Format")
fmt.Println("────────────────────────────────────────────────────────────────────────────────")
var totalSize int64
for _, b := range binlogs {
size := formatWALSize(b.Size)
totalSize += b.Size
startTime := "unknown"
endTime := "unknown"
if !b.StartTime.IsZero() {
startTime = b.StartTime.Format("2006-01-02 15:04:05")
}
if !b.EndTime.IsZero() {
endTime = b.EndTime.Format("2006-01-02 15:04:05")
}
format := b.Format
if format == "" {
format = "-"
}
fmt.Printf("%-24s %10s %-19s %-19s %s\n", b.Name, size, startTime, endTime, format)
}
fmt.Printf("\nTotal: %d files, %s\n", len(binlogs), formatWALSize(totalSize))
}
if len(archived) > 0 {
fmt.Println()
fmt.Println("Archived Binlogs:")
fmt.Printf("%-24s %10s %-19s %s\n", "Original", "Size", "Archived At", "Flags")
fmt.Println("────────────────────────────────────────────────────────────────────────────────")
var totalSize int64
for _, a := range archived {
size := formatWALSize(a.Size)
totalSize += a.Size
archivedTime := a.ArchivedAt.Format("2006-01-02 15:04:05")
flags := ""
if a.Compressed {
flags += "C"
}
if a.Encrypted {
flags += "E"
}
if flags != "" {
flags = "[" + flags + "]"
}
fmt.Printf("%-24s %10s %-19s %s\n", a.OriginalFile, size, archivedTime, flags)
}
fmt.Printf("\nTotal archived: %d files, %s\n", len(archived), formatWALSize(totalSize))
}
return nil
}
func runBinlogArchive(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if !cfg.IsMySQL() {
return fmt.Errorf("binlog commands are only supported for MySQL/MariaDB")
}
if mysqlBinlogDir == "" {
return fmt.Errorf("--binlog-dir is required")
}
// Load encryption key if needed
var encryptionKey []byte
if walEncrypt {
key, err := loadEncryptionKey(walEncryptionKeyFile, walEncryptionKeyEnv)
if err != nil {
return fmt.Errorf("failed to load encryption key: %w", err)
}
encryptionKey = key
}
bmConfig := pitr.BinlogManagerConfig{
BinlogDir: mysqlBinlogDir,
ArchiveDir: mysqlArchiveDir,
Compression: walCompress,
Encryption: walEncrypt,
EncryptionKey: encryptionKey,
}
bm, err := pitr.NewBinlogManager(bmConfig)
if err != nil {
return fmt.Errorf("initializing binlog manager: %w", err)
}
// Discover binlogs
binlogs, err := bm.DiscoverBinlogs(ctx)
if err != nil {
return fmt.Errorf("discovering binlogs: %w", err)
}
// Get already archived
archived, _ := bm.ListArchivedBinlogs(ctx)
archivedSet := make(map[string]struct{})
for _, a := range archived {
archivedSet[a.OriginalFile] = struct{}{}
}
// Determining the active binlog reliably requires querying MySQL for the
// current position; for now, approximate by skipping the file that was
// modified most recently.
var latestModTime int64
var latestBinlog string
for _, b := range binlogs {
if b.ModTime.Unix() > latestModTime {
latestModTime = b.ModTime.Unix()
latestBinlog = b.Name
}
}
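// (runBinlogPosition below shows how SHOW MASTER STATUS identifies the
// active file authoritatively.)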
var newArchives []pitr.BinlogArchiveInfo
for i := range binlogs {
b := &binlogs[i]
// Skip if already archived
if _, exists := archivedSet[b.Name]; exists {
log.Info("Skipping already archived", "binlog", b.Name)
continue
}
// Skip the most recently modified (likely active)
if b.Name == latestBinlog {
log.Info("Skipping active binlog", "binlog", b.Name)
continue
}
log.Info("Archiving binlog", "binlog", b.Name, "size", formatWALSize(b.Size))
archiveInfo, err := bm.ArchiveBinlog(ctx, b)
if err != nil {
log.Error("Failed to archive binlog", "binlog", b.Name, "error", err)
continue
}
newArchives = append(newArchives, *archiveInfo)
}
// Update metadata
if len(newArchives) > 0 {
allArchived, _ := bm.ListArchivedBinlogs(ctx)
bm.SaveArchiveMetadata(allArchived)
}
log.Info("✅ Binlog archiving completed", "archived", len(newArchives))
return nil
}
func runBinlogWatch(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if !cfg.IsMySQL() {
return fmt.Errorf("binlog commands are only supported for MySQL/MariaDB")
}
interval, err := time.ParseDuration(mysqlArchiveInterval)
if err != nil {
return fmt.Errorf("invalid interval: %w", err)
}
bmConfig := pitr.BinlogManagerConfig{
BinlogDir: mysqlBinlogDir,
ArchiveDir: mysqlArchiveDir,
Compression: walCompress,
}
bm, err := pitr.NewBinlogManager(bmConfig)
if err != nil {
return fmt.Errorf("initializing binlog manager: %w", err)
}
log.Info("Starting binlog watcher",
"binlog_dir", mysqlBinlogDir,
"archive_dir", mysqlArchiveDir,
"interval", interval)
// Watch for new binlogs
err = bm.WatchBinlogs(ctx, interval, func(b *pitr.BinlogFile) {
log.Info("New binlog detected, archiving", "binlog", b.Name)
archiveInfo, err := bm.ArchiveBinlog(ctx, b)
if err != nil {
log.Error("Failed to archive binlog", "binlog", b.Name, "error", err)
return
}
log.Info("Binlog archived successfully",
"binlog", b.Name,
"archive", archiveInfo.ArchivePath,
"size", formatWALSize(archiveInfo.Size))
// Update metadata
allArchived, _ := bm.ListArchivedBinlogs(ctx)
bm.SaveArchiveMetadata(allArchived)
})
if err != nil && err != context.Canceled {
return err
}
return nil
}
func runBinlogValidate(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if !cfg.IsMySQL() {
return fmt.Errorf("binlog commands are only supported for MySQL/MariaDB")
}
binlogDir := mysqlBinlogDir
if binlogDir == "" {
binlogDir = mysqlArchiveDir
}
if binlogDir == "" {
return fmt.Errorf("please specify --binlog-dir or --archive-dir")
}
bmConfig := pitr.BinlogManagerConfig{
BinlogDir: binlogDir,
ArchiveDir: mysqlArchiveDir,
}
bm, err := pitr.NewBinlogManager(bmConfig)
if err != nil {
return fmt.Errorf("initializing binlog manager: %w", err)
}
// Discover binlogs
binlogs, err := bm.DiscoverBinlogs(ctx)
if err != nil {
return fmt.Errorf("discovering binlogs: %w", err)
}
if len(binlogs) == 0 {
fmt.Println("No binlog files found to validate")
return nil
}
// Validate chain
validation, err := bm.ValidateBinlogChain(ctx, binlogs)
if err != nil {
return fmt.Errorf("validating binlog chain: %w", err)
}
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println(" Binlog Chain Validation")
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println()
if validation.Valid {
fmt.Println("Status: ✅ VALID - Binlog chain is complete")
} else {
fmt.Println("Status: ❌ INVALID - Binlog chain has gaps")
}
fmt.Printf("Files: %d binlog files\n", validation.LogCount)
fmt.Printf("Total Size: %s\n", formatWALSize(validation.TotalSize))
if validation.StartPos != nil {
fmt.Printf("Start: %s\n", validation.StartPos.String())
}
if validation.EndPos != nil {
fmt.Printf("End: %s\n", validation.EndPos.String())
}
if len(validation.Gaps) > 0 {
fmt.Println()
fmt.Println("Gaps Found:")
for _, gap := range validation.Gaps {
fmt.Printf(" • After %s, before %s: %s\n", gap.After, gap.Before, gap.Reason)
}
}
if len(validation.Warnings) > 0 {
fmt.Println()
fmt.Println("Warnings:")
for _, w := range validation.Warnings {
fmt.Printf(" ⚠ %s\n", w)
}
}
if len(validation.Errors) > 0 {
fmt.Println()
fmt.Println("Errors:")
for _, e := range validation.Errors {
fmt.Printf(" ✗ %s\n", e)
}
}
if !validation.Valid {
os.Exit(1)
}
return nil
}
func runBinlogPosition(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if !cfg.IsMySQL() {
return fmt.Errorf("binlog commands are only supported for MySQL/MariaDB")
}
// Connect to MySQL
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/",
cfg.User, cfg.Password, cfg.Host, cfg.Port)
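// Note: sql.Open("mysql", ...) requires a registered driver, e.g. a blank
// import of github.com/go-sql-driver/mysql elsewhere in the package
// (assumed; the import hunk is not shown here).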
db, err := sql.Open("mysql", dsn)
if err != nil {
return fmt.Errorf("connecting to MySQL: %w", err)
}
defer db.Close()
if err := db.PingContext(ctx); err != nil {
return fmt.Errorf("pinging MySQL: %w", err)
}
// Get binlog position using raw query
rows, err := db.QueryContext(ctx, "SHOW MASTER STATUS")
if err != nil {
return fmt.Errorf("getting master status: %w", err)
}
defer rows.Close()
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println(" Current Binary Log Position")
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println()
if rows.Next() {
var file string
var position uint64
var binlogDoDB, binlogIgnoreDB, executedGtidSet sql.NullString
cols, _ := rows.Columns()
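// MySQL 5.6+ returns five columns (the fifth being Executed_Gtid_Set);
// MariaDB and older MySQL return four.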
switch len(cols) {
case 5:
err = rows.Scan(&file, &position, &binlogDoDB, &binlogIgnoreDB, &executedGtidSet)
case 4:
err = rows.Scan(&file, &position, &binlogDoDB, &binlogIgnoreDB)
default:
err = rows.Scan(&file, &position)
}
if err != nil {
return fmt.Errorf("scanning master status: %w", err)
}
fmt.Printf("File: %s\n", file)
fmt.Printf("Position: %d\n", position)
if executedGtidSet.Valid && executedGtidSet.String != "" {
fmt.Printf("GTID Set: %s\n", executedGtidSet.String)
}
// Compact format for use in restore commands
fmt.Println()
fmt.Printf("Position String: %s:%d\n", file, position)
} else {
fmt.Println("Binary logging appears to be disabled.")
fmt.Println("Enable binary logging by adding to my.cnf:")
fmt.Println(" [mysqld]")
fmt.Println(" log_bin = mysql-bin")
fmt.Println(" server_id = 1")
}
return nil
}
func runMySQLPITRStatus(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if !cfg.IsMySQL() {
return fmt.Errorf("this command is only for MySQL/MariaDB (use 'pitr status' for PostgreSQL)")
}
// Connect to MySQL
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/",
cfg.User, cfg.Password, cfg.Host, cfg.Port)
db, err := sql.Open("mysql", dsn)
if err != nil {
return fmt.Errorf("connecting to MySQL: %w", err)
}
defer db.Close()
if err := db.PingContext(ctx); err != nil {
return fmt.Errorf("pinging MySQL: %w", err)
}
pitrConfig := pitr.MySQLPITRConfig{
Host: cfg.Host,
Port: cfg.Port,
User: cfg.User,
Password: cfg.Password,
BinlogDir: mysqlBinlogDir,
ArchiveDir: mysqlArchiveDir,
}
mysqlPitr, err := pitr.NewMySQLPITR(db, pitrConfig)
if err != nil {
return fmt.Errorf("initializing MySQL PITR: %w", err)
}
status, err := mysqlPitr.Status(ctx)
if err != nil {
return fmt.Errorf("getting PITR status: %w", err)
}
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Printf(" MySQL/MariaDB PITR Status (%s)\n", status.DatabaseType)
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println()
if status.Enabled {
fmt.Println("PITR Status: ✅ ENABLED")
} else {
fmt.Println("PITR Status: ❌ NOT CONFIGURED")
}
// Get binary logging status
var logBin string
db.QueryRowContext(ctx, "SELECT @@log_bin").Scan(&logBin)
if logBin == "1" || logBin == "ON" {
fmt.Println("Binary Logging: ✅ ENABLED")
} else {
fmt.Println("Binary Logging: ❌ DISABLED")
}
fmt.Printf("Binlog Format: %s\n", status.LogLevel)
// Check GTID mode
var gtidMode string
if status.DatabaseType == pitr.DatabaseMariaDB {
db.QueryRowContext(ctx, "SELECT @@gtid_current_pos").Scan(&gtidMode)
if gtidMode != "" {
fmt.Println("GTID Mode: ✅ ENABLED")
} else {
fmt.Println("GTID Mode: ❌ DISABLED")
}
} else {
db.QueryRowContext(ctx, "SELECT @@gtid_mode").Scan(&gtidMode)
if gtidMode == "ON" {
fmt.Println("GTID Mode: ✅ ENABLED")
} else {
fmt.Printf("GTID Mode: %s\n", gtidMode)
}
}
if status.Position != nil {
fmt.Printf("Current Position: %s\n", status.Position.String())
}
if status.ArchiveDir != "" {
fmt.Println()
fmt.Println("Archive Statistics:")
fmt.Printf(" Directory: %s\n", status.ArchiveDir)
fmt.Printf(" File Count: %d\n", status.ArchiveCount)
fmt.Printf(" Total Size: %s\n", formatWALSize(status.ArchiveSize))
if !status.LastArchived.IsZero() {
fmt.Printf(" Last Archive: %s\n", status.LastArchived.Format("2006-01-02 15:04:05"))
}
}
// Show requirements
fmt.Println()
fmt.Println("PITR Requirements:")
if logBin == "1" || logBin == "ON" {
fmt.Println(" ✅ Binary logging enabled")
} else {
fmt.Println(" ❌ Binary logging must be enabled (log_bin = mysql-bin)")
}
if status.LogLevel == "ROW" {
fmt.Println(" ✅ Row-based logging (recommended)")
} else {
fmt.Printf(" ⚠ binlog_format = %s (ROW recommended for PITR)\n", status.LogLevel)
}
return nil
}
func runMySQLPITREnable(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if !cfg.IsMySQL() {
return fmt.Errorf("this command is only for MySQL/MariaDB (use 'pitr enable' for PostgreSQL)")
}
// Connect to MySQL
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/",
cfg.User, cfg.Password, cfg.Host, cfg.Port)
db, err := sql.Open("mysql", dsn)
if err != nil {
return fmt.Errorf("connecting to MySQL: %w", err)
}
defer db.Close()
if err := db.PingContext(ctx); err != nil {
return fmt.Errorf("pinging MySQL: %w", err)
}
pitrConfig := pitr.MySQLPITRConfig{
Host: cfg.Host,
Port: cfg.Port,
User: cfg.User,
Password: cfg.Password,
BinlogDir: mysqlBinlogDir,
ArchiveDir: mysqlArchiveDir,
RequireRowFormat: mysqlRequireRowFormat,
RequireGTID: mysqlRequireGTID,
}
mysqlPitr, err := pitr.NewMySQLPITR(db, pitrConfig)
if err != nil {
return fmt.Errorf("initializing MySQL PITR: %w", err)
}
enableConfig := pitr.PITREnableConfig{
ArchiveDir: mysqlArchiveDir,
RetentionDays: walRetentionDays,
Compression: walCompress,
}
log.Info("Enabling MySQL PITR", "archive_dir", mysqlArchiveDir)
if err := mysqlPitr.Enable(ctx, enableConfig); err != nil {
return fmt.Errorf("enabling PITR: %w", err)
}
log.Info("✅ MySQL PITR enabled successfully!")
log.Info("")
log.Info("Next steps:")
log.Info("1. Start binlog archiving: dbbackup binlog watch --archive-dir " + mysqlArchiveDir)
log.Info("2. Create a base backup: dbbackup backup single <database>")
log.Info("3. Binlogs will be archived to: " + mysqlArchiveDir)
log.Info("")
log.Info("To restore to a point in time, use:")
log.Info(" dbbackup restore pitr <backup> --target-time '2024-01-15 14:30:00'")
return nil
}
// getMySQLBinlogDir attempts to determine the binlog directory from MySQL
func getMySQLBinlogDir(ctx context.Context, db *sql.DB) (string, error) {
var logBinBasename string
err := db.QueryRowContext(ctx, "SELECT @@log_bin_basename").Scan(&logBinBasename)
if err != nil {
return "", err
}
return filepath.Dir(logBinBasename), nil
}

cmd/report.go (new file, 316 lines)

@@ -0,0 +1,316 @@
package cmd
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"dbbackup/internal/catalog"
"dbbackup/internal/report"
"github.com/spf13/cobra"
)
var reportCmd = &cobra.Command{
Use: "report",
Short: "Generate compliance reports",
Long: `Generate compliance reports for various regulatory frameworks.
Supported frameworks:
- soc2 SOC 2 Type II Trust Service Criteria
- gdpr General Data Protection Regulation
- hipaa Health Insurance Portability and Accountability Act
- pci-dss Payment Card Industry Data Security Standard
- iso27001 ISO 27001 Information Security Management
Examples:
# Generate SOC2 report for the last 90 days
dbbackup report generate --type soc2 --days 90
# Generate HIPAA report as HTML
dbbackup report generate --type hipaa --format html --output report.html
# Show report summary for current period
dbbackup report summary --type soc2`,
}
var reportGenerateCmd = &cobra.Command{
Use: "generate",
Short: "Generate a compliance report",
Long: "Generate a compliance report for a specified framework and time period",
RunE: runReportGenerate,
}
var reportSummaryCmd = &cobra.Command{
Use: "summary",
Short: "Show compliance summary",
Long: "Display a quick compliance summary for the specified framework",
RunE: runReportSummary,
}
var reportListCmd = &cobra.Command{
Use: "list",
Short: "List available frameworks",
Long: "Display all available compliance frameworks",
RunE: runReportList,
}
var reportControlsCmd = &cobra.Command{
Use: "controls [framework]",
Short: "List controls for a framework",
Long: "Display all controls for a specific compliance framework",
Args: cobra.ExactArgs(1),
RunE: runReportControls,
}
var (
reportType string
reportDays int
reportStartDate string
reportEndDate string
reportFormat string
reportOutput string
reportCatalog string
reportTitle string
includeEvidence bool
)
func init() {
rootCmd.AddCommand(reportCmd)
reportCmd.AddCommand(reportGenerateCmd)
reportCmd.AddCommand(reportSummaryCmd)
reportCmd.AddCommand(reportListCmd)
reportCmd.AddCommand(reportControlsCmd)
// Generate command flags
reportGenerateCmd.Flags().StringVarP(&reportType, "type", "t", "soc2", "Report type (soc2, gdpr, hipaa, pci-dss, iso27001)")
reportGenerateCmd.Flags().IntVarP(&reportDays, "days", "d", 90, "Number of days to include in report")
reportGenerateCmd.Flags().StringVar(&reportStartDate, "start", "", "Start date (YYYY-MM-DD)")
reportGenerateCmd.Flags().StringVar(&reportEndDate, "end", "", "End date (YYYY-MM-DD)")
reportGenerateCmd.Flags().StringVarP(&reportFormat, "format", "f", "markdown", "Output format (json, markdown, html)")
reportGenerateCmd.Flags().StringVarP(&reportOutput, "output", "o", "", "Output file path")
reportGenerateCmd.Flags().StringVar(&reportCatalog, "catalog", "", "Path to backup catalog database")
reportGenerateCmd.Flags().StringVar(&reportTitle, "title", "", "Custom report title")
reportGenerateCmd.Flags().BoolVar(&includeEvidence, "evidence", true, "Include evidence in report")
// Summary command flags
reportSummaryCmd.Flags().StringVarP(&reportType, "type", "t", "soc2", "Report type")
reportSummaryCmd.Flags().IntVarP(&reportDays, "days", "d", 90, "Number of days to include")
reportSummaryCmd.Flags().StringVar(&reportCatalog, "catalog", "", "Path to backup catalog database")
}
func runReportGenerate(cmd *cobra.Command, args []string) error {
// Determine time period
var startDate, endDate time.Time
endDate = time.Now()
if reportStartDate != "" {
parsed, err := time.Parse("2006-01-02", reportStartDate)
if err != nil {
return fmt.Errorf("invalid start date: %w", err)
}
startDate = parsed
} else {
startDate = endDate.AddDate(0, 0, -reportDays)
}
if reportEndDate != "" {
parsed, err := time.Parse("2006-01-02", reportEndDate)
if err != nil {
return fmt.Errorf("invalid end date: %w", err)
}
endDate = parsed
}
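// Guard against an inverted range (added check): it would otherwise produce
// an empty report with no explanation.
if endDate.Before(startDate) {
return fmt.Errorf("end date %s is before start date %s",
endDate.Format("2006-01-02"), startDate.Format("2006-01-02"))
}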
// Determine report type
rptType := parseReportType(reportType)
if rptType == "" {
return fmt.Errorf("unknown report type: %s", reportType)
}
// Get catalog path
catalogPath := reportCatalog
if catalogPath == "" {
homeDir, _ := os.UserHomeDir()
catalogPath = filepath.Join(homeDir, ".dbbackup", "catalog.db")
}
// Open catalog
cat, err := catalog.NewSQLiteCatalog(catalogPath)
if err != nil {
return fmt.Errorf("failed to open catalog: %w", err)
}
defer cat.Close()
// Configure generator
config := report.ReportConfig{
Type: rptType,
PeriodStart: startDate,
PeriodEnd: endDate,
CatalogPath: catalogPath,
OutputFormat: parseOutputFormat(reportFormat),
OutputPath: reportOutput,
IncludeEvidence: includeEvidence,
}
if reportTitle != "" {
config.Title = reportTitle
}
// Generate report
gen := report.NewGenerator(cat, config)
rpt, err := gen.Generate()
if err != nil {
return fmt.Errorf("failed to generate report: %w", err)
}
// Get formatter
formatter := report.GetFormatter(config.OutputFormat)
// Write output
var output *os.File
if reportOutput != "" {
output, err = os.Create(reportOutput)
if err != nil {
return fmt.Errorf("failed to create output file: %w", err)
}
defer output.Close()
} else {
output = os.Stdout
}
if err := formatter.Format(rpt, output); err != nil {
return fmt.Errorf("failed to format report: %w", err)
}
if reportOutput != "" {
fmt.Printf("Report generated: %s\n", reportOutput)
fmt.Printf(" Type: %s\n", rpt.Type)
fmt.Printf(" Status: %s %s\n", report.StatusIcon(rpt.Status), rpt.Status)
fmt.Printf(" Score: %.1f%%\n", rpt.Score)
fmt.Printf(" Findings: %d open\n", rpt.Summary.OpenFindings)
}
return nil
}
func runReportSummary(cmd *cobra.Command, args []string) error {
endDate := time.Now()
startDate := endDate.AddDate(0, 0, -reportDays)
rptType := parseReportType(reportType)
if rptType == "" {
return fmt.Errorf("unknown report type: %s", reportType)
}
// Get catalog path
catalogPath := reportCatalog
if catalogPath == "" {
homeDir, _ := os.UserHomeDir()
catalogPath = filepath.Join(homeDir, ".dbbackup", "catalog.db")
}
// Open catalog
cat, err := catalog.NewSQLiteCatalog(catalogPath)
if err != nil {
return fmt.Errorf("failed to open catalog: %w", err)
}
defer cat.Close()
// Configure and generate
config := report.ReportConfig{
Type: rptType,
PeriodStart: startDate,
PeriodEnd: endDate,
CatalogPath: catalogPath,
}
gen := report.NewGenerator(cat, config)
rpt, err := gen.Generate()
if err != nil {
return fmt.Errorf("failed to generate report: %w", err)
}
// Display console summary
formatter := &report.ConsoleFormatter{}
return formatter.Format(rpt, os.Stdout)
}
func runReportList(cmd *cobra.Command, args []string) error {
fmt.Println("\nAvailable Compliance Frameworks:")
fmt.Println(strings.Repeat("-", 50))
fmt.Printf(" %-12s %s\n", "soc2", "SOC 2 Type II Trust Service Criteria")
fmt.Printf(" %-12s %s\n", "gdpr", "General Data Protection Regulation (EU)")
fmt.Printf(" %-12s %s\n", "hipaa", "Health Insurance Portability and Accountability Act")
fmt.Printf(" %-12s %s\n", "pci-dss", "Payment Card Industry Data Security Standard")
fmt.Printf(" %-12s %s\n", "iso27001", "ISO 27001 Information Security Management")
fmt.Println()
fmt.Println("Usage: dbbackup report generate --type <framework>")
fmt.Println()
return nil
}
func runReportControls(cmd *cobra.Command, args []string) error {
rptType := parseReportType(args[0])
if rptType == "" {
return fmt.Errorf("unknown report type: %s", args[0])
}
framework := report.GetFramework(rptType)
if framework == nil {
return fmt.Errorf("no framework defined for: %s", args[0])
}
fmt.Printf("\n%s Controls\n", strings.ToUpper(args[0]))
fmt.Println(strings.Repeat("=", 60))
for _, cat := range framework {
fmt.Printf("\n%s\n", cat.Name)
fmt.Printf("%s\n", cat.Description)
fmt.Println(strings.Repeat("-", 40))
for _, ctrl := range cat.Controls {
fmt.Printf(" [%s] %s\n", ctrl.Reference, ctrl.Name)
fmt.Printf(" %s\n", ctrl.Description)
}
}
fmt.Println()
return nil
}
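// parseReportType maps a user-supplied framework name (and common aliases)
// to a report.ReportType; it returns "" for unknown values.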
func parseReportType(s string) report.ReportType {
switch strings.ToLower(s) {
case "soc2", "soc-2", "soc2-type2":
return report.ReportSOC2
case "gdpr":
return report.ReportGDPR
case "hipaa":
return report.ReportHIPAA
case "pci-dss", "pcidss", "pci":
return report.ReportPCIDSS
case "iso27001", "iso-27001", "iso":
return report.ReportISO27001
case "custom":
return report.ReportCustom
default:
return ""
}
}
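// parseOutputFormat maps a format flag to a report.OutputFormat,
// defaulting to Markdown for unrecognized values.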
func parseOutputFormat(s string) report.OutputFormat {
switch strings.ToLower(s) {
case "json":
return report.FormatJSON
case "html":
return report.FormatHTML
case "md", "markdown":
return report.FormatMarkdown
case "pdf":
return report.FormatPDF
default:
return report.FormatMarkdown
}
}

458
cmd/rto.go Normal file
View File

@@ -0,0 +1,458 @@
package cmd
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"dbbackup/internal/catalog"
"dbbackup/internal/rto"
"github.com/spf13/cobra"
)
var rtoCmd = &cobra.Command{
Use: "rto",
Short: "RTO/RPO analysis and monitoring",
Long: `Analyze and monitor Recovery Time Objective (RTO) and
Recovery Point Objective (RPO) metrics.
RTO: how long it takes to restore service after a failure
RPO: how much data loss is acceptable, measured by the age of the last backup
Examples:
# Analyze RTO/RPO for all databases
dbbackup rto analyze
# Analyze specific database
dbbackup rto analyze --database mydb
# Show summary status
dbbackup rto status
# Set targets and check compliance
dbbackup rto check --target-rto 4h --target-rpo 1h`,
}
var rtoAnalyzeCmd = &cobra.Command{
Use: "analyze",
Short: "Analyze RTO/RPO for databases",
Long: "Perform detailed RTO/RPO analysis based on backup history",
RunE: runRTOAnalyze,
}
var rtoStatusCmd = &cobra.Command{
Use: "status",
Short: "Show RTO/RPO status summary",
Long: "Display current RTO/RPO compliance status for all databases",
RunE: runRTOStatus,
}
var rtoCheckCmd = &cobra.Command{
Use: "check",
Short: "Check RTO/RPO compliance",
Long: "Check if databases meet RTO/RPO targets",
RunE: runRTOCheck,
}
var (
rtoDatabase string
rtoTargetRTO string
rtoTargetRPO string
rtoCatalog string
rtoFormat string
rtoOutput string
)
func init() {
rootCmd.AddCommand(rtoCmd)
rtoCmd.AddCommand(rtoAnalyzeCmd)
rtoCmd.AddCommand(rtoStatusCmd)
rtoCmd.AddCommand(rtoCheckCmd)
// Analyze command flags
rtoAnalyzeCmd.Flags().StringVarP(&rtoDatabase, "database", "d", "", "Database to analyze (all if not specified)")
rtoAnalyzeCmd.Flags().StringVar(&rtoTargetRTO, "target-rto", "4h", "Target RTO (e.g., 4h, 30m)")
rtoAnalyzeCmd.Flags().StringVar(&rtoTargetRPO, "target-rpo", "1h", "Target RPO (e.g., 1h, 15m)")
rtoAnalyzeCmd.Flags().StringVar(&rtoCatalog, "catalog", "", "Path to backup catalog")
rtoAnalyzeCmd.Flags().StringVarP(&rtoFormat, "format", "f", "text", "Output format (text, json)")
rtoAnalyzeCmd.Flags().StringVarP(&rtoOutput, "output", "o", "", "Output file")
// Status command flags
rtoStatusCmd.Flags().StringVar(&rtoCatalog, "catalog", "", "Path to backup catalog")
rtoStatusCmd.Flags().StringVar(&rtoTargetRTO, "target-rto", "4h", "Target RTO")
rtoStatusCmd.Flags().StringVar(&rtoTargetRPO, "target-rpo", "1h", "Target RPO")
// Check command flags
rtoCheckCmd.Flags().StringVarP(&rtoDatabase, "database", "d", "", "Database to check")
rtoCheckCmd.Flags().StringVar(&rtoTargetRTO, "target-rto", "4h", "Target RTO")
rtoCheckCmd.Flags().StringVar(&rtoTargetRPO, "target-rpo", "1h", "Target RPO")
rtoCheckCmd.Flags().StringVar(&rtoCatalog, "catalog", "", "Path to backup catalog")
}
func runRTOAnalyze(cmd *cobra.Command, args []string) error {
ctx := context.Background()
// Parse duration targets
targetRTO, err := time.ParseDuration(rtoTargetRTO)
if err != nil {
return fmt.Errorf("invalid target-rto: %w", err)
}
targetRPO, err := time.ParseDuration(rtoTargetRPO)
if err != nil {
return fmt.Errorf("invalid target-rpo: %w", err)
}
// Get catalog
cat, err := openRTOCatalog()
if err != nil {
return err
}
defer cat.Close()
// Create calculator
config := rto.DefaultConfig()
config.TargetRTO = targetRTO
config.TargetRPO = targetRPO
calc := rto.NewCalculator(cat, config)
var analyses []*rto.Analysis
if rtoDatabase != "" {
// Analyze single database
analysis, err := calc.Analyze(ctx, rtoDatabase)
if err != nil {
return fmt.Errorf("analysis failed: %w", err)
}
analyses = append(analyses, analysis)
} else {
// Analyze all databases
analyses, err = calc.AnalyzeAll(ctx)
if err != nil {
return fmt.Errorf("analysis failed: %w", err)
}
}
// Output
if rtoFormat == "json" {
return outputJSON(analyses, rtoOutput)
}
return outputAnalysisText(analyses)
}
func runRTOStatus(cmd *cobra.Command, args []string) error {
ctx := context.Background()
// Parse targets
targetRTO, err := time.ParseDuration(rtoTargetRTO)
if err != nil {
return fmt.Errorf("invalid target-rto: %w", err)
}
targetRPO, err := time.ParseDuration(rtoTargetRPO)
if err != nil {
return fmt.Errorf("invalid target-rpo: %w", err)
}
// Get catalog
cat, err := openRTOCatalog()
if err != nil {
return err
}
defer cat.Close()
// Create calculator and analyze all
config := rto.DefaultConfig()
config.TargetRTO = targetRTO
config.TargetRPO = targetRPO
calc := rto.NewCalculator(cat, config)
analyses, err := calc.AnalyzeAll(ctx)
if err != nil {
return fmt.Errorf("analysis failed: %w", err)
}
// Create summary
summary := rto.Summarize(analyses)
// Display status
fmt.Println()
fmt.Println("╔═══════════════════════════════════════════════════════════╗")
fmt.Println("║ RTO/RPO STATUS SUMMARY ║")
fmt.Println("╠═══════════════════════════════════════════════════════════╣")
fmt.Printf("║ Target RTO: %-15s Target RPO: %-15s ║\n",
formatDuration(config.TargetRTO),
formatDuration(config.TargetRPO))
fmt.Println("╠═══════════════════════════════════════════════════════════╣")
// Compliance status
rpoRate := 0.0
rtoRate := 0.0
fullRate := 0.0
if summary.TotalDatabases > 0 {
rpoRate = float64(summary.RPOCompliant) / float64(summary.TotalDatabases) * 100
rtoRate = float64(summary.RTOCompliant) / float64(summary.TotalDatabases) * 100
fullRate = float64(summary.FullyCompliant) / float64(summary.TotalDatabases) * 100
}
fmt.Printf("║ Databases: %-5d ║\n", summary.TotalDatabases)
fmt.Printf("║ RPO Compliant: %-5d (%.0f%%) ║\n", summary.RPOCompliant, rpoRate)
fmt.Printf("║ RTO Compliant: %-5d (%.0f%%) ║\n", summary.RTOCompliant, rtoRate)
fmt.Printf("║ Fully Compliant: %-3d (%.0f%%) ║\n", summary.FullyCompliant, fullRate)
if summary.CriticalIssues > 0 {
fmt.Printf("║ ⚠️ Critical Issues: %-3d ║\n", summary.CriticalIssues)
}
fmt.Println("╠═══════════════════════════════════════════════════════════╣")
fmt.Printf("║ Average RPO: %-15s Worst: %-15s ║\n",
formatDuration(summary.AverageRPO),
formatDuration(summary.WorstRPO))
fmt.Printf("║ Average RTO: %-15s Worst: %-15s ║\n",
formatDuration(summary.AverageRTO),
formatDuration(summary.WorstRTO))
if summary.WorstRPODatabase != "" {
fmt.Printf("║ Worst RPO Database: %-38s║\n", summary.WorstRPODatabase)
}
if summary.WorstRTODatabase != "" {
fmt.Printf("║ Worst RTO Database: %-38s║\n", summary.WorstRTODatabase)
}
fmt.Println("╚═══════════════════════════════════════════════════════════╝")
fmt.Println()
// Per-database status
if len(analyses) > 0 {
fmt.Println("Database Status:")
fmt.Println(strings.Repeat("-", 70))
fmt.Printf("%-25s %-12s %-12s %-12s\n", "DATABASE", "RPO", "RTO", "STATUS")
fmt.Println(strings.Repeat("-", 70))
for _, a := range analyses {
status := "✅"
if !a.RPOCompliant || !a.RTOCompliant {
status = "❌"
}
rpoStr := formatDuration(a.CurrentRPO)
rtoStr := formatDuration(a.CurrentRTO)
if !a.RPOCompliant {
rpoStr = "⚠️ " + rpoStr
}
if !a.RTOCompliant {
rtoStr = "⚠️ " + rtoStr
}
fmt.Printf("%-25s %-12s %-12s %s\n",
truncateRTO(a.Database, 24),
rpoStr,
rtoStr,
status)
}
fmt.Println(strings.Repeat("-", 70))
}
return nil
}
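// runRTOCheck exits non-zero when any database violates its RPO/RTO target,
// which makes it usable as a cron or CI gate, e.g.:
//
//	dbbackup rto check --target-rto 4h --target-rpo 1h || notify-oncall
//
// (notify-oncall is a placeholder for whatever alerting hook you use)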
func runRTOCheck(cmd *cobra.Command, args []string) error {
ctx := context.Background()
// Parse targets
targetRTO, err := time.ParseDuration(rtoTargetRTO)
if err != nil {
return fmt.Errorf("invalid target-rto: %w", err)
}
targetRPO, err := time.ParseDuration(rtoTargetRPO)
if err != nil {
return fmt.Errorf("invalid target-rpo: %w", err)
}
// Get catalog
cat, err := openRTOCatalog()
if err != nil {
return err
}
defer cat.Close()
// Create calculator
config := rto.DefaultConfig()
config.TargetRTO = targetRTO
config.TargetRPO = targetRPO
calc := rto.NewCalculator(cat, config)
var analyses []*rto.Analysis
if rtoDatabase != "" {
analysis, err := calc.Analyze(ctx, rtoDatabase)
if err != nil {
return fmt.Errorf("analysis failed: %w", err)
}
analyses = append(analyses, analysis)
} else {
analyses, err = calc.AnalyzeAll(ctx)
if err != nil {
return fmt.Errorf("analysis failed: %w", err)
}
}
// Check compliance
exitCode := 0
for _, a := range analyses {
if !a.RPOCompliant {
fmt.Printf("❌ %s: RPO violation - current %s exceeds target %s\n",
a.Database,
formatDuration(a.CurrentRPO),
formatDuration(config.TargetRPO))
exitCode = 1
}
if !a.RTOCompliant {
fmt.Printf("❌ %s: RTO violation - estimated %s exceeds target %s\n",
a.Database,
formatDuration(a.CurrentRTO),
formatDuration(config.TargetRTO))
exitCode = 1
}
if a.RPOCompliant && a.RTOCompliant {
fmt.Printf("✅ %s: Compliant (RPO: %s, RTO: %s)\n",
a.Database,
formatDuration(a.CurrentRPO),
formatDuration(a.CurrentRTO))
}
}
if exitCode != 0 {
// os.Exit skips deferred calls, so release the catalog explicitly first
cat.Close()
os.Exit(exitCode)
}
return nil
}
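// openRTOCatalog opens the SQLite backup catalog at --catalog, falling back
// to ~/.dbbackup/catalog.db. Callers are responsible for closing it.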
func openRTOCatalog() (*catalog.SQLiteCatalog, error) {
catalogPath := rtoCatalog
if catalogPath == "" {
homeDir, _ := os.UserHomeDir()
catalogPath = filepath.Join(homeDir, ".dbbackup", "catalog.db")
}
cat, err := catalog.NewSQLiteCatalog(catalogPath)
if err != nil {
return nil, fmt.Errorf("failed to open catalog: %w", err)
}
return cat, nil
}
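// outputJSON writes data as indented JSON to outputPath, or to stdout when
// no path is given.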
func outputJSON(data interface{}, outputPath string) error {
jsonData, err := json.MarshalIndent(data, "", " ")
if err != nil {
return err
}
if outputPath != "" {
return os.WriteFile(outputPath, jsonData, 0644)
}
fmt.Println(string(jsonData))
return nil
}
func outputAnalysisText(analyses []*rto.Analysis) error {
for _, a := range analyses {
fmt.Println()
fmt.Println(strings.Repeat("=", 60))
fmt.Printf(" Database: %s\n", a.Database)
fmt.Println(strings.Repeat("=", 60))
// Status
rpoStatus := "✅ Compliant"
if !a.RPOCompliant {
rpoStatus = "❌ Violation"
}
rtoStatus := "✅ Compliant"
if !a.RTOCompliant {
rtoStatus = "❌ Violation"
}
fmt.Println()
fmt.Println(" Recovery Objectives:")
fmt.Println(strings.Repeat("-", 50))
fmt.Printf(" RPO (Current): %-15s Target: %s\n",
formatDuration(a.CurrentRPO), formatDuration(a.TargetRPO))
fmt.Printf(" RPO Status: %s\n", rpoStatus)
fmt.Printf(" RTO (Estimated): %-14s Target: %s\n",
formatDuration(a.CurrentRTO), formatDuration(a.TargetRTO))
fmt.Printf(" RTO Status: %s\n", rtoStatus)
if a.LastBackup != nil {
fmt.Printf(" Last Backup: %s\n", a.LastBackup.Format("2006-01-02 15:04:05"))
}
if a.BackupInterval > 0 {
fmt.Printf(" Backup Interval: %s\n", formatDuration(a.BackupInterval))
}
// RTO Breakdown
fmt.Println()
fmt.Println(" RTO Breakdown:")
fmt.Println(strings.Repeat("-", 50))
b := a.RTOBreakdown
fmt.Printf(" Detection: %s\n", formatDuration(b.DetectionTime))
fmt.Printf(" Decision: %s\n", formatDuration(b.DecisionTime))
if b.DownloadTime > 0 {
fmt.Printf(" Download: %s\n", formatDuration(b.DownloadTime))
}
fmt.Printf(" Restore: %s\n", formatDuration(b.RestoreTime))
fmt.Printf(" Startup: %s\n", formatDuration(b.StartupTime))
fmt.Printf(" Validation: %s\n", formatDuration(b.ValidationTime))
fmt.Printf(" Switchover: %s\n", formatDuration(b.SwitchoverTime))
fmt.Println(strings.Repeat("-", 30))
fmt.Printf(" Total: %s\n", formatDuration(b.TotalTime))
// Recommendations
if len(a.Recommendations) > 0 {
fmt.Println()
fmt.Println(" Recommendations:")
fmt.Println(strings.Repeat("-", 50))
for _, r := range a.Recommendations {
icon := "💡"
switch r.Priority {
case rto.PriorityCritical:
icon = "🔴"
case rto.PriorityHigh:
icon = "🟠"
case rto.PriorityMedium:
icon = "🟡"
}
fmt.Printf(" %s [%s] %s\n", icon, r.Priority, r.Title)
fmt.Printf(" %s\n", r.Description)
}
}
}
return nil
}
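// formatDuration renders a duration in compact, rounded form: "45s" under a
// minute, "30m" under an hour, otherwise "2h 15m".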
func formatDuration(d time.Duration) string {
if d < time.Minute {
return fmt.Sprintf("%.0fs", d.Seconds())
}
if d < time.Hour {
return fmt.Sprintf("%.0fm", d.Minutes())
}
hours := int(d.Hours())
mins := int(d.Minutes()) - hours*60
return fmt.Sprintf("%dh %dm", hours, mins)
}
// truncateRTO shortens s to at most maxLen bytes, marking truncation with "...".
func truncateRTO(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
if maxLen <= 3 {
return s[:maxLen]
}
return s[:maxLen-3] + "..."
}