Final debug pass
@@ -50,6 +50,8 @@ dbbackup interactive --database your_database
 dbbackup interactive --database postgres --host localhost --user postgres
 ```
 
+> Tip: In the interactive menu, tap the left/right arrows (or `t`) to toggle between PostgreSQL and MySQL/MariaDB before starting a task.
+
 ### 📊 Command Line with Progress
 ```bash
 # Single backup with live progress
@@ -118,6 +120,9 @@ dbbackup status --db-type postgres
 # Single database backup
 dbbackup backup single myapp_db --db-type mysql
 
+# Using the short flag for database selection
+dbbackup backup single myapp_db -d mysql
+
 # Sample backup
 dbbackup backup sample myapp_db --sample-ratio 10 --db-type mysql
 
@@ -145,7 +150,7 @@ dbbackup backup cluster --jobs 16 --dump-jobs 8 --max-cores 32
 | `--host` | Database host | `--host db.example.com` |
 | `--port` | Database port | `--port 5432` |
 | `--user` | Database user | `--user backup_user` |
-| `--db-type` | Database type | `--db-type mysql` |
+| `-d`, `--db-type` | Database type (`postgres`, `mysql`, `mariadb`) | `-d mysql` |
 | `--insecure` | Disable SSL | `--insecure` |
 | `--jobs` | Parallel jobs | `--jobs 8` |
 | `--debug` | Debug mode | `--debug` |
README.md (12 changed lines)
@@ -111,6 +111,8 @@ dbbackup menu --database postgres --host localhost --user postgres
 dbbackup ui --database myapp_db --progress
 ```
 
+> 💡 In the interactive menu, use the left/right arrow keys (or press `t`) to switch the target engine between PostgreSQL and MySQL/MariaDB before launching an operation.
+
 ### Enhanced Progress Tracking Commands
 
 #### Real-Time Progress Monitoring
@@ -160,6 +162,9 @@ dbbackup restore backup.dump --progress --verify --show-steps
 # Single database backup (auto-optimized for your CPU)
 dbbackup backup single myapp_db --db-type postgres
 
+# MySQL/MariaDB backup using the short flag
+dbbackup backup single myapp_db -d mysql --host mysql.example.com --port 3306
+
 # Sample backup (10% of data)
 dbbackup backup sample myapp_db --sample-ratio 10
 
@@ -245,7 +250,7 @@ dbbackup cpu
 | `--port` | Database port | `5432` (PG), `3306` (MySQL) | `--port 5432` |
 | `--user` | Database user | `postgres` (PG), `root` (MySQL) | `--user backup_user` |
 | `--database` | Database name | `postgres` | `--database myapp_db` |
-| `--db-type` | Database type | `postgres` | `--db-type mysql` |
+| `-d`, `--db-type` | Database type (`postgres`, `mysql`, `mariadb`) | `postgres` | `-d mysql` |
 | `--ssl-mode` | SSL mode | `prefer` | `--ssl-mode require` |
 | `--insecure` | Disable SSL | `false` | `--insecure` |
 
@@ -258,6 +263,11 @@ export PG_PORT=5432
 export PG_USER=postgres
 export PGPASSWORD=secret
 export DB_TYPE=postgres
+export MYSQL_HOST=localhost
+export MYSQL_PORT=3306
+export MYSQL_USER=root
+export MYSQL_PWD=secret
+export MYSQL_DATABASE=myapp_db
 
 # CPU optimization
 export AUTO_DETECT_CORES=true
@@ -1,16 +1,18 @@
 package cmd
 
 import (
+    "compress/gzip"
     "context"
     "fmt"
+    "io"
     "os"
     "path/filepath"
     "sort"
     "strings"
     "time"
 
-    "github.com/spf13/cobra"
     "dbbackup/internal/tui"
+    "github.com/spf13/cobra"
 )
 
 // Create placeholder commands for the other subcommands
@@ -24,9 +26,6 @@ var restoreCmd = &cobra.Command{
         if len(args) == 0 {
             return fmt.Errorf("backup archive filename required")
         }
-        if len(args) == 0 {
-            return fmt.Errorf("backup archive filename required")
-        }
         return runRestore(cmd.Context(), args[0])
     },
 }
@@ -54,9 +53,9 @@ var listCmd = &cobra.Command{
 }
 
 var interactiveCmd = &cobra.Command{
     Use: "interactive",
     Short: "Start interactive menu mode",
     Long: `Start the interactive menu system for guided backup operations.`,
     Aliases: []string{"menu", "ui"},
     RunE: func(cmd *cobra.Command, args []string) error {
         // Start the interactive TUI
@@ -82,32 +81,30 @@ var statusCmd = &cobra.Command{
     },
 }
 
-
-
 // runList lists available backups and databases
 func runList(ctx context.Context) error {
     fmt.Println("==============================================================")
     fmt.Println(" Available Backups")
     fmt.Println("==============================================================")
 
     // List backup files
     backupFiles, err := listBackupFiles(cfg.BackupDir)
     if err != nil {
         log.Error("Failed to list backup files", "error", err)
         return fmt.Errorf("failed to list backup files: %w", err)
     }
 
     if len(backupFiles) == 0 {
         fmt.Printf("No backup files found in: %s\n", cfg.BackupDir)
     } else {
         fmt.Printf("Found %d backup files in: %s\n\n", len(backupFiles), cfg.BackupDir)
 
         for _, file := range backupFiles {
             stat, err := os.Stat(filepath.Join(cfg.BackupDir, file.Name))
             if err != nil {
                 continue
             }
 
             fmt.Printf("📦 %s\n", file.Name)
             fmt.Printf(" Size: %s\n", formatFileSize(stat.Size()))
             fmt.Printf(" Modified: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
@@ -115,7 +112,7 @@ func runList(ctx context.Context) error {
             fmt.Println()
         }
     }
 
     return nil
 }
 
@@ -124,12 +121,12 @@ func listBackupFiles(backupDir string) ([]backupFile, error) {
     if _, err := os.Stat(backupDir); os.IsNotExist(err) {
         return nil, nil
     }
 
     entries, err := os.ReadDir(backupDir)
     if err != nil {
         return nil, err
     }
 
     var files []backupFile
     for _, entry := range entries {
         if !entry.IsDir() && isBackupFile(entry.Name()) {
@@ -144,12 +141,12 @@ func listBackupFiles(backupDir string) ([]backupFile, error) {
             })
         }
     }
 
     // Sort by modification time (newest first)
     sort.Slice(files, func(i, j int) bool {
         return files[i].ModTime.After(files[j].ModTime)
     })
 
     return files, nil
 }
 
@@ -162,8 +159,8 @@ type backupFile struct {
 // isBackupFile checks if a file is a backup file based on extension
 func isBackupFile(filename string) bool {
     ext := strings.ToLower(filepath.Ext(filename))
     return ext == ".dump" || ext == ".sql" || ext == ".tar" || ext == ".gz" ||
         strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".dump.gz")
 }
 
 // getBackupType determines backup type from filename
@@ -199,10 +196,10 @@ func runPreflight(ctx context.Context) error {
     fmt.Println("==============================================================")
     fmt.Println(" Preflight Checks")
     fmt.Println("==============================================================")
 
     checksPassed := 0
     totalChecks := 6
 
     // 1. Database connectivity check
     fmt.Print("🔗 Database connectivity... ")
     if err := testDatabaseConnection(); err != nil {
@@ -211,7 +208,7 @@ func runPreflight(ctx context.Context) error {
         fmt.Println("✅ PASSED")
         checksPassed++
     }
 
     // 2. Required tools check
     fmt.Print("🛠️ Required tools (pg_dump/pg_restore)... ")
     if err := checkRequiredTools(); err != nil {
@@ -220,7 +217,7 @@ func runPreflight(ctx context.Context) error {
         fmt.Println("✅ PASSED")
         checksPassed++
     }
 
     // 3. Backup directory check
     fmt.Print("📁 Backup directory access... ")
     if err := checkBackupDirectory(); err != nil {
@@ -229,7 +226,7 @@ func runPreflight(ctx context.Context) error {
         fmt.Println("✅ PASSED")
         checksPassed++
     }
 
     // 4. Disk space check
     fmt.Print("💾 Available disk space... ")
     if err := checkDiskSpace(); err != nil {
@@ -238,7 +235,7 @@ func runPreflight(ctx context.Context) error {
         fmt.Println("✅ PASSED")
         checksPassed++
     }
 
     // 5. Permissions check
     fmt.Print("🔐 File permissions... ")
     if err := checkPermissions(); err != nil {
@@ -247,7 +244,7 @@ func runPreflight(ctx context.Context) error {
         fmt.Println("✅ PASSED")
         checksPassed++
     }
 
     // 6. CPU/Memory resources check
     fmt.Print("🖥️ System resources... ")
     if err := checkSystemResources(); err != nil {
@@ -256,10 +253,10 @@ func runPreflight(ctx context.Context) error {
         fmt.Println("✅ PASSED")
         checksPassed++
     }
 
     fmt.Println("")
     fmt.Printf("Results: %d/%d checks passed\n", checksPassed, totalChecks)
 
     if checksPassed == totalChecks {
         fmt.Println("🎉 All preflight checks passed! System is ready for backup operations.")
         return nil
@@ -286,7 +283,7 @@ func checkRequiredTools() error {
     if cfg.DatabaseType == "mysql" {
         tools = []string{"mysqldump", "mysql"}
     }
 
     for _, tool := range tools {
         if _, err := os.Stat("/usr/bin/" + tool); os.IsNotExist(err) {
             if _, err := os.Stat("/usr/local/bin/" + tool); os.IsNotExist(err) {
@@ -302,7 +299,7 @@ func checkBackupDirectory() error {
     if err := os.MkdirAll(cfg.BackupDir, 0755); err != nil {
         return fmt.Errorf("cannot create backup directory: %w", err)
     }
 
     // Test write access
     testFile := filepath.Join(cfg.BackupDir, ".preflight_test")
     if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
@@ -326,7 +323,7 @@ func checkPermissions() error {
     if _, err := os.Stat(cfg.BackupDir); os.IsNotExist(err) {
         return fmt.Errorf("backup directory not accessible")
     }
 
     // Test file creation and deletion
     testFile := filepath.Join(cfg.BackupDir, ".permissions_test")
     if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
@@ -354,47 +351,69 @@ func runRestore(ctx context.Context, archiveName string) error {
     fmt.Println("==============================================================")
     fmt.Println(" Database Restore")
     fmt.Println("==============================================================")
 
     // Construct full path to archive
     archivePath := filepath.Join(cfg.BackupDir, archiveName)
 
     // Check if archive exists
     if _, err := os.Stat(archivePath); os.IsNotExist(err) {
         return fmt.Errorf("backup archive not found: %s", archivePath)
     }
 
     // Detect archive type
     archiveType := detectArchiveType(archiveName)
     fmt.Printf("Archive: %s\n", archiveName)
     fmt.Printf("Type: %s\n", archiveType)
     fmt.Printf("Location: %s\n", archivePath)
     fmt.Println()
 
     // Get archive info
     stat, err := os.Stat(archivePath)
     if err != nil {
         return fmt.Errorf("cannot access archive: %w", err)
     }
 
     fmt.Printf("Size: %s\n", formatFileSize(stat.Size()))
     fmt.Printf("Created: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
     fmt.Println()
 
     // Show warning
     fmt.Println("⚠️ WARNING: This will restore data to the target database.")
     fmt.Println(" Existing data may be overwritten or merged depending on the restore method.")
     fmt.Println()
 
     // For safety, show what would be done without actually doing it
     switch archiveType {
     case "Single Database (.dump)":
         fmt.Println("🔄 Would execute: pg_restore to restore single database")
         fmt.Printf(" Command: pg_restore -h %s -p %d -U %s -d %s --verbose %s\n",
             cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
+    case "Single Database (.dump.gz)":
+        fmt.Println("🔄 Would execute: gunzip and pg_restore to restore single database")
+        fmt.Printf(" Command: gunzip -c %s | pg_restore -h %s -p %d -U %s -d %s --verbose\n",
+            archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
     case "SQL Script (.sql)":
-        fmt.Println("🔄 Would execute: psql to run SQL script")
-        fmt.Printf(" Command: psql -h %s -p %d -U %s -d %s -f %s\n",
-            cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
+        if cfg.IsPostgreSQL() {
+            fmt.Println("🔄 Would execute: psql to run SQL script")
+            fmt.Printf(" Command: psql -h %s -p %d -U %s -d %s -f %s\n",
+                cfg.Host, cfg.Port, cfg.User, cfg.Database, archivePath)
+        } else if cfg.IsMySQL() {
+            fmt.Println("🔄 Would execute: mysql to run SQL script")
+            fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, false))
+        } else {
+            fmt.Println("🔄 Would execute: SQL client to run script (database type unknown)")
+        }
+    case "SQL Script (.sql.gz)":
+        if cfg.IsPostgreSQL() {
+            fmt.Println("🔄 Would execute: gunzip and psql to run SQL script")
+            fmt.Printf(" Command: gunzip -c %s | psql -h %s -p %d -U %s -d %s\n",
+                archivePath, cfg.Host, cfg.Port, cfg.User, cfg.Database)
+        } else if cfg.IsMySQL() {
+            fmt.Println("🔄 Would execute: gunzip and mysql to run SQL script")
+            fmt.Printf(" Command: %s\n", mysqlRestoreCommand(archivePath, true))
+        } else {
+            fmt.Println("🔄 Would execute: gunzip and SQL client to run script (database type unknown)")
+        }
     case "Cluster Backup (.tar.gz)":
         fmt.Println("🔄 Would execute: Extract and restore cluster backup")
         fmt.Println(" Steps:")
@@ -404,19 +423,23 @@ func runRestore(ctx context.Context, archiveName string) error {
     default:
         return fmt.Errorf("unsupported archive type: %s", archiveType)
     }
 
     fmt.Println()
     fmt.Println("🛡️ SAFETY MODE: Restore command is in preview mode.")
     fmt.Println(" This shows what would be executed without making changes.")
     fmt.Println(" To enable actual restore, add --confirm flag (not yet implemented).")
 
     return nil
 }
 
 func detectArchiveType(filename string) string {
     switch {
+    case strings.HasSuffix(filename, ".dump.gz"):
+        return "Single Database (.dump.gz)"
     case strings.HasSuffix(filename, ".dump"):
         return "Single Database (.dump)"
+    case strings.HasSuffix(filename, ".sql.gz"):
+        return "SQL Script (.sql.gz)"
     case strings.HasSuffix(filename, ".sql"):
         return "SQL Script (.sql)"
     case strings.HasSuffix(filename, ".tar.gz"):
@@ -433,33 +456,33 @@ func runVerify(ctx context.Context, archiveName string) error {
     fmt.Println("==============================================================")
     fmt.Println(" Backup Archive Verification")
     fmt.Println("==============================================================")
 
     // Construct full path to archive
     archivePath := filepath.Join(cfg.BackupDir, archiveName)
 
     // Check if archive exists
     if _, err := os.Stat(archivePath); os.IsNotExist(err) {
         return fmt.Errorf("backup archive not found: %s", archivePath)
     }
 
     // Get archive info
     stat, err := os.Stat(archivePath)
     if err != nil {
         return fmt.Errorf("cannot access archive: %w", err)
     }
 
     fmt.Printf("Archive: %s\n", archiveName)
     fmt.Printf("Size: %s\n", formatFileSize(stat.Size()))
     fmt.Printf("Created: %s\n", stat.ModTime().Format("2006-01-02 15:04:05"))
     fmt.Println()
 
     // Detect and verify based on archive type
     archiveType := detectArchiveType(archiveName)
     fmt.Printf("Type: %s\n", archiveType)
 
     checksRun := 0
     checksPassed := 0
 
     // Basic file existence and readability
     fmt.Print("📁 File accessibility... ")
     if file, err := os.Open(archivePath); err != nil {
@@ -470,7 +493,7 @@ func runVerify(ctx context.Context, archiveName string) error {
         checksPassed++
     }
     checksRun++
 
     // File size sanity check
     fmt.Print("📏 File size check... ")
     if stat.Size() == 0 {
@@ -483,7 +506,7 @@ func runVerify(ctx context.Context, archiveName string) error {
         checksPassed++
     }
     checksRun++
 
     // Type-specific verification
     switch archiveType {
     case "Single Database (.dump)":
@@ -495,7 +518,17 @@ func runVerify(ctx context.Context, archiveName string) error {
             checksPassed++
         }
         checksRun++
 
+    case "Single Database (.dump.gz)":
+        fmt.Print("🔍 PostgreSQL dump format check (gzip)... ")
+        if err := verifyPgDumpGzip(archivePath); err != nil {
+            fmt.Printf("❌ FAILED: %v\n", err)
+        } else {
+            fmt.Println("✅ PASSED")
+            checksPassed++
+        }
+        checksRun++
+
     case "SQL Script (.sql)":
         fmt.Print("📜 SQL script validation... ")
         if err := verifySqlScript(archivePath); err != nil {
@@ -505,7 +538,17 @@ func runVerify(ctx context.Context, archiveName string) error {
             checksPassed++
         }
         checksRun++
 
+    case "SQL Script (.sql.gz)":
+        fmt.Print("📜 SQL script validation (gzip)... ")
+        if err := verifyGzipSqlScript(archivePath); err != nil {
+            fmt.Printf("❌ FAILED: %v\n", err)
+        } else {
+            fmt.Println("✅ PASSED")
+            checksPassed++
+        }
+        checksRun++
+
     case "Cluster Backup (.tar.gz)":
         fmt.Print("📦 Archive extraction test... ")
         if err := verifyTarGz(archivePath); err != nil {
@@ -516,7 +559,7 @@ func runVerify(ctx context.Context, archiveName string) error {
         }
         checksRun++
     }
 
     // Check for metadata file
     metadataPath := archivePath + ".info"
     fmt.Print("📋 Metadata file check... ")
@@ -527,10 +570,10 @@ func runVerify(ctx context.Context, archiveName string) error {
         checksPassed++
     }
     checksRun++
 
     fmt.Println()
     fmt.Printf("Verification Results: %d/%d checks passed\n", checksPassed, checksRun)
 
     if checksPassed == checksRun {
         fmt.Println("🎉 Archive verification completed successfully!")
         return nil
@@ -550,19 +593,42 @@ func verifyPgDump(path string) error {
         return err
     }
     defer file.Close()
 
-    buffer := make([]byte, 100)
+    buffer := make([]byte, 512)
     n, err := file.Read(buffer)
-    if err != nil && n == 0 {
+    if err != nil && err != io.EOF {
+        return fmt.Errorf("cannot read file: %w", err)
+    }
+    if n == 0 {
         return fmt.Errorf("cannot read file")
     }
 
-    content := string(buffer[:n])
-    if strings.Contains(content, "PostgreSQL") || strings.Contains(content, "pg_dump") {
-        return nil
-    }
-
-    return fmt.Errorf("does not appear to be a PostgreSQL dump file")
+    return checkPgDumpSignature(buffer[:n])
+}
+
+func verifyPgDumpGzip(path string) error {
+    file, err := os.Open(path)
+    if err != nil {
+        return err
+    }
+    defer file.Close()
+
+    gz, err := gzip.NewReader(file)
+    if err != nil {
+        return fmt.Errorf("failed to open gzip stream: %w", err)
+    }
+    defer gz.Close()
+
+    buffer := make([]byte, 512)
+    n, err := gz.Read(buffer)
+    if err != nil && err != io.EOF {
+        return fmt.Errorf("cannot read gzip contents: %w", err)
+    }
+    if n == 0 {
+        return fmt.Errorf("gzip archive is empty")
+    }
+
+    return checkPgDumpSignature(buffer[:n])
 }
 
 func verifySqlScript(path string) error {
@@ -572,22 +638,49 @@ func verifySqlScript(path string) error {
         return err
     }
     defer file.Close()
 
-    buffer := make([]byte, 500)
+    buffer := make([]byte, 1024)
     n, err := file.Read(buffer)
-    if err != nil && n == 0 {
+    if err != nil && err != io.EOF {
+        return fmt.Errorf("cannot read file: %w", err)
+    }
+    if n == 0 {
         return fmt.Errorf("cannot read file")
     }
 
-    content := strings.ToLower(string(buffer[:n]))
-    sqlKeywords := []string{"select", "insert", "create", "drop", "alter", "database", "table"}
-
-    for _, keyword := range sqlKeywords {
-        if strings.Contains(content, keyword) {
-            return nil
-        }
+    if containsSQLKeywords(strings.ToLower(string(buffer[:n]))) {
+        return nil
     }
 
+    return fmt.Errorf("does not appear to contain SQL content")
+}
+
+func verifyGzipSqlScript(path string) error {
+    file, err := os.Open(path)
+    if err != nil {
+        return err
+    }
+    defer file.Close()
+
+    gz, err := gzip.NewReader(file)
+    if err != nil {
+        return fmt.Errorf("failed to open gzip stream: %w", err)
+    }
+    defer gz.Close()
+
+    buffer := make([]byte, 1024)
+    n, err := gz.Read(buffer)
+    if err != nil && err != io.EOF {
+        return fmt.Errorf("cannot read gzip contents: %w", err)
+    }
+    if n == 0 {
+        return fmt.Errorf("gzip archive is empty")
+    }
+
+    if containsSQLKeywords(strings.ToLower(string(buffer[:n]))) {
+        return nil
+    }
+
     return fmt.Errorf("does not appear to contain SQL content")
 }
 
@@ -598,17 +691,67 @@ func verifyTarGz(path string) error {
         return err
     }
     defer file.Close()
 
     // Check if it starts with gzip magic number
     buffer := make([]byte, 3)
     n, err := file.Read(buffer)
     if err != nil || n < 3 {
         return fmt.Errorf("cannot read file header")
     }
 
     if buffer[0] == 0x1f && buffer[1] == 0x8b {
         return nil // Valid gzip header
     }
 
     return fmt.Errorf("does not appear to be a valid gzip file")
 }
+
+func checkPgDumpSignature(data []byte) error {
+    if len(data) >= 5 && string(data[:5]) == "PGDMP" {
+        return nil
+    }
+
+    content := strings.ToLower(string(data))
+    if strings.Contains(content, "postgresql") || strings.Contains(content, "pg_dump") {
+        return nil
+    }
+
+    return fmt.Errorf("does not appear to be a PostgreSQL dump file")
+}
+
+func containsSQLKeywords(content string) bool {
+    sqlKeywords := []string{"select", "insert", "create", "drop", "alter", "database", "table", "update", "delete"}
+
+    for _, keyword := range sqlKeywords {
+        if strings.Contains(content, keyword) {
+            return true
+        }
+    }
+
+    return false
+}
+
+func mysqlRestoreCommand(archivePath string, compressed bool) string {
+    parts := []string{
+        "mysql",
+        "-h", cfg.Host,
+        "-P", fmt.Sprintf("%d", cfg.Port),
+        "-u", cfg.User,
+    }
+
+    if cfg.Password != "" {
+        parts = append(parts, fmt.Sprintf("-p'%s'", cfg.Password))
+    }
+
+    if cfg.Database != "" {
+        parts = append(parts, cfg.Database)
+    }
+
+    command := strings.Join(parts, " ")
+
+    if compressed {
+        return fmt.Sprintf("gunzip -c %s | %s", archivePath, command)
+    }
+
+    return fmt.Sprintf("%s < %s", command, archivePath)
+}
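Editor's note, outside the diff: a minimal sketch of the command strings the new `mysqlRestoreCommand` helper is expected to produce. The connection details are hard-coded here purely for illustration (the real helper reads them from the package-level `cfg`), and `buildMySQLRestore` is a hypothetical stand-in, not part of the commit:

```go
package main

import (
	"fmt"
	"strings"
)

// buildMySQLRestore mirrors the helper's composition logic: build the mysql
// client invocation, then either pipe a gunzip stream into it or redirect the
// plain SQL file on stdin.
func buildMySQLRestore(host string, port int, user, db, archive string, compressed bool) string {
	base := strings.Join([]string{"mysql", "-h", host, "-P", fmt.Sprintf("%d", port), "-u", user, db}, " ")
	if compressed {
		return fmt.Sprintf("gunzip -c %s | %s", archive, base)
	}
	return fmt.Sprintf("%s < %s", base, archive)
}

func main() {
	fmt.Println(buildMySQLRestore("localhost", 3306, "root", "myapp_db", "myapp_db.sql.gz", true))
	// gunzip -c myapp_db.sql.gz | mysql -h localhost -P 3306 -u root myapp_db
	fmt.Println(buildMySQLRestore("localhost", 3306, "root", "myapp_db", "myapp_db.sql", false))
	// mysql -h localhost -P 3306 -u root myapp_db < myapp_db.sql
}
```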
cmd/root.go (18 changed lines)
@@ -4,9 +4,9 @@ import (
     "context"
     "fmt"
 
-    "github.com/spf13/cobra"
     "dbbackup/internal/config"
     "dbbackup/internal/logger"
+    "github.com/spf13/cobra"
 )
 
 var (
@@ -34,24 +34,30 @@ Database Support:
 
 For help with specific commands, use: dbbackup [command] --help`,
     Version: "",
+    PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+        if cfg == nil {
+            return nil
+        }
+        return cfg.SetDatabaseType(cfg.DatabaseType)
+    },
 }
 
 // Execute adds all child commands to the root command and sets flags appropriately.
 func Execute(ctx context.Context, config *config.Config, logger logger.Logger) error {
     cfg = config
     log = logger
 
     // Set version info
     rootCmd.Version = fmt.Sprintf("%s (built: %s, commit: %s)",
         cfg.Version, cfg.BuildTime, cfg.GitCommit)
 
     // Add persistent flags
     rootCmd.PersistentFlags().StringVar(&cfg.Host, "host", cfg.Host, "Database host")
     rootCmd.PersistentFlags().IntVar(&cfg.Port, "port", cfg.Port, "Database port")
     rootCmd.PersistentFlags().StringVar(&cfg.User, "user", cfg.User, "Database user")
     rootCmd.PersistentFlags().StringVar(&cfg.Database, "database", cfg.Database, "Database name")
     rootCmd.PersistentFlags().StringVar(&cfg.Password, "password", cfg.Password, "Database password")
-    rootCmd.PersistentFlags().StringVar(&cfg.DatabaseType, "db-type", cfg.DatabaseType, "Database type (postgres|mysql)")
+    rootCmd.PersistentFlags().StringVarP(&cfg.DatabaseType, "db-type", "d", cfg.DatabaseType, "Database type (postgres|mysql|mariadb)")
     rootCmd.PersistentFlags().StringVar(&cfg.BackupDir, "backup-dir", cfg.BackupDir, "Backup directory")
     rootCmd.PersistentFlags().BoolVar(&cfg.NoColor, "no-color", cfg.NoColor, "Disable colored output")
     rootCmd.PersistentFlags().BoolVar(&cfg.Debug, "debug", cfg.Debug, "Enable debug logging")
@@ -76,4 +82,4 @@ func init() {
     rootCmd.AddCommand(interactiveCmd)
     rootCmd.AddCommand(statusCmd)
     rootCmd.AddCommand(preflightCmd)
 }
Binary file not shown.
@@ -5,7 +5,8 @@ import (
     "path/filepath"
     "runtime"
     "strconv"
+    "strings"
 
     "dbbackup/internal/cpu"
 )
 
@@ -34,7 +35,7 @@ type Config struct {
     MaxCores int
     AutoDetectCores bool
     CPUWorkloadType string // "cpu-intensive", "io-intensive", "balanced"
 
     // CPU detection
     CPUDetector *cpu.Detector
     CPUInfo *cpu.CPUInfo
@@ -64,15 +65,41 @@ func New() *Config {
     cpuDetector := cpu.NewDetector()
     cpuInfo, _ := cpuDetector.DetectCPU()
 
-    return &Config{
+    dbTypeRaw := getEnvString("DB_TYPE", "postgres")
+    canonicalType, ok := canonicalDatabaseType(dbTypeRaw)
+    if !ok {
+        canonicalType = "postgres"
+    }
+
+    host := getEnvString("PG_HOST", "localhost")
+    port := getEnvInt("PG_PORT", postgresDefaultPort)
+    user := getEnvString("PG_USER", getCurrentUser())
+    databaseName := getEnvString("PG_DATABASE", "postgres")
+    password := getEnvString("PGPASSWORD", "")
+    sslMode := getEnvString("PG_SSLMODE", "prefer")
+
+    if canonicalType == "mysql" {
+        host = getEnvString("MYSQL_HOST", host)
+        port = getEnvInt("MYSQL_PORT", mysqlDefaultPort)
+        user = getEnvString("MYSQL_USER", user)
+        if db := getEnvString("MYSQL_DATABASE", ""); db != "" {
+            databaseName = db
+        }
+        if pwd := getEnvString("MYSQL_PWD", ""); pwd != "" {
+            password = pwd
+        }
+        sslMode = ""
+    }
+
+    cfg := &Config{
         // Database defaults
-        Host: getEnvString("PG_HOST", "localhost"),
-        Port: getEnvInt("PG_PORT", 5432),
-        User: getEnvString("PG_USER", getCurrentUser()),
-        Database: getEnvString("PG_DATABASE", "postgres"),
-        Password: getEnvString("PGPASSWORD", ""),
-        DatabaseType: getEnvString("DB_TYPE", "postgres"),
-        SSLMode: getEnvString("PG_SSLMODE", "prefer"),
+        Host: host,
+        Port: port,
+        User: user,
+        Database: databaseName,
+        Password: password,
+        DatabaseType: canonicalType,
+        SSLMode: sslMode,
         Insecure: getEnvBool("INSECURE", false),
 
         // Backup defaults
@@ -103,6 +130,15 @@ func New() *Config {
         SingleDBName: getEnvString("SINGLE_DB_NAME", ""),
         RestoreDBName: getEnvString("RESTORE_DB_NAME", ""),
     }
+
+    // Ensure canonical defaults are enforced
+    if err := cfg.SetDatabaseType(cfg.DatabaseType); err != nil {
+        cfg.DatabaseType = "postgres"
+        cfg.Port = postgresDefaultPort
+        cfg.SSLMode = "prefer"
+    }
+
+    return cfg
 }
 
 // UpdateFromEnvironment updates configuration from environment variables
@@ -117,18 +153,18 @@ func (c *Config) UpdateFromEnvironment() {
 
 // Validate validates the configuration
 func (c *Config) Validate() error {
-    if c.DatabaseType != "postgres" && c.DatabaseType != "mysql" {
-        return &ConfigError{Field: "database-type", Value: c.DatabaseType, Message: "must be 'postgres' or 'mysql'"}
+    if err := c.SetDatabaseType(c.DatabaseType); err != nil {
+        return err
     }
 
     if c.CompressionLevel < 0 || c.CompressionLevel > 9 {
         return &ConfigError{Field: "compression", Value: string(rune(c.CompressionLevel)), Message: "must be between 0-9"}
     }
 
     if c.Jobs < 1 {
         return &ConfigError{Field: "jobs", Value: string(rune(c.Jobs)), Message: "must be at least 1"}
     }
 
     if c.DumpJobs < 1 {
         return &ConfigError{Field: "dump-jobs", Value: string(rune(c.DumpJobs)), Message: "must be at least 1"}
     }
@@ -154,12 +190,61 @@ func (c *Config) GetDefaultPort() int {
     return 5432
 }
 
+// DisplayDatabaseType returns a human-friendly name for the database type
+func (c *Config) DisplayDatabaseType() string {
+    switch c.DatabaseType {
+    case "postgres":
+        return "PostgreSQL"
+    case "mysql":
+        return "MySQL/MariaDB"
+    default:
+        return c.DatabaseType
+    }
+}
+
+// SetDatabaseType normalizes the database type and updates dependent defaults
+func (c *Config) SetDatabaseType(dbType string) error {
+    normalized, ok := canonicalDatabaseType(dbType)
+    if !ok {
+        return &ConfigError{Field: "database-type", Value: dbType, Message: "must be 'postgres' or 'mysql'"}
+    }
+
+    previous := c.DatabaseType
+    previousPort := c.Port
+
+    c.DatabaseType = normalized
+
+    if c.Port == 0 {
+        c.Port = defaultPortFor(normalized)
+    }
+
+    if normalized != previous {
+        if previousPort == defaultPortFor(previous) || previousPort == 0 {
+            c.Port = defaultPortFor(normalized)
+        }
+    }
+
+    // Adjust SSL mode defaults when switching engines. Preserve explicit user choices.
+    switch normalized {
+    case "mysql":
+        if strings.EqualFold(c.SSLMode, "prefer") || strings.EqualFold(c.SSLMode, "preferred") {
+            c.SSLMode = ""
+        }
+    case "postgres":
+        if c.SSLMode == "" {
+            c.SSLMode = "prefer"
+        }
+    }
+
+    return nil
+}
+
 // OptimizeForCPU optimizes job settings based on detected CPU
 func (c *Config) OptimizeForCPU() error {
     if c.CPUDetector == nil {
         c.CPUDetector = cpu.NewDetector()
     }
 
     if c.CPUInfo == nil {
         info, err := c.CPUDetector.DetectCPU()
         if err != nil {
@@ -167,13 +252,13 @@ func (c *Config) OptimizeForCPU() error {
         }
         c.CPUInfo = info
     }
 
     if c.AutoDetectCores {
         // Optimize jobs based on workload type
         if jobs, err := c.CPUDetector.CalculateOptimalJobs(c.CPUWorkloadType, c.MaxCores); err == nil {
             c.Jobs = jobs
         }
 
         // Optimize dump jobs (more conservative for database dumps)
         if dumpJobs, err := c.CPUDetector.CalculateOptimalJobs("cpu-intensive", c.MaxCores/2); err == nil {
             c.DumpJobs = dumpJobs
@@ -182,7 +267,7 @@ func (c *Config) OptimizeForCPU() error {
             }
         }
     }
 
     return nil
 }
 
@@ -191,16 +276,16 @@ func (c *Config) GetCPUInfo() (*cpu.CPUInfo, error) {
     if c.CPUInfo != nil {
         return c.CPUInfo, nil
     }
 
     if c.CPUDetector == nil {
         c.CPUDetector = cpu.NewDetector()
     }
 
     info, err := c.CPUDetector.DetectCPU()
     if err != nil {
         return nil, err
     }
 
     c.CPUInfo = info
     return info, nil
 }
@@ -216,6 +301,33 @@ func (e *ConfigError) Error() string {
     return "config error in field '" + e.Field + "' with value '" + e.Value + "': " + e.Message
 }
 
+const (
+    postgresDefaultPort = 5432
+    mysqlDefaultPort = 3306
+)
+
+func canonicalDatabaseType(input string) (string, bool) {
+    switch strings.ToLower(strings.TrimSpace(input)) {
+    case "postgres", "postgresql", "pg":
+        return "postgres", true
+    case "mysql", "mariadb", "mariadb-server", "maria":
+        return "mysql", true
+    default:
+        return "", false
+    }
+}
+
+func defaultPortFor(dbType string) int {
+    switch dbType {
+    case "postgres":
+        return postgresDefaultPort
+    case "mysql":
+        return mysqlDefaultPort
+    default:
+        return postgresDefaultPort
+    }
+}
+
 // Helper functions
 func getEnvString(key, defaultValue string) string {
     if value := os.Getenv(key); value != "" {
@@ -258,17 +370,17 @@ func getDefaultBackupDir() string {
     if homeDir != "" {
         return filepath.Join(homeDir, "db_backups")
     }
 
     // Fallback based on OS
     if runtime.GOOS == "windows" {
         return "C:\\db_backups"
     }
 
     // For PostgreSQL user on Linux/Unix
     if getCurrentUser() == "postgres" {
         return "/var/lib/pgsql/pg_backups"
     }
 
     return "/tmp/db_backups"
 }
 
@@ -316,4 +428,4 @@ func getDefaultMaxCores(cpuInfo *cpu.CPUInfo) int {
         maxCores = 64
     }
     return maxCores
 }
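Editor's note, outside the diff: a small sketch of how the normalization added above is expected to behave once wired through `SetDatabaseType`, with engine aliases collapsing to a canonical name and the default port following the engine. The two helper bodies are copied from the hunk above; the standalone `main` driver is illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// Trimmed-down copies of the helpers introduced in this commit, for illustration.
func canonicalDatabaseType(input string) (string, bool) {
	switch strings.ToLower(strings.TrimSpace(input)) {
	case "postgres", "postgresql", "pg":
		return "postgres", true
	case "mysql", "mariadb", "mariadb-server", "maria":
		return "mysql", true
	default:
		return "", false
	}
}

func defaultPortFor(dbType string) int {
	if dbType == "mysql" {
		return 3306
	}
	return 5432
}

func main() {
	for _, raw := range []string{"PostgreSQL", "mariadb", "sqlite"} {
		if t, ok := canonicalDatabaseType(raw); ok {
			fmt.Printf("%s -> %s (default port %d)\n", raw, t, defaultPortFor(t))
		} else {
			fmt.Printf("%s -> rejected\n", raw)
		}
	}
	// PostgreSQL -> postgres (default port 5432)
	// mariadb -> mysql (default port 3306)
	// sqlite -> rejected
}
```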
@ -32,28 +32,28 @@ func (m *MySQL) Connect(ctx context.Context) error {
|
|||||||
// Build MySQL DSN
|
// Build MySQL DSN
|
||||||
dsn := m.buildDSN()
|
dsn := m.buildDSN()
|
||||||
m.dsn = dsn
|
m.dsn = dsn
|
||||||
|
|
||||||
m.log.Debug("Connecting to MySQL", "dsn", sanitizeMySQLDSN(dsn))
|
m.log.Debug("Connecting to MySQL", "dsn", sanitizeMySQLDSN(dsn))
|
||||||
|
|
||||||
db, err := sql.Open("mysql", dsn)
|
db, err := sql.Open("mysql", dsn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to open MySQL connection: %w", err)
|
return fmt.Errorf("failed to open MySQL connection: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure connection pool
|
// Configure connection pool
|
||||||
db.SetMaxOpenConns(10)
|
db.SetMaxOpenConns(10)
|
||||||
db.SetMaxIdleConns(5)
|
db.SetMaxIdleConns(5)
|
||||||
db.SetConnMaxLifetime(0)
|
db.SetConnMaxLifetime(0)
|
||||||
|
|
||||||
// Test connection
|
// Test connection
|
||||||
timeoutCtx, cancel := buildTimeout(ctx, 0)
|
timeoutCtx, cancel := buildTimeout(ctx, 0)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
if err := db.PingContext(timeoutCtx); err != nil {
|
if err := db.PingContext(timeoutCtx); err != nil {
|
||||||
db.Close()
|
db.Close()
|
||||||
return fmt.Errorf("failed to ping MySQL: %w", err)
|
return fmt.Errorf("failed to ping MySQL: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
m.db = db
|
m.db = db
|
||||||
m.log.Info("Connected to MySQL successfully")
|
m.log.Info("Connected to MySQL successfully")
|
||||||
return nil
|
return nil
|
||||||
@ -64,15 +64,15 @@ func (m *MySQL) ListDatabases(ctx context.Context) ([]string, error) {
|
|||||||
if m.db == nil {
|
if m.db == nil {
|
||||||
return nil, fmt.Errorf("not connected to database")
|
return nil, fmt.Errorf("not connected to database")
|
||||||
}
|
}
|
||||||
|
|
||||||
query := `SHOW DATABASES`
|
query := `SHOW DATABASES`
|
||||||
|
|
||||||
rows, err := m.db.QueryContext(ctx, query)
|
rows, err := m.db.QueryContext(ctx, query)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to query databases: %w", err)
|
return nil, fmt.Errorf("failed to query databases: %w", err)
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
var databases []string
|
var databases []string
|
||||||
systemDbs := map[string]bool{
|
systemDbs := map[string]bool{
|
||||||
"information_schema": true,
|
"information_schema": true,
|
||||||
@ -80,19 +80,19 @@ func (m *MySQL) ListDatabases(ctx context.Context) ([]string, error) {
|
|||||||
"mysql": true,
|
"mysql": true,
|
||||||
"sys": true,
|
"sys": true,
|
||||||
}
|
}
|
||||||
|
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
var name string
|
var name string
|
||||||
if err := rows.Scan(&name); err != nil {
|
if err := rows.Scan(&name); err != nil {
|
||||||
return nil, fmt.Errorf("failed to scan database name: %w", err)
|
return nil, fmt.Errorf("failed to scan database name: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Skip system databases
|
// Skip system databases
|
||||||
if !systemDbs[name] {
|
if !systemDbs[name] {
|
||||||
databases = append(databases, name)
|
databases = append(databases, name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return databases, rows.Err()
|
return databases, rows.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -101,17 +101,17 @@ func (m *MySQL) ListTables(ctx context.Context, database string) ([]string, error) {
	if m.db == nil {
		return nil, fmt.Errorf("not connected to database")
	}

	query := `SELECT table_name FROM information_schema.tables
	          WHERE table_schema = ? AND table_type = 'BASE TABLE'
	          ORDER BY table_name`

	rows, err := m.db.QueryContext(ctx, query, database)
	if err != nil {
		return nil, fmt.Errorf("failed to query tables: %w", err)
	}
	defer rows.Close()

	var tables []string
	for rows.Next() {
		var name string

@@ -120,7 +120,7 @@ func (m *MySQL) ListTables(ctx context.Context, database string) ([]string, error) {
		}
		tables = append(tables, name)
	}

	return tables, rows.Err()
}

@@ -129,13 +129,13 @@ func (m *MySQL) CreateDatabase(ctx context.Context, name string) error {
	if m.db == nil {
		return fmt.Errorf("not connected to database")
	}

	query := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", name)
	_, err := m.db.ExecContext(ctx, query)
	if err != nil {
		return fmt.Errorf("failed to create database %s: %w", name, err)
	}

	m.log.Info("Created database", "name", name)
	return nil
}

@@ -145,13 +145,13 @@ func (m *MySQL) DropDatabase(ctx context.Context, name string) error {
	if m.db == nil {
		return fmt.Errorf("not connected to database")
	}

	query := fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", name)
	_, err := m.db.ExecContext(ctx, query)
	if err != nil {
		return fmt.Errorf("failed to drop database %s: %w", name, err)
	}

	m.log.Info("Dropped database", "name", name)
	return nil
}
@@ -161,7 +161,7 @@ func (m *MySQL) DatabaseExists(ctx context.Context, name string) (bool, error) {
	if m.db == nil {
		return false, fmt.Errorf("not connected to database")
	}

	query := `SELECT SCHEMA_NAME FROM information_schema.SCHEMATA WHERE SCHEMA_NAME = ?`
	var dbName string
	err := m.db.QueryRowContext(ctx, query, name).Scan(&dbName)

@@ -171,7 +171,7 @@ func (m *MySQL) DatabaseExists(ctx context.Context, name string) (bool, error) {
	if err != nil {
		return false, fmt.Errorf("failed to check database existence: %w", err)
	}

	return true, nil
}

@@ -180,13 +180,13 @@ func (m *MySQL) GetVersion(ctx context.Context) (string, error) {
	if m.db == nil {
		return "", fmt.Errorf("not connected to database")
	}

	var version string
	err := m.db.QueryRowContext(ctx, "SELECT VERSION()").Scan(&version)
	if err != nil {
		return "", fmt.Errorf("failed to get version: %w", err)
	}

	return version, nil
}

@@ -195,17 +195,17 @@ func (m *MySQL) GetDatabaseSize(ctx context.Context, database string) (int64, error) {
	if m.db == nil {
		return 0, fmt.Errorf("not connected to database")
	}

	query := `SELECT COALESCE(SUM(data_length + index_length), 0) as size_bytes
	          FROM information_schema.tables
	          WHERE table_schema = ?`

	var size int64
	err := m.db.QueryRowContext(ctx, query, database).Scan(&size)
	if err != nil {
		return 0, fmt.Errorf("failed to get database size: %w", err)
	}

	return size, nil
}

@@ -214,11 +214,11 @@ func (m *MySQL) GetTableRowCount(ctx context.Context, database, table string) (int64, error) {
	if m.db == nil {
		return 0, fmt.Errorf("not connected to database")
	}

	// First try information_schema for approximate count (faster)
	query := `SELECT table_rows FROM information_schema.tables
	          WHERE table_schema = ? AND table_name = ?`

	var count int64
	err := m.db.QueryRowContext(ctx, query, database, table).Scan(&count)
	if err != nil || count == 0 {
@@ -229,95 +229,92 @@ func (m *MySQL) GetTableRowCount(ctx context.Context, database, table string) (int64, error) {
			return 0, fmt.Errorf("failed to get table row count: %w", err)
		}
	}

	return count, nil
}

// BuildBackupCommand builds mysqldump command
func (m *MySQL) BuildBackupCommand(database, outputFile string, options BackupOptions) []string {
	cmd := []string{"mysqldump"}

	// Connection parameters
	cmd = append(cmd, "-h", m.cfg.Host)
	cmd = append(cmd, "-P", strconv.Itoa(m.cfg.Port))
	cmd = append(cmd, "-u", m.cfg.User)

	if m.cfg.Password != "" {
		cmd = append(cmd, "-p"+m.cfg.Password)
	}

	// SSL options
	if m.cfg.Insecure {
		cmd = append(cmd, "--skip-ssl")
-	} else if m.cfg.SSLMode != "" {
-		// MySQL SSL modes: DISABLED, PREFERRED, REQUIRED, VERIFY_CA, VERIFY_IDENTITY
-		switch strings.ToLower(m.cfg.SSLMode) {
-		case "disable", "disabled":
-			cmd = append(cmd, "--skip-ssl")
+	} else if mode := strings.ToLower(m.cfg.SSLMode); mode != "" {
+		switch mode {
		case "require", "required":
			cmd = append(cmd, "--ssl-mode=REQUIRED")
		case "verify-ca":
			cmd = append(cmd, "--ssl-mode=VERIFY_CA")
		case "verify-full", "verify-identity":
			cmd = append(cmd, "--ssl-mode=VERIFY_IDENTITY")
-		default:
-			cmd = append(cmd, "--ssl-mode=PREFERRED")
+		case "disable", "disabled":
+			cmd = append(cmd, "--skip-ssl")
		}
	}

	// Backup options
	cmd = append(cmd, "--single-transaction") // Consistent backup
	cmd = append(cmd, "--routines")           // Include stored procedures/functions
	cmd = append(cmd, "--triggers")           // Include triggers
	cmd = append(cmd, "--events")             // Include events

	if options.SchemaOnly {
		cmd = append(cmd, "--no-data")
	} else if options.DataOnly {
		cmd = append(cmd, "--no-create-info")
	}

	if options.NoOwner || options.NoPrivileges {
		cmd = append(cmd, "--skip-add-drop-table")
	}

	// Compression (handled externally for MySQL)
	// Output redirection will be handled by caller

	// Database
	cmd = append(cmd, database)

	return cmd
}

// BuildRestoreCommand builds mysql restore command
func (m *MySQL) BuildRestoreCommand(database, inputFile string, options RestoreOptions) []string {
	cmd := []string{"mysql"}

	// Connection parameters
	cmd = append(cmd, "-h", m.cfg.Host)
	cmd = append(cmd, "-P", strconv.Itoa(m.cfg.Port))
	cmd = append(cmd, "-u", m.cfg.User)

	if m.cfg.Password != "" {
		cmd = append(cmd, "-p"+m.cfg.Password)
	}

	// SSL options
	if m.cfg.Insecure {
		cmd = append(cmd, "--skip-ssl")
	}

	// Options
	if options.SingleTransaction {
		cmd = append(cmd, "--single-transaction")
	}

	// Database
	cmd = append(cmd, database)

	// Input file (will be handled via stdin redirection)

	return cmd
}
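For reference, a minimal standalone sketch of the SSL-mode-to-flag mapping that BuildBackupCommand now applies; the `sslFlag` helper and the sample values are illustrative only and do not exist in the repository:

```go
package main

import (
	"fmt"
	"strings"
)

// sslFlag mirrors the new switch in BuildBackupCommand: a lowered ssl-mode
// string is translated into the matching mysqldump flag, unknown values
// fall through with no explicit flag, and insecure always skips SSL.
func sslFlag(insecure bool, sslMode string) []string {
	if insecure {
		return []string{"--skip-ssl"}
	}
	switch strings.ToLower(sslMode) {
	case "require", "required":
		return []string{"--ssl-mode=REQUIRED"}
	case "verify-ca":
		return []string{"--ssl-mode=VERIFY_CA"}
	case "verify-full", "verify-identity":
		return []string{"--ssl-mode=VERIFY_IDENTITY"}
	case "disable", "disabled":
		return []string{"--skip-ssl"}
	}
	return nil
}

func main() {
	fmt.Println(sslFlag(false, "verify-ca")) // [--ssl-mode=VERIFY_CA]
	fmt.Println(sslFlag(false, "prefer"))    // [] (no explicit flag appended)
}
```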
@@ -326,11 +323,11 @@ func (m *MySQL) BuildSampleQuery(database, table string, strategy SampleStrategy) string {
	switch strategy.Type {
	case "ratio":
		// Every Nth record using row_number (MySQL 8.0+) or modulo
		return fmt.Sprintf("SELECT * FROM (SELECT *, (@row_number:=@row_number + 1) AS rn FROM %s.%s CROSS JOIN (SELECT @row_number:=0) AS t) AS numbered WHERE rn %% %d = 1",
			database, table, strategy.Value)
	case "percent":
		// Percentage sampling using RAND()
		return fmt.Sprintf("SELECT * FROM %s.%s WHERE RAND() <= %f",
			database, table, float64(strategy.Value)/100.0)
	case "count":
		// First N records
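As an illustration of the "ratio" branch above, the query it emits is a modulo filter over a synthetic row number. A small sketch, using a hypothetical `mydb.orders` table and a ratio of 10, prints the exact SQL produced by that format string:

```go
package main

import "fmt"

func main() {
	// Same format string as BuildSampleQuery's "ratio" case; the database,
	// table, and ratio value below are made up for illustration.
	q := fmt.Sprintf(
		"SELECT * FROM (SELECT *, (@row_number:=@row_number + 1) AS rn FROM %s.%s CROSS JOIN (SELECT @row_number:=0) AS t) AS numbered WHERE rn %% %d = 1",
		"mydb", "orders", 10)
	fmt.Println(q) // ... WHERE rn % 10 = 1, i.e. keeps every 10th row
}
```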
@@ -343,58 +340,56 @@ func (m *MySQL) BuildSampleQuery(database, table string, strategy SampleStrategy) string {
// ValidateBackupTools checks if required MySQL tools are available
func (m *MySQL) ValidateBackupTools() error {
	tools := []string{"mysqldump", "mysql"}

	for _, tool := range tools {
		if _, err := exec.LookPath(tool); err != nil {
			return fmt.Errorf("required tool not found: %s", tool)
		}
	}

	return nil
}

// buildDSN constructs MySQL connection string
func (m *MySQL) buildDSN() string {
	dsn := ""

	if m.cfg.User != "" {
		dsn += m.cfg.User
	}

	if m.cfg.Password != "" {
		dsn += ":" + m.cfg.Password
	}

	dsn += "@"

	if m.cfg.Host != "" && m.cfg.Host != "localhost" {
		dsn += "tcp(" + m.cfg.Host + ":" + strconv.Itoa(m.cfg.Port) + ")"
	}

	dsn += "/" + m.cfg.Database

	// Add connection parameters
	params := []string{}

-	if m.cfg.Insecure {
-		params = append(params, "tls=skip-verify")
-	} else if m.cfg.SSLMode != "" {
+	if !m.cfg.Insecure {
		switch strings.ToLower(m.cfg.SSLMode) {
-		case "disable", "disabled":
-			params = append(params, "tls=false")
		case "require", "required":
			params = append(params, "tls=true")
+		case "verify-ca", "verify-full", "verify-identity":
+			params = append(params, "tls=preferred")
		}
	}

	// Add charset
	params = append(params, "charset=utf8mb4")
	params = append(params, "parseTime=true")

	if len(params) > 0 {
		dsn += "?" + strings.Join(params, "&")
	}

	return dsn
}

@@ -407,4 +402,4 @@ func sanitizeMySQLDSN(dsn string) string {
		}
	}
	return dsn
}
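To make the DSN shape concrete, here is a hedged sketch of what buildDSN assembles for a remote host with `Insecure=false` and `SSLMode` "required"; the credentials, host, and database names are placeholders, not values from the repository:

```go
package main

import "fmt"

func main() {
	// Mirrors buildDSN for a non-localhost host with SSLMode "required":
	// user:pass@tcp(host:port)/db?tls=true&charset=utf8mb4&parseTime=true
	user, pass, host, port, db := "backup_user", "secret", "db.example.com", 3306, "myapp_db"
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=true&charset=utf8mb4&parseTime=true",
		user, pass, host, port, db)
	fmt.Println(dsn)
}
```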
@@ -3,6 +3,7 @@ package tui
import (
	"context"
	"fmt"
+	"strings"

	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"

@@ -23,8 +24,8 @@ var (
			Foreground(lipgloss.Color("#626262"))

	menuSelectedStyle = lipgloss.NewStyle().
			Foreground(lipgloss.Color("#FF75B7")).
			Bold(true)

	infoStyle = lipgloss.NewStyle().
			Foreground(lipgloss.Color("#626262"))

@@ -36,17 +37,28 @@ var (
	errorStyle = lipgloss.NewStyle().
			Foreground(lipgloss.Color("#FF6B6B")).
			Bold(true)

+	dbSelectorLabelStyle = lipgloss.NewStyle().
+			Foreground(lipgloss.Color("#57C7FF")).
+			Bold(true)
)

+type dbTypeOption struct {
+	label string
+	value string
+}
+
// MenuModel represents the simple menu state
type MenuModel struct {
	choices  []string
	cursor   int
	config   *config.Config
	logger   logger.Logger
	quitting bool
	message  string
+	dbTypes      []dbTypeOption
+	dbTypeCursor int

	// Background operations
	ctx    context.Context
	cancel context.CancelFunc
@@ -54,7 +66,17 @@ type MenuModel struct {
func NewMenuModel(cfg *config.Config, log logger.Logger) MenuModel {
	ctx, cancel := context.WithCancel(context.Background())

+	dbTypes := []dbTypeOption{
+		{label: "PostgreSQL", value: "postgres"},
+		{label: "MySQL / MariaDB", value: "mysql"},
+	}
+
+	dbCursor := 0
+	if cfg.IsMySQL() {
+		dbCursor = 1
+	}
+
	model := MenuModel{
		choices: []string{
			"Single Database Backup",

@@ -67,12 +89,14 @@ func NewMenuModel(cfg *config.Config, log logger.Logger) MenuModel {
			"Clear Operation History",
			"Quit",
		},
		config: cfg,
		logger: log,
		ctx:    ctx,
		cancel: cancel,
+		dbTypes:      dbTypes,
+		dbTypeCursor: dbCursor,
	}

	return model
}

@@ -93,6 +117,24 @@ func (m MenuModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
			m.quitting = true
			return m, tea.Quit

+		case "left", "h":
+			if m.dbTypeCursor > 0 {
+				m.dbTypeCursor--
+				m.applyDatabaseSelection()
+			}
+
+		case "right", "l":
+			if m.dbTypeCursor < len(m.dbTypes)-1 {
+				m.dbTypeCursor++
+				m.applyDatabaseSelection()
+			}
+
+		case "t":
+			if len(m.dbTypes) > 0 {
+				m.dbTypeCursor = (m.dbTypeCursor + 1) % len(m.dbTypes)
+				m.applyDatabaseSelection()
+			}
+
		case "up", "k":
			if m.cursor > 0 {
				m.cursor--
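The `t` binding cycles the engine selector with wrap-around while the arrow keys move it step by step. A tiny sketch of the same modulo arithmetic, with illustrative values only:

```go
package main

import "fmt"

func main() {
	// Same wrap-around as m.dbTypeCursor = (m.dbTypeCursor + 1) % len(m.dbTypes).
	dbTypes := []string{"PostgreSQL", "MySQL / MariaDB"}
	cursor := 0
	for i := 0; i < 4; i++ {
		cursor = (cursor + 1) % len(dbTypes)
		fmt.Println(cursor, dbTypes[cursor]) // alternates between the two engines
	}
}
```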
@@ -146,9 +188,24 @@ func (m MenuModel) View() string {
	header := titleStyle.Render("🗄️  Database Backup Tool - Interactive Menu")
	s += fmt.Sprintf("\n%s\n\n", header)

+	if len(m.dbTypes) > 0 {
+		options := make([]string, len(m.dbTypes))
+		for i, opt := range m.dbTypes {
+			if m.dbTypeCursor == i {
+				options[i] = menuSelectedStyle.Render(opt.label)
+			} else {
+				options[i] = menuStyle.Render(opt.label)
+			}
+		}
+		selector := fmt.Sprintf("Target Engine: %s", strings.Join(options, menuStyle.Render(" | ")))
+		s += dbSelectorLabelStyle.Render(selector) + "\n"
+		hint := infoStyle.Render("Switch with ←/→ or t • Cluster backup requires PostgreSQL")
+		s += hint + "\n\n"
+	}

	// Database info
	dbInfo := infoStyle.Render(fmt.Sprintf("Database: %s@%s:%d (%s)",
-		m.config.User, m.config.Host, m.config.Port, m.config.DatabaseType))
+		m.config.User, m.config.Host, m.config.Port, m.config.DisplayDatabaseType()))
	s += fmt.Sprintf("%s\n\n", dbInfo)

	// Menu items
@@ -189,6 +246,10 @@ func (m MenuModel) handleSampleBackup() (tea.Model, tea.Cmd) {

// handleClusterBackup shows confirmation and executes cluster backup
func (m MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
+	if !m.config.IsPostgreSQL() {
+		m.message = errorStyle.Render("❌ Cluster backup is available only for PostgreSQL targets")
+		return m, nil
+	}
	confirm := NewConfirmationModel(m.config, m.logger, m,
		"🗄️ Cluster Backup",
		"This will backup ALL databases in the cluster. Continue?")
@@ -220,14 +281,39 @@ func (m MenuModel) handleSettings() (tea.Model, tea.Cmd) {
	return settingsModel, nil
}

+func (m *MenuModel) applyDatabaseSelection() {
+	if m == nil || len(m.dbTypes) == 0 {
+		return
+	}
+	if m.dbTypeCursor < 0 || m.dbTypeCursor >= len(m.dbTypes) {
+		return
+	}
+
+	selection := m.dbTypes[m.dbTypeCursor]
+	if err := m.config.SetDatabaseType(selection.value); err != nil {
+		m.message = errorStyle.Render(fmt.Sprintf("❌ %v", err))
+		return
+	}
+
+	// Refresh default port if unchanged
+	if m.config.Port == 0 {
+		m.config.Port = m.config.GetDefaultPort()
+	}
+
+	m.message = successStyle.Render(fmt.Sprintf("🔀 Target database set to %s", m.config.DisplayDatabaseType()))
+	if m.logger != nil {
+		m.logger.Info("updated target database type", "type", m.config.DatabaseType, "port", m.config.Port)
+	}
+}
+
// RunInteractiveMenu starts the simple TUI
func RunInteractiveMenu(cfg *config.Config, log logger.Logger) error {
	m := NewMenuModel(cfg, log)
	p := tea.NewProgram(m)

	if _, err := p.Run(); err != nil {
		return fmt.Errorf("error running interactive menu: %w", err)
	}

	return nil
}
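applyDatabaseSelection relies on config.SetDatabaseType to validate the chosen engine; that method lives in the config package and is not part of this diff. A plausible sketch of the normalization it is expected to perform, using a hypothetical stand-in helper for illustration only:

```go
package main

import (
	"fmt"
	"strings"
)

// setDatabaseType is a guess at the contract the menu relies on:
// normalize the input and reject unknown engines with an error.
func setDatabaseType(current *string, v string) error {
	switch strings.ToLower(strings.TrimSpace(v)) {
	case "postgres", "postgresql":
		*current = "postgres"
	case "mysql", "mariadb":
		*current = strings.ToLower(strings.TrimSpace(v))
	default:
		return fmt.Errorf("unsupported database type: %s", v)
	}
	return nil
}

func main() {
	dbType := "postgres"
	fmt.Println(setDatabaseType(&dbType, "MariaDB"), dbType) // <nil> mariadb
	fmt.Println(setDatabaseType(&dbType, "oracle"), dbType)  // error, dbType unchanged
}
```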
@@ -14,11 +14,11 @@ import (
)

var (
	headerStyle   = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("99")).Padding(1, 2)
	inputStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("212"))
	buttonStyle   = lipgloss.NewStyle().Foreground(lipgloss.Color("15")).Background(lipgloss.Color("57")).Padding(0, 2)
	selectedStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("212")).Background(lipgloss.Color("57")).Bold(true)
	detailStyle   = lipgloss.NewStyle().Foreground(lipgloss.Color("240")).Italic(true)
)

// SettingsModel represents the settings configuration state

@@ -50,6 +50,16 @@ type SettingItem struct {
// Initialize settings model
func NewSettingsModel(cfg *config.Config, log logger.Logger, parent tea.Model) SettingsModel {
	settings := []SettingItem{
+		{
+			Key:         "database_type",
+			DisplayName: "Database Type",
+			Value:       func(c *config.Config) string { return c.DatabaseType },
+			Update: func(c *config.Config, v string) error {
+				return c.SetDatabaseType(v)
+			},
+			Type:        "string",
+			Description: "Target database engine (postgres, mysql, mariadb)",
+		},
		{
			Key:         "backup_dir",
			DisplayName: "Backup Directory",
@@ -195,8 +205,12 @@ func NewSettingsModel(cfg *config.Config, log logger.Logger, parent tea.Model) SettingsModel {
		{
			Key:         "auto_detect_cores",
			DisplayName: "Auto Detect CPU Cores",
			Value: func(c *config.Config) string {
-				if c.AutoDetectCores { return "true" } else { return "false" }
+				if c.AutoDetectCores {
+					return "true"
+				} else {
+					return "false"
+				}
			},
			Update: func(c *config.Config, v string) error {
				val, err := strconv.ParseBool(v)

@@ -274,11 +288,11 @@ func (m SettingsModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
		}
		return m, nil
	}

	if m.editing {
		return m.handleEditingInput(msg)
	}

	switch msg.String() {
	case "ctrl+c", "q", "esc":
		m.quitting = true

@@ -328,29 +342,29 @@ func (m SettingsModel) handleEditingInput(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
	case "ctrl+c":
		m.quitting = true
		return m.parent, nil

	case "esc":
		m.editing = false
		m.editingField = ""
		m.editingValue = ""
		m.message = ""
		return m, nil

	case "enter":
		return m.saveEditedValue()

	case "backspace":
		if len(m.editingValue) > 0 {
			m.editingValue = m.editingValue[:len(m.editingValue)-1]
		}

	default:
		// Add character to editing value
		if len(msg.String()) == 1 {
			m.editingValue += msg.String()
		}
	}

	return m, nil
}

@@ -359,13 +373,13 @@ func (m SettingsModel) startEditing() (tea.Model, tea.Cmd) {
	if m.cursor >= len(m.settings) {
		return m, nil
	}

	setting := m.settings[m.cursor]
	m.editing = true
	m.editingField = setting.Key
	m.editingValue = setting.Value(m.config)
	m.message = ""

	return m, nil
}

@@ -374,7 +388,7 @@ func (m SettingsModel) saveEditedValue() (tea.Model, tea.Cmd) {
	if m.editingField == "" {
		return m, nil
	}

	// Find the setting being edited
	var setting *SettingItem
	for i := range m.settings {

@@ -383,41 +397,41 @@ func (m SettingsModel) saveEditedValue() (tea.Model, tea.Cmd) {
			break
		}
	}

	if setting == nil {
		m.message = errorStyle.Render("❌ Setting not found")
		m.editing = false
		return m, nil
	}

	// Update the configuration
	if err := setting.Update(m.config, m.editingValue); err != nil {
		m.message = errorStyle.Render(fmt.Sprintf("❌ %s", err.Error()))
		return m, nil
	}

	m.message = successStyle.Render(fmt.Sprintf("✅ Updated %s", setting.DisplayName))
	m.editing = false
	m.editingField = ""
	m.editingValue = ""

	return m, nil
}

// resetToDefaults resets configuration to default values
func (m SettingsModel) resetToDefaults() (tea.Model, tea.Cmd) {
	newConfig := config.New()

	// Copy important connection details
	newConfig.Host = m.config.Host
	newConfig.Port = m.config.Port
	newConfig.User = m.config.User
	newConfig.Database = m.config.Database
	newConfig.DatabaseType = m.config.DatabaseType

	*m.config = *newConfig
	m.message = successStyle.Render("✅ Settings reset to defaults")

	return m, nil
}

@@ -427,7 +441,7 @@ func (m SettingsModel) saveSettings() (tea.Model, tea.Cmd) {
		m.message = errorStyle.Render(fmt.Sprintf("❌ Validation failed: %s", err.Error()))
		return m, nil
	}

	// Optimize CPU settings if auto-detect is enabled
	if m.config.AutoDetectCores {
		if err := m.config.OptimizeForCPU(); err != nil {

@@ -435,7 +449,7 @@ func (m SettingsModel) saveSettings() (tea.Model, tea.Cmd) {
			return m, nil
		}
	}

	m.message = successStyle.Render("✅ Settings validated and saved")
	return m, nil
}
@@ -456,7 +470,11 @@ func (m SettingsModel) View() string {
	for i, setting := range m.settings {
		cursor := " "
		value := setting.Value(m.config)
+		displayValue := value
+		if setting.Key == "database_type" {
+			displayValue = fmt.Sprintf("%s (%s)", value, m.config.DisplayDatabaseType())
+		}

		if m.cursor == i {
			cursor = ">"
			if m.editing && m.editingField == setting.Key {
@@ -469,22 +487,22 @@ func (m SettingsModel) View() string {
				b.WriteString(selectedStyle.Render(line))
				b.WriteString(" ✏️")
			} else {
-				line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, value)
+				line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, displayValue)
				b.WriteString(selectedStyle.Render(line))
			}
		} else {
-			line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, value)
+			line := fmt.Sprintf("%s %s: %s", cursor, setting.DisplayName, displayValue)
			b.WriteString(menuStyle.Render(line))
		}
		b.WriteString("\n")

		// Show description for selected item
		if m.cursor == i && !m.editing {
			desc := detailStyle.Render(fmt.Sprintf("   %s", setting.Description))
			b.WriteString(desc)
			b.WriteString("\n")
		}

		// Show directory browser for current path field
		if m.cursor == i && m.browsingDir && m.dirBrowser != nil && setting.Type == "path" {
			b.WriteString("\n")
@@ -506,14 +524,15 @@ func (m SettingsModel) View() string {
	b.WriteString("\n")
	b.WriteString(infoStyle.Render("📋 Current Configuration:"))
	b.WriteString("\n")

	summary := []string{
+		fmt.Sprintf("Target DB: %s (%s)", m.config.DisplayDatabaseType(), m.config.DatabaseType),
		fmt.Sprintf("Database: %s@%s:%d", m.config.User, m.config.Host, m.config.Port),
		fmt.Sprintf("Backup Dir: %s", m.config.BackupDir),
		fmt.Sprintf("Compression: Level %d", m.config.CompressionLevel),
		fmt.Sprintf("Jobs: %d parallel, %d dump", m.config.Jobs, m.config.DumpJobs),
	}

	for _, line := range summary {
		b.WriteString(detailStyle.Render(fmt.Sprintf("  %s", line)))
		b.WriteString("\n")

@@ -559,7 +578,7 @@ func (m SettingsModel) openDirectoryBrowser() (tea.Model, tea.Cmd) {
		m.dirBrowser.CurrentPath = currentValue
		m.dirBrowser.LoadItems()
	}

	m.dirBrowser.Show()
	m.browsingDir = true

@@ -570,10 +589,10 @@ func (m SettingsModel) openDirectoryBrowser() (tea.Model, tea.Cmd) {
func RunSettingsMenu(cfg *config.Config, log logger.Logger, parent tea.Model) error {
	m := NewSettingsModel(cfg, log, parent)
	p := tea.NewProgram(m, tea.WithAltScreen())

	if _, err := p.Run(); err != nil {
		return fmt.Errorf("error running settings menu: %w", err)
	}

	return nil
}
@@ -148,7 +148,7 @@ func (m StatusViewModel) View() string {
	}
	s.WriteString("\n")

-	s.WriteString(fmt.Sprintf("Database Type: %s\n", m.config.DatabaseType))
+	s.WriteString(fmt.Sprintf("Database Type: %s (%s)\n", m.config.DisplayDatabaseType(), m.config.DatabaseType))
	s.WriteString(fmt.Sprintf("Host: %s:%d\n", m.config.Host, m.config.Port))
	s.WriteString(fmt.Sprintf("User: %s\n", m.config.User))
	s.WriteString(fmt.Sprintf("Backup Directory: %s\n", m.config.BackupDir))
scripts/cli_switch_test.sh (new executable file, 173 lines)
@@ -0,0 +1,173 @@
#!/usr/bin/env bash
set -u
set -o pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
BINARY_NAME="dbbackup_linux_amd64"
BINARY="./${BINARY_NAME}"
LOG_DIR="${REPO_ROOT}/test_logs"
TIMESTAMP="$(date +%Y%m%d_%H%M%S)"
LOG_FILE="${LOG_DIR}/cli_switch_test_${TIMESTAMP}.log"

PG_BACKUP_DIR="/tmp/db_backups"
PG_DATABASE="postgres"
PG_FLAGS=(
  --db-type postgres
  --host localhost
  --port 5432
  --user postgres
  --database "${PG_DATABASE}"
  --backup-dir "${PG_BACKUP_DIR}"
  --jobs 4
  --dump-jobs 4
  --max-cores 8
  --cpu-workload balanced
  --debug
)

MYSQL_BACKUP_DIR="/tmp/mysql_backups"
MYSQL_DATABASE="backup_demo"
MYSQL_FLAGS=(
  --db-type mysql
  --host 127.0.0.1
  --port 3306
  --user backup_user
  --password backup_pass
  --database "${MYSQL_DATABASE}"
  --backup-dir "${MYSQL_BACKUP_DIR}"
  --insecure
  --jobs 2
  --dump-jobs 2
  --max-cores 4
  --cpu-workload io-intensive
  --debug
)

mkdir -p "${LOG_DIR}"

log() {
  printf '%s\n' "$1" | tee -a "${LOG_FILE}" >/dev/null
}

RESULTS=()

run_cmd() {
  local label="$1"
  shift
  log ""
  log "### ${label}"
  log "Command: $*"
  "$@" 2>&1 | tee -a "${LOG_FILE}"
  local status=${PIPESTATUS[0]}
  log "Exit: ${status}"
  RESULTS+=("${label}|${status}")
}

latest_file() {
  local dir="$1"
  local pattern="$2"
  shopt -s nullglob
  local files=("${dir}"/${pattern})
  shopt -u nullglob
  if (( ${#files[@]} == 0 )); then
    return 1
  fi
  local latest="${files[0]}"
  for file in "${files[@]}"; do
    if [[ "${file}" -nt "${latest}" ]]; then
      latest="${file}"
    fi
  done
  printf '%s\n' "${latest}"
}

log "dbbackup CLI regression started"
log "Log file: ${LOG_FILE}"

cd "${REPO_ROOT}"

run_cmd "Go build" go build -o "${BINARY}" .
run_cmd "Ensure Postgres backup dir" sudo -u postgres mkdir -p "${PG_BACKUP_DIR}"
run_cmd "Ensure MySQL backup dir" mkdir -p "${MYSQL_BACKUP_DIR}"

run_cmd "Postgres status" sudo -u postgres "${BINARY}" status "${PG_FLAGS[@]}"
run_cmd "Postgres preflight" sudo -u postgres "${BINARY}" preflight "${PG_FLAGS[@]}"
run_cmd "Postgres CPU info" sudo -u postgres "${BINARY}" cpu "${PG_FLAGS[@]}"
run_cmd "Postgres backup single" sudo -u postgres "${BINARY}" backup single "${PG_DATABASE}" "${PG_FLAGS[@]}"
run_cmd "Postgres backup sample" sudo -u postgres "${BINARY}" backup sample "${PG_DATABASE}" --sample-ratio 5 "${PG_FLAGS[@]}"
run_cmd "Postgres backup cluster" sudo -u postgres "${BINARY}" backup cluster "${PG_FLAGS[@]}"
run_cmd "Postgres list" sudo -u postgres "${BINARY}" list "${PG_FLAGS[@]}"

PG_SINGLE_FILE="$(latest_file "${PG_BACKUP_DIR}" "db_${PG_DATABASE}_*.dump" || true)"
PG_SAMPLE_FILE="$(latest_file "${PG_BACKUP_DIR}" "sample_${PG_DATABASE}_*.sql" || true)"
PG_CLUSTER_FILE="$(latest_file "${PG_BACKUP_DIR}" "cluster_*.tar.gz" || true)"

if [[ -n "${PG_SINGLE_FILE}" ]]; then
  run_cmd "Postgres verify single" sudo -u postgres "${BINARY}" verify "$(basename "${PG_SINGLE_FILE}")" "${PG_FLAGS[@]}"
  run_cmd "Postgres restore single" sudo -u postgres "${BINARY}" restore "$(basename "${PG_SINGLE_FILE}")" "${PG_FLAGS[@]}"
else
  log "No PostgreSQL single backup found for verification"
  RESULTS+=("Postgres single artifact missing|1")
fi

if [[ -n "${PG_SAMPLE_FILE}" ]]; then
  run_cmd "Postgres verify sample" sudo -u postgres "${BINARY}" verify "$(basename "${PG_SAMPLE_FILE}")" "${PG_FLAGS[@]}"
  run_cmd "Postgres restore sample" sudo -u postgres "${BINARY}" restore "$(basename "${PG_SAMPLE_FILE}")" "${PG_FLAGS[@]}"
else
  log "No PostgreSQL sample backup found for verification"
  RESULTS+=("Postgres sample artifact missing|1")
fi

if [[ -n "${PG_CLUSTER_FILE}" ]]; then
  run_cmd "Postgres verify cluster" sudo -u postgres "${BINARY}" verify "$(basename "${PG_CLUSTER_FILE}")" "${PG_FLAGS[@]}"
  run_cmd "Postgres restore cluster" sudo -u postgres "${BINARY}" restore "$(basename "${PG_CLUSTER_FILE}")" "${PG_FLAGS[@]}"
else
  log "No PostgreSQL cluster backup found for verification"
  RESULTS+=("Postgres cluster artifact missing|1")
fi

run_cmd "MySQL status" "${BINARY}" status "${MYSQL_FLAGS[@]}"
run_cmd "MySQL preflight" "${BINARY}" preflight "${MYSQL_FLAGS[@]}"
run_cmd "MySQL CPU info" "${BINARY}" cpu "${MYSQL_FLAGS[@]}"
run_cmd "MySQL backup single" "${BINARY}" backup single "${MYSQL_DATABASE}" "${MYSQL_FLAGS[@]}"
run_cmd "MySQL backup sample" "${BINARY}" backup sample "${MYSQL_DATABASE}" --sample-percent 25 "${MYSQL_FLAGS[@]}"
run_cmd "MySQL list" "${BINARY}" list "${MYSQL_FLAGS[@]}"

MYSQL_SINGLE_FILE="$(latest_file "${MYSQL_BACKUP_DIR}" "db_${MYSQL_DATABASE}_*.sql.gz" || true)"
MYSQL_SAMPLE_FILE="$(latest_file "${MYSQL_BACKUP_DIR}" "sample_${MYSQL_DATABASE}_*.sql" || true)"

if [[ -n "${MYSQL_SINGLE_FILE}" ]]; then
  run_cmd "MySQL verify single" "${BINARY}" verify "$(basename "${MYSQL_SINGLE_FILE}")" "${MYSQL_FLAGS[@]}"
  run_cmd "MySQL restore single" "${BINARY}" restore "$(basename "${MYSQL_SINGLE_FILE}")" "${MYSQL_FLAGS[@]}"
else
  log "No MySQL single backup found for verification"
  RESULTS+=("MySQL single artifact missing|1")
fi

if [[ -n "${MYSQL_SAMPLE_FILE}" ]]; then
  run_cmd "MySQL verify sample" "${BINARY}" verify "$(basename "${MYSQL_SAMPLE_FILE}")" "${MYSQL_FLAGS[@]}"
  run_cmd "MySQL restore sample" "${BINARY}" restore "$(basename "${MYSQL_SAMPLE_FILE}")" "${MYSQL_FLAGS[@]}"
else
  log "No MySQL sample backup found for verification"
  RESULTS+=("MySQL sample artifact missing|1")
fi

run_cmd "Interactive help" "${BINARY}" interactive --help
run_cmd "Root help" "${BINARY}" --help
run_cmd "Root version" "${BINARY}" --version

log ""
log "=== Summary ==="
failed=0
for entry in "${RESULTS[@]}"; do
  IFS='|' read -r label status <<<"${entry}"
  if [[ "${status}" -eq 0 ]]; then
    log "[PASS] ${label}"
  else
    log "[FAIL] ${label} (exit ${status})"
    failed=1
  fi
done

exit "${failed}"
@@ -1,68 +0,0 @@
#!/bin/bash

echo "==================================="
echo "  DB BACKUP TOOL - FUNCTION TEST"
echo "==================================="
echo

# Test all CLI commands
commands=("backup" "restore" "list" "status" "verify" "preflight" "cpu")

echo "1. Testing CLI Commands:"
echo "------------------------"
for cmd in "${commands[@]}"; do
  echo -n "  $cmd: "
  output=$(./dbbackup_linux_amd64 $cmd --help 2>&1 | head -1)
  if [[ "$output" == *"not yet implemented"* ]]; then
    echo "❌ PLACEHOLDER"
  elif [[ "$output" == *"Error: unknown command"* ]]; then
    echo "❌ MISSING"
  else
    echo "✅ IMPLEMENTED"
  fi
done

echo
echo "2. Testing Backup Subcommands:"
echo "------------------------------"
backup_cmds=("single" "cluster" "sample")
for cmd in "${backup_cmds[@]}"; do
  echo -n "  backup $cmd: "
  output=$(./dbbackup_linux_amd64 backup $cmd --help 2>&1 | head -1)
  if [[ "$output" == *"Error"* ]]; then
    echo "❌ MISSING"
  else
    echo "✅ IMPLEMENTED"
  fi
done

echo
echo "3. Testing Status & Connection:"
echo "------------------------------"
echo "  Database connection test:"
./dbbackup_linux_amd64 status 2>&1 | grep -E "(✅|❌)" | head -3

echo
echo "4. Testing Interactive Mode:"
echo "----------------------------"
echo "  Starting interactive (5 sec timeout)..."
timeout 5s ./dbbackup_linux_amd64 interactive >/dev/null 2>&1
if [ $? -eq 124 ]; then
  echo "  ✅ Interactive mode starts (timed out = working)"
else
  echo "  ❌ Interactive mode failed"
fi

echo
echo "5. Testing with Different DB Types:"
echo "----------------------------------"
echo "  PostgreSQL config:"
./dbbackup_linux_amd64 --db-type postgres status 2>&1 | grep "Database Type" || echo "  ❌ Failed"

echo "  MySQL config:"
./dbbackup_linux_amd64 --db-type mysql status 2>&1 | grep "Database Type" || echo "  ❌ Failed"

echo
echo "==================================="
echo "  TEST COMPLETE"
echo "==================================="