Compare commits

15 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | b1ed3d8134 |  |
|  | c0603f40f4 |  |
|  | 2418fabbff |  |
|  | 31289b09d2 |  |
|  | a8d33a41e3 |  |
|  | b5239d839d |  |
|  | fab48ac564 |  |
|  | 66865a5fb8 |  |
|  | f9dd95520b |  |
|  | ac1c892d9b |  |
|  | 084f7b3938 |  |
|  | 173b2ce035 |  |
|  | efe9457aa4 |  |
|  | e2284f295a |  |
|  | 9e3270dc10 |  |
TODO_SESSION.md (new file, 107 lines)
@@ -0,0 +1,107 @@
# dbbackup Session TODO - January 31, 2026

## ✅ Completed Today (Jan 30, 2026)

### Released Versions

| Version | Feature | Status |
|---------|---------|--------|
| v4.2.6 | Initial session start | ✅ |
| v4.2.7 | Restore Profiles | ✅ |
| v4.2.8 | Backup Estimate | ✅ |
| v4.2.9 | TUI Enhancements | ✅ |
| v4.2.10 | Health Check | ✅ |
| v4.2.11 | Completion Scripts | ✅ |
| v4.2.12 | Man Pages | ✅ |
| v4.2.13 | Parallel Jobs Fix (pg_dump -j for custom format) | ✅ |
| v4.2.14 | Catalog Export (CSV/HTML/JSON) | ✅ |
| v4.2.15 | Version Command | ✅ |
| v4.2.16 | Cloud Sync | ✅ |

**Total: 11 releases in one session!**

---

## 🚀 Quick Wins for Tomorrow (15-30 min each)

### High Priority

1. **Backup Schedule Command** - Show next scheduled backup times
2. **Catalog Prune** - Remove old entries from catalog
3. **Config Validate** - Validate configuration file
4. **Restore Dry-Run** - Preview restore without executing
5. **Cleanup Preview** - Show what would be deleted

### Medium Priority

6. **Notification Test** - Test webhook/email notifications
7. **Cloud Status** - Check cloud storage connectivity
8. **Backup Chain** - Show backup chain (full → incremental)
9. **Space Forecast** - Predict disk space needs
10. **Encryption Key Rotate** - Rotate encryption keys

### Enhancement Ideas

11. **Progress Webhooks** - Send progress during backup
12. **Parallel Restore** - Multi-threaded restore
13. **Catalog Dashboard** - Interactive TUI for catalog
14. **Retention Simulator** - Preview retention policy effects
15. **Cross-Region Sync** - Sync to multiple cloud regions

---

## 📋 DBA World Meeting Backlog

### Enterprise Features (Larger scope)

- [ ] Compliance Autopilot Enhancements
- [ ] Advanced Retention Policies
- [ ] Cross-Region Replication
- [ ] Backup Verification Automation
- [ ] HA/Clustering Support
- [ ] Role-Based Access Control
- [ ] Audit Log Export
- [ ] Integration APIs

### Performance

- [ ] Streaming Backup (no temp files)
- [ ] Delta Backups
- [ ] Compression Benchmarking
- [ ] Memory Optimization

### Monitoring

- [ ] Custom Prometheus Metrics
- [ ] Grafana Dashboard Improvements
- [ ] Alert Routing Rules
- [ ] SLA Tracking

---

## 🔧 Known Issues to Fix

- None reported

---

## 📝 Session Notes

### Workflow That Works

1. Pick 15-30 min feature
2. Create new cmd file
3. Build & test locally
4. Commit with descriptive message
5. Bump version
6. Build all platforms
7. Tag & push
8. Create GitHub release

### Build Commands

```bash
go build              # Quick local build
bash build_all.sh     # All 5 platforms
git tag v4.2.X && git push origin main && git push github main && git push origin v4.2.X && git push github v4.2.X
gh release create v4.2.X --title "..." --notes "..." bin/dbbackup_*
```

### Key Files

- `main.go` - Version string
- `cmd/` - All CLI commands
- `internal/` - Core packages

---

**Next version: v4.2.17**
cmd/catalog_export.go (new file, 463 lines)
@@ -0,0 +1,463 @@
package cmd

import (
	"context"
	"encoding/csv"
	"encoding/json"
	"fmt"
	"html"
	"os"
	"path/filepath"
	"strings"
	"time"

	"dbbackup/internal/catalog"

	"github.com/spf13/cobra"
)

var (
	exportOutput string
	exportFormat string
)

// catalogExportCmd exports catalog to various formats
var catalogExportCmd = &cobra.Command{
	Use:   "export",
	Short: "Export catalog to file (CSV/HTML/JSON)",
	Long: `Export backup catalog to various formats for analysis, reporting, or archival.

Supports:
  - CSV format for spreadsheet import (Excel, LibreOffice)
  - HTML format for web-based reports and documentation
  - JSON format for programmatic access and integration

Examples:
  # Export to CSV
  dbbackup catalog export --format csv --output backups.csv

  # Export to HTML report
  dbbackup catalog export --format html --output report.html

  # Export specific database
  dbbackup catalog export --format csv --database myapp --output myapp_backups.csv

  # Export date range
  dbbackup catalog export --format html --after 2026-01-01 --output january_report.html`,
	RunE: runCatalogExport,
}

func init() {
	catalogCmd.AddCommand(catalogExportCmd)
	catalogExportCmd.Flags().StringVarP(&exportOutput, "output", "o", "", "Output file path (required)")
	catalogExportCmd.Flags().StringVarP(&exportFormat, "format", "f", "csv", "Export format: csv, html, json")
	catalogExportCmd.Flags().StringVar(&catalogDatabase, "database", "", "Filter by database name")
	catalogExportCmd.Flags().StringVar(&catalogStartDate, "after", "", "Show backups after date (YYYY-MM-DD)")
	catalogExportCmd.Flags().StringVar(&catalogEndDate, "before", "", "Show backups before date (YYYY-MM-DD)")
	catalogExportCmd.MarkFlagRequired("output")
}

func runCatalogExport(cmd *cobra.Command, args []string) error {
	if exportOutput == "" {
		return fmt.Errorf("--output flag required")
	}

	// Validate format
	exportFormat = strings.ToLower(exportFormat)
	if exportFormat != "csv" && exportFormat != "html" && exportFormat != "json" {
		return fmt.Errorf("invalid format: %s (supported: csv, html, json)", exportFormat)
	}

	cat, err := openCatalog()
	if err != nil {
		return err
	}
	defer cat.Close()

	ctx := context.Background()

	// Build query
	query := &catalog.SearchQuery{
		Database:  catalogDatabase,
		Limit:     0, // No limit - export all
		OrderBy:   "created_at",
		OrderDesc: false, // Chronological order for exports
	}

	// Parse dates if provided
	if catalogStartDate != "" {
		after, err := time.Parse("2006-01-02", catalogStartDate)
		if err != nil {
			return fmt.Errorf("invalid --after date format (use YYYY-MM-DD): %w", err)
		}
		query.StartDate = &after
	}

	if catalogEndDate != "" {
		before, err := time.Parse("2006-01-02", catalogEndDate)
		if err != nil {
			return fmt.Errorf("invalid --before date format (use YYYY-MM-DD): %w", err)
		}
		query.EndDate = &before
	}

	// Search backups
	entries, err := cat.Search(ctx, query)
	if err != nil {
		return fmt.Errorf("failed to search catalog: %w", err)
	}

	if len(entries) == 0 {
		fmt.Println("No backups found matching criteria")
		return nil
	}

	// Export based on format
	switch exportFormat {
	case "csv":
		return exportCSV(entries, exportOutput)
	case "html":
		return exportHTML(entries, exportOutput, catalogDatabase)
	case "json":
		return exportJSON(entries, exportOutput)
	default:
		return fmt.Errorf("unsupported format: %s", exportFormat)
	}
}

// exportCSV exports entries to CSV format
func exportCSV(entries []*catalog.Entry, outputPath string) error {
	file, err := os.Create(outputPath)
	if err != nil {
		return fmt.Errorf("failed to create output file: %w", err)
	}
	defer file.Close()

	writer := csv.NewWriter(file)
	defer writer.Flush()

	// Header
	header := []string{
		"ID",
		"Database",
		"DatabaseType",
		"Host",
		"Port",
		"BackupPath",
		"BackupType",
		"SizeBytes",
		"SizeHuman",
		"SHA256",
		"Compression",
		"Encrypted",
		"CreatedAt",
		"DurationSeconds",
		"Status",
		"VerifiedAt",
		"VerifyValid",
		"TestedAt",
		"TestSuccess",
		"RetentionPolicy",
	}

	if err := writer.Write(header); err != nil {
		return fmt.Errorf("failed to write CSV header: %w", err)
	}

	// Data rows
	for _, entry := range entries {
		row := []string{
			fmt.Sprintf("%d", entry.ID),
			entry.Database,
			entry.DatabaseType,
			entry.Host,
			fmt.Sprintf("%d", entry.Port),
			entry.BackupPath,
			entry.BackupType,
			fmt.Sprintf("%d", entry.SizeBytes),
			catalog.FormatSize(entry.SizeBytes),
			entry.SHA256,
			entry.Compression,
			fmt.Sprintf("%t", entry.Encrypted),
			entry.CreatedAt.Format(time.RFC3339),
			fmt.Sprintf("%.2f", entry.Duration),
			string(entry.Status),
			formatTime(entry.VerifiedAt),
			formatBool(entry.VerifyValid),
			formatTime(entry.DrillTestedAt),
			formatBool(entry.DrillSuccess),
			entry.RetentionPolicy,
		}

		if err := writer.Write(row); err != nil {
			return fmt.Errorf("failed to write CSV row: %w", err)
		}
	}

	fmt.Printf("✅ Exported %d backups to CSV: %s\n", len(entries), outputPath)
	fmt.Printf("   Open with Excel, LibreOffice, or other spreadsheet software\n")
	return nil
}

// exportHTML exports entries to HTML format with styling
func exportHTML(entries []*catalog.Entry, outputPath string, database string) error {
	file, err := os.Create(outputPath)
	if err != nil {
		return fmt.Errorf("failed to create output file: %w", err)
	}
	defer file.Close()

	title := "Backup Catalog Report"
	if database != "" {
		title = fmt.Sprintf("Backup Catalog Report: %s", database)
	}

	// Write HTML header with embedded CSS
	htmlHeader := fmt.Sprintf(`<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>%s</title>
<style>
  body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; margin: 20px; background: #f5f5f5; }
  .container { max-width: 1400px; margin: 0 auto; background: white; padding: 30px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }
  h1 { color: #2c3e50; border-bottom: 3px solid #3498db; padding-bottom: 10px; }
  .summary { background: #ecf0f1; padding: 15px; margin: 20px 0; border-radius: 5px; }
  .summary-item { display: inline-block; margin-right: 30px; }
  .summary-label { font-weight: bold; color: #7f8c8d; }
  .summary-value { color: #2c3e50; font-size: 18px; }
  table { width: 100%%; border-collapse: collapse; margin-top: 20px; }
  th { background: #34495e; color: white; padding: 12px; text-align: left; font-weight: 600; }
  td { padding: 10px; border-bottom: 1px solid #ecf0f1; }
  tr:hover { background: #f8f9fa; }
  .status-success { color: #27ae60; font-weight: bold; }
  .status-fail { color: #e74c3c; font-weight: bold; }
  .badge { padding: 3px 8px; border-radius: 3px; font-size: 12px; font-weight: bold; }
  .badge-encrypted { background: #3498db; color: white; }
  .badge-verified { background: #27ae60; color: white; }
  .badge-tested { background: #9b59b6; color: white; }
  .footer { margin-top: 30px; text-align: center; color: #95a5a6; font-size: 12px; }
</style>
</head>
<body>
<div class="container">
<h1>%s</h1>
`, title, title)

	file.WriteString(htmlHeader)

	// Summary section
	totalSize := int64(0)
	encryptedCount := 0
	verifiedCount := 0
	testedCount := 0

	for _, entry := range entries {
		totalSize += entry.SizeBytes
		if entry.Encrypted {
			encryptedCount++
		}
		if entry.VerifyValid != nil && *entry.VerifyValid {
			verifiedCount++
		}
		if entry.DrillSuccess != nil && *entry.DrillSuccess {
			testedCount++
		}
	}

	var oldestBackup, newestBackup time.Time
	if len(entries) > 0 {
		oldestBackup = entries[0].CreatedAt
		newestBackup = entries[len(entries)-1].CreatedAt
	}

	summaryHTML := fmt.Sprintf(`
<div class="summary">
  <div class="summary-item">
    <div class="summary-label">Total Backups:</div>
    <div class="summary-value">%d</div>
  </div>
  <div class="summary-item">
    <div class="summary-label">Total Size:</div>
    <div class="summary-value">%s</div>
  </div>
  <div class="summary-item">
    <div class="summary-label">Encrypted:</div>
    <div class="summary-value">%d (%.1f%%)</div>
  </div>
  <div class="summary-item">
    <div class="summary-label">Verified:</div>
    <div class="summary-value">%d (%.1f%%)</div>
  </div>
  <div class="summary-item">
    <div class="summary-label">DR Tested:</div>
    <div class="summary-value">%d (%.1f%%)</div>
  </div>
</div>
<div class="summary">
  <div class="summary-item">
    <div class="summary-label">Oldest Backup:</div>
    <div class="summary-value">%s</div>
  </div>
  <div class="summary-item">
    <div class="summary-label">Newest Backup:</div>
    <div class="summary-value">%s</div>
  </div>
  <div class="summary-item">
    <div class="summary-label">Time Span:</div>
    <div class="summary-value">%s</div>
  </div>
</div>
`,
		len(entries),
		catalog.FormatSize(totalSize),
		encryptedCount, float64(encryptedCount)/float64(len(entries))*100,
		verifiedCount, float64(verifiedCount)/float64(len(entries))*100,
		testedCount, float64(testedCount)/float64(len(entries))*100,
		oldestBackup.Format("2006-01-02 15:04"),
		newestBackup.Format("2006-01-02 15:04"),
		formatTimeSpan(newestBackup.Sub(oldestBackup)),
	)

	file.WriteString(summaryHTML)

	// Table header
	tableHeader := `
<table>
<thead>
<tr>
<th>Database</th>
<th>Created</th>
<th>Size</th>
<th>Type</th>
<th>Duration</th>
<th>Status</th>
<th>Attributes</th>
</tr>
</thead>
<tbody>
`
	file.WriteString(tableHeader)

	// Table rows
	for _, entry := range entries {
		badges := []string{}
		if entry.Encrypted {
			badges = append(badges, `<span class="badge badge-encrypted">Encrypted</span>`)
		}
		if entry.VerifyValid != nil && *entry.VerifyValid {
			badges = append(badges, `<span class="badge badge-verified">Verified</span>`)
		}
		if entry.DrillSuccess != nil && *entry.DrillSuccess {
			badges = append(badges, `<span class="badge badge-tested">DR Tested</span>`)
		}

		statusClass := "status-success"
		statusText := string(entry.Status)
		if entry.Status == catalog.StatusFailed {
			statusClass = "status-fail"
		}

		row := fmt.Sprintf(`
<tr>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%.1fs</td>
<td class="%s">%s</td>
<td>%s</td>
</tr>`,
			html.EscapeString(entry.Database),
			entry.CreatedAt.Format("2006-01-02 15:04:05"),
			catalog.FormatSize(entry.SizeBytes),
			html.EscapeString(entry.BackupType),
			entry.Duration,
			statusClass,
			html.EscapeString(statusText),
			strings.Join(badges, " "),
		)
		file.WriteString(row)
	}

	// Table footer and close HTML
	htmlFooter := `
</tbody>
</table>
<div class="footer">
Generated by dbbackup on ` + time.Now().Format("2006-01-02 15:04:05") + `
</div>
</div>
</body>
</html>
`
	file.WriteString(htmlFooter)

fmt.Printf("✅ Exported %d backups to HTML: %s\n", len(entries), outputPath)
|
||||
fmt.Printf(" Open in browser: file://%s\n", filepath.Join(os.Getenv("PWD"), exportOutput))
	return nil
}

// exportJSON exports entries to JSON format
func exportJSON(entries []*catalog.Entry, outputPath string) error {
	file, err := os.Create(outputPath)
	if err != nil {
		return fmt.Errorf("failed to create output file: %w", err)
	}
	defer file.Close()

	encoder := json.NewEncoder(file)
	encoder.SetIndent("", "  ")

	if err := encoder.Encode(entries); err != nil {
		return fmt.Errorf("failed to encode JSON: %w", err)
	}

	fmt.Printf("✅ Exported %d backups to JSON: %s\n", len(entries), outputPath)
	return nil
}

// formatTime formats *time.Time to string
func formatTime(t *time.Time) string {
	if t == nil {
		return ""
	}
	return t.Format(time.RFC3339)
}

// formatBool formats *bool to string
func formatBool(b *bool) string {
	if b == nil {
		return ""
	}
	if *b {
		return "true"
	}
	return "false"
}

// formatExportDuration formats *time.Duration to string
func formatExportDuration(d *time.Duration) string {
	if d == nil {
		return ""
	}
	return d.String()
}

// formatTimeSpan formats a duration in human-readable form
func formatTimeSpan(d time.Duration) string {
	days := int(d.Hours() / 24)
	if days > 365 {
		years := days / 365
		return fmt.Sprintf("%d years", years)
	}
	if days > 30 {
		months := days / 30
		return fmt.Sprintf("%d months", months)
	}
	if days > 0 {
		return fmt.Sprintf("%d days", days)
	}
	return fmt.Sprintf("%.0f hours", d.Hours())
}
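The JSON export writes the `catalog.Entry` slice verbatim, so downstream tooling can unmarshal it into whatever subset of fields it needs. A minimal consumer might look like the sketch below; the struct and its JSON tags (`database`, `size_bytes`, `created_at`) are assumptions for illustration, since `catalog.Entry`'s tags live in `internal/catalog` and are not shown in this diff.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"time"
)

// entry mirrors a few assumed fields of catalog.Entry; adjust the
// JSON tags to match the real struct in internal/catalog.
type entry struct {
	Database  string    `json:"database"`
	SizeBytes int64     `json:"size_bytes"`
	CreatedAt time.Time `json:"created_at"`
}

func main() {
	// Output of: dbbackup catalog export --format json --output backups.json
	data, err := os.ReadFile("backups.json")
	if err != nil {
		panic(err)
	}
	var entries []entry
	if err := json.Unmarshal(data, &entries); err != nil {
		panic(err)
	}
	var total int64
	for _, e := range entries {
		total += e.SizeBytes
	}
	fmt.Printf("%d backups, %d bytes total\n", len(entries), total)
}
```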
cmd/cloud_sync.go (new file, 335 lines)
@@ -0,0 +1,335 @@
// Package cmd - cloud sync command
package cmd

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"dbbackup/internal/cloud"

	"github.com/spf13/cobra"
)

var (
	syncDryRun         bool
	syncDelete         bool
	syncNewerOnly      bool
	syncDatabaseFilter string
)

var cloudSyncCmd = &cobra.Command{
	Use:   "sync [local-dir]",
	Short: "Sync local backups to cloud storage",
	Long: `Sync local backup directory with cloud storage.

Uploads new and updated backups to cloud, optionally deleting
files in cloud that no longer exist locally.

Examples:
  # Sync backup directory to cloud
  dbbackup cloud sync /backups

  # Dry run - show what would be synced
  dbbackup cloud sync /backups --dry-run

  # Sync and delete orphaned cloud files
  dbbackup cloud sync /backups --delete

  # Only upload newer files
  dbbackup cloud sync /backups --newer-only

  # Sync specific database backups
  dbbackup cloud sync /backups --database mydb`,
	Args: cobra.ExactArgs(1),
	RunE: runCloudSync,
}

func init() {
	cloudCmd.AddCommand(cloudSyncCmd)

	// Sync-specific flags
	cloudSyncCmd.Flags().BoolVar(&syncDryRun, "dry-run", false, "Show what would be synced without uploading")
	cloudSyncCmd.Flags().BoolVar(&syncDelete, "delete", false, "Delete cloud files that don't exist locally")
	cloudSyncCmd.Flags().BoolVar(&syncNewerOnly, "newer-only", false, "Only upload files newer than cloud version")
	cloudSyncCmd.Flags().StringVar(&syncDatabaseFilter, "database", "", "Only sync backups for specific database")

	// Cloud configuration flags
	cloudSyncCmd.Flags().StringVar(&cloudProvider, "cloud-provider", getEnv("DBBACKUP_CLOUD_PROVIDER", "s3"), "Cloud provider (s3, minio, b2)")
	cloudSyncCmd.Flags().StringVar(&cloudBucket, "cloud-bucket", getEnv("DBBACKUP_CLOUD_BUCKET", ""), "Bucket name")
	cloudSyncCmd.Flags().StringVar(&cloudRegion, "cloud-region", getEnv("DBBACKUP_CLOUD_REGION", "us-east-1"), "Region")
	cloudSyncCmd.Flags().StringVar(&cloudEndpoint, "cloud-endpoint", getEnv("DBBACKUP_CLOUD_ENDPOINT", ""), "Custom endpoint (for MinIO)")
	cloudSyncCmd.Flags().StringVar(&cloudAccessKey, "cloud-access-key", getEnv("DBBACKUP_CLOUD_ACCESS_KEY", getEnv("AWS_ACCESS_KEY_ID", "")), "Access key")
	cloudSyncCmd.Flags().StringVar(&cloudSecretKey, "cloud-secret-key", getEnv("DBBACKUP_CLOUD_SECRET_KEY", getEnv("AWS_SECRET_ACCESS_KEY", "")), "Secret key")
	cloudSyncCmd.Flags().StringVar(&cloudPrefix, "cloud-prefix", getEnv("DBBACKUP_CLOUD_PREFIX", ""), "Key prefix")
	cloudSyncCmd.Flags().StringVar(&cloudBandwidthLimit, "bandwidth-limit", getEnv("DBBACKUP_BANDWIDTH_LIMIT", ""), "Bandwidth limit (e.g., 10MB/s, 100Mbps)")
	cloudSyncCmd.Flags().BoolVarP(&cloudVerbose, "verbose", "v", false, "Verbose output")
}

type syncAction struct {
	Action   string // "upload", "skip", "delete"
	Filename string
	Size     int64
	Reason   string
}

func runCloudSync(cmd *cobra.Command, args []string) error {
	localDir := args[0]

	// Validate local directory
	info, err := os.Stat(localDir)
	if err != nil {
		return fmt.Errorf("cannot access directory: %w", err)
	}
	if !info.IsDir() {
		return fmt.Errorf("not a directory: %s", localDir)
	}

	backend, err := getCloudBackend()
	if err != nil {
		return err
	}

	ctx := context.Background()

	fmt.Println()
	fmt.Println("╔═══════════════════════════════════════════════════════════════╗")
	fmt.Println("║                           Cloud Sync                          ║")
	fmt.Println("╠═══════════════════════════════════════════════════════════════╣")
	fmt.Printf("║ Local: %-52s   ║\n", truncateSyncString(localDir, 52))
	fmt.Printf("║ Cloud: %-52s   ║\n", truncateSyncString(fmt.Sprintf("%s/%s", backend.Name(), cloudBucket), 52))
	if syncDryRun {
		fmt.Println("║ Mode:  DRY RUN (no changes will be made)                      ║")
	}
	fmt.Println("╚═══════════════════════════════════════════════════════════════╝")
	fmt.Println()

	// Get local files
	localFiles := make(map[string]os.FileInfo)
	err = filepath.Walk(localDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}

		// Only include backup files
		ext := strings.ToLower(filepath.Ext(path))
		if !isSyncBackupFile(ext) {
			return nil
		}

		// Apply database filter
		if syncDatabaseFilter != "" && !strings.Contains(filepath.Base(path), syncDatabaseFilter) {
			return nil
		}

		relPath, _ := filepath.Rel(localDir, path)
		localFiles[relPath] = info
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to scan local directory: %w", err)
	}

	// Get cloud files
	cloudBackups, err := backend.List(ctx, cloudPrefix)
	if err != nil {
		return fmt.Errorf("failed to list cloud files: %w", err)
	}

	cloudFiles := make(map[string]cloud.BackupInfo)
	for _, b := range cloudBackups {
		cloudFiles[b.Name] = b
	}

	// Analyze sync actions
	var actions []syncAction
	var uploadCount, skipCount, deleteCount int
	var uploadSize int64

	// Check local files
	for filename, info := range localFiles {
		cloudInfo, existsInCloud := cloudFiles[filename]

		if !existsInCloud {
			// New file - needs upload
			actions = append(actions, syncAction{
				Action:   "upload",
				Filename: filename,
				Size:     info.Size(),
				Reason:   "new file",
			})
			uploadCount++
			uploadSize += info.Size()
		} else if syncNewerOnly {
			// Check if local is newer
			if info.ModTime().After(cloudInfo.LastModified) {
				actions = append(actions, syncAction{
					Action:   "upload",
					Filename: filename,
					Size:     info.Size(),
					Reason:   "local is newer",
				})
				uploadCount++
				uploadSize += info.Size()
			} else {
				actions = append(actions, syncAction{
					Action:   "skip",
					Filename: filename,
					Size:     info.Size(),
					Reason:   "cloud is up to date",
				})
				skipCount++
			}
		} else {
			// Check by size (simpler than hash)
			if info.Size() != cloudInfo.Size {
				actions = append(actions, syncAction{
					Action:   "upload",
					Filename: filename,
					Size:     info.Size(),
					Reason:   "size mismatch",
				})
				uploadCount++
				uploadSize += info.Size()
			} else {
				actions = append(actions, syncAction{
					Action:   "skip",
					Filename: filename,
					Size:     info.Size(),
					Reason:   "already synced",
				})
				skipCount++
			}
		}
	}

	// Check for cloud files to delete
	if syncDelete {
		for cloudFile := range cloudFiles {
			if _, existsLocally := localFiles[cloudFile]; !existsLocally {
				actions = append(actions, syncAction{
					Action:   "delete",
					Filename: cloudFile,
					Size:     cloudFiles[cloudFile].Size,
					Reason:   "not in local",
				})
				deleteCount++
			}
		}
	}

	// Show summary
	fmt.Printf("📊 Sync Summary\n")
	fmt.Printf("   Local files:  %d\n", len(localFiles))
	fmt.Printf("   Cloud files:  %d\n", len(cloudFiles))
	fmt.Printf("   To upload:    %d (%s)\n", uploadCount, cloud.FormatSize(uploadSize))
	fmt.Printf("   To skip:      %d\n", skipCount)
	if syncDelete {
		fmt.Printf("   To delete:    %d\n", deleteCount)
	}
	fmt.Println()

	if uploadCount == 0 && deleteCount == 0 {
		fmt.Println("✅ Already in sync - nothing to do!")
		return nil
	}

	// Verbose action list
	if cloudVerbose || syncDryRun {
		fmt.Println("📋 Actions:")
		for _, action := range actions {
			if action.Action == "skip" && !cloudVerbose {
				continue
			}
			icon := "📤"
			if action.Action == "skip" {
				icon = "⏭️"
			} else if action.Action == "delete" {
				icon = "🗑️"
			}
			fmt.Printf("   %s %-8s %-40s (%s)\n", icon, action.Action, truncateSyncString(action.Filename, 40), action.Reason)
		}
		fmt.Println()
	}

	if syncDryRun {
		fmt.Println("🔍 Dry run complete - no changes made")
		return nil
	}

	// Execute sync
	fmt.Println("🚀 Starting sync...")
	fmt.Println()

	var successUploads, successDeletes int
	var failedUploads, failedDeletes int

	for _, action := range actions {
		switch action.Action {
		case "upload":
			localPath := filepath.Join(localDir, action.Filename)
			fmt.Printf("📤 Uploading: %s\n", action.Filename)

			err := backend.Upload(ctx, localPath, action.Filename, nil)
			if err != nil {
				fmt.Printf("   ❌ Failed: %v\n", err)
				failedUploads++
			} else {
				fmt.Printf("   ✅ Done (%s)\n", cloud.FormatSize(action.Size))
				successUploads++
			}

		case "delete":
			fmt.Printf("🗑️  Deleting: %s\n", action.Filename)

			err := backend.Delete(ctx, action.Filename)
			if err != nil {
				fmt.Printf("   ❌ Failed: %v\n", err)
				failedDeletes++
			} else {
				fmt.Printf("   ✅ Deleted\n")
				successDeletes++
			}
		}
	}

	// Final summary
	fmt.Println()
	fmt.Println("═══════════════════════════════════════════════════════════════")
	fmt.Printf("✅ Sync Complete\n")
	fmt.Printf("   Uploaded: %d/%d\n", successUploads, uploadCount)
	if syncDelete {
		fmt.Printf("   Deleted:  %d/%d\n", successDeletes, deleteCount)
	}
	if failedUploads > 0 || failedDeletes > 0 {
		fmt.Printf("   ⚠️  Failures: %d\n", failedUploads+failedDeletes)
	}
	fmt.Println("═══════════════════════════════════════════════════════════════")

	return nil
}

func isSyncBackupFile(ext string) bool {
	backupExts := []string{
		".dump", ".sql", ".gz", ".xz", ".zst",
		".backup", ".bak", ".dmp",
	}
	for _, e := range backupExts {
		if ext == e {
			return true
		}
	}
	return false
}

func truncateSyncString(s string, max int) string {
	if len(s) <= max {
		return s
	}
	return s[:max-3] + "..."
}
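The sync loop deliberately compares files by size rather than by content ("simpler than hash" in the comment above), which avoids reading every local file but can miss a changed file whose size is unchanged. If that trade-off ever matters, a content check is a small extension; the sketch below hashes a local file with SHA-256 for comparison against a previously recorded digest. It is illustrative only: whether the cloud backend exposes a comparable checksum (an ETag, for instance) depends on the provider, and the path and stored digest here are made up.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// fileSHA256 streams a file through SHA-256 without loading it into memory.
func fileSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	local, err := fileSHA256("/backups/mydb.dump") // hypothetical backup file
	if err != nil {
		panic(err)
	}
	remote := "..." // digest previously recorded for the cloud copy (hypothetical)
	fmt.Println("needs upload:", local != remote)
}
```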
cmd/completion.go (new file, 80 lines)
@@ -0,0 +1,80 @@
package cmd

import (
	"os"

	"github.com/spf13/cobra"
)

var completionCmd = &cobra.Command{
	Use:   "completion [bash|zsh|fish|powershell]",
	Short: "Generate shell completion scripts",
	Long: `Generate shell completion scripts for dbbackup commands.

The completion script allows tab-completion of:
  - Commands and subcommands
  - Flags and their values
  - File paths for backup/restore operations

Installation Instructions:

Bash:
  # Add to ~/.bashrc or ~/.bash_profile:
  source <(dbbackup completion bash)

  # Or save to file and source it:
  dbbackup completion bash > ~/.dbbackup-completion.bash
  echo 'source ~/.dbbackup-completion.bash' >> ~/.bashrc

Zsh:
  # Add to ~/.zshrc:
  source <(dbbackup completion zsh)

  # Or save to completion directory:
  dbbackup completion zsh > "${fpath[1]}/_dbbackup"

  # For custom location:
  dbbackup completion zsh > ~/.dbbackup-completion.zsh
  echo 'source ~/.dbbackup-completion.zsh' >> ~/.zshrc

Fish:
  # Save to fish completion directory:
  dbbackup completion fish > ~/.config/fish/completions/dbbackup.fish

PowerShell:
  # Add to your PowerShell profile:
  dbbackup completion powershell | Out-String | Invoke-Expression

  # Or save to profile:
  dbbackup completion powershell >> $PROFILE

After installation, restart your shell or source the completion file.

Note: Some flags may have conflicting shorthand letters across different
subcommands (e.g., -d for both db-type and database). Tab completion will
work correctly for the command you're using.`,
	ValidArgs:          []string{"bash", "zsh", "fish", "powershell"},
	Args:               cobra.ExactArgs(1),
	DisableFlagParsing: true, // Don't parse flags for completion generation
	Run: func(cmd *cobra.Command, args []string) {
		shell := args[0]

		// Get root command without triggering flag merging
		root := cmd.Root()

		switch shell {
		case "bash":
			root.GenBashCompletionV2(os.Stdout, true)
		case "zsh":
			root.GenZshCompletion(os.Stdout)
		case "fish":
			root.GenFishCompletion(os.Stdout, true)
		case "powershell":
			root.GenPowerShellCompletionWithDesc(os.Stdout)
		}
	},
}

func init() {
	rootCmd.AddCommand(completionCmd)
}
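Completion for dynamic values (database names, for example) is not wired up here; cobra supports it per command through `ValidArgsFunction`. A hedged sketch of the pattern, with a made-up `listDatabases` helper standing in for whatever dbbackup would query:

```go
package main

import "github.com/spf13/cobra"

// listDatabases is a hypothetical helper; in dbbackup it would
// query the configured server for database names.
func listDatabases() []string {
	return []string{"appdb", "analytics", "staging"}
}

func main() {
	restoreCmd := &cobra.Command{Use: "restore", Run: func(*cobra.Command, []string) {}}

	// Offer database names for the first positional argument only.
	restoreCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		if len(args) == 0 {
			return listDatabases(), cobra.ShellCompDirectiveNoFileComp
		}
		return nil, cobra.ShellCompDirectiveDefault
	}

	root := &cobra.Command{Use: "dbbackup"}
	root.AddCommand(restoreCmd)
	_ = root.Execute()
}
```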
@@ -105,7 +105,7 @@ func runEstimateSingle(cmd *cobra.Command, args []string) error {
	if !estimate.HasSufficientSpace {
		fmt.Println()
		fmt.Println("⚠️  WARNING: Insufficient disk space!")
		fmt.Printf("   Need %s more space to proceed safely.\n",
			formatBytes(estimate.RequiredDiskSpace-estimate.AvailableDiskSpace))
		fmt.Println()
		fmt.Println("   Recommended actions:")
@@ -191,7 +191,7 @@ func runEstimateCluster(cmd *cobra.Command, args []string) error {
	if !estimate.HasSufficientSpace {
		fmt.Println()
		fmt.Println("⚠️  WARNING: Insufficient disk space!")
		fmt.Printf("   Need %s more space to proceed safely.\n",
			formatBytes(estimate.RequiredDiskSpace-estimate.AvailableDiskSpace))
		fmt.Println()
		fmt.Println("   Recommended actions:")
cmd/man.go (new file, 182 lines)
@@ -0,0 +1,182 @@
package cmd

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

var (
	manOutputDir string
)

var manCmd = &cobra.Command{
	Use:   "man",
	Short: "Generate man pages for dbbackup",
	Long: `Generate Unix manual (man) pages for all dbbackup commands.

Man pages are generated in standard groff format and can be viewed
with the 'man' command or installed system-wide.

Installation:
  # Generate pages
  dbbackup man --output /tmp/man

  # Install system-wide (requires root)
  sudo cp /tmp/man/*.1 /usr/local/share/man/man1/
  sudo mandb   # Update man database

  # View pages
  man dbbackup
  man dbbackup-backup
  man dbbackup-restore

Examples:
  # Generate to current directory
  dbbackup man

  # Generate to specific directory
  dbbackup man --output ./docs/man

  # Generate and install system-wide
  dbbackup man --output /tmp/man && \
    sudo cp /tmp/man/*.1 /usr/local/share/man/man1/ && \
    sudo mandb`,
	DisableFlagParsing: true, // Avoid shorthand conflicts during generation
	RunE:               runGenerateMan,
}

func init() {
	rootCmd.AddCommand(manCmd)
	manCmd.Flags().StringVarP(&manOutputDir, "output", "o", "./man", "Output directory for man pages")

	// Parse flags manually since DisableFlagParsing is enabled
	manCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
		cmd.Parent().HelpFunc()(cmd, args)
	})
}

func runGenerateMan(cmd *cobra.Command, args []string) error {
	// Parse flags manually since DisableFlagParsing is enabled
	outputDir := "./man"
	for i := 0; i < len(args); i++ {
		if args[i] == "--output" || args[i] == "-o" {
			if i+1 < len(args) {
				outputDir = args[i+1]
				i++
			}
		}
	}

	// Create output directory
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		return fmt.Errorf("failed to create output directory: %w", err)
	}

	// Generate man pages for root and all subcommands
	header := &doc.GenManHeader{
		Title:   "DBBACKUP",
		Section: "1",
		Source:  "dbbackup",
		Manual:  "Database Backup Tool",
	}

	// Due to shorthand flag conflicts in some subcommands (-d for db-type vs database),
	// we generate man pages command-by-command, catching any errors
	root := cmd.Root()
	generatedCount := 0
	failedCount := 0

	// Helper to generate man page for a single command
	genManForCommand := func(c *cobra.Command) {
		// Recover from panic due to flag conflicts
		defer func() {
			if r := recover(); r != nil {
				failedCount++
				// Silently skip commands with flag conflicts
			}
		}()
		// Replace spaces in the command path with hyphens for the filename
		// (e.g. "dbbackup catalog export" -> "dbbackup-catalog-export.1")
		filename := filepath.Join(outputDir, strings.ReplaceAll(c.CommandPath(), " ", "-")+".1")

		f, err := os.Create(filename)
		if err != nil {
			failedCount++
			return
		}
		defer f.Close()

		if err := doc.GenMan(c, header, f); err != nil {
			failedCount++
			os.Remove(filename) // Clean up partial file
		} else {
			generatedCount++
		}
	}

	// Generate for root command
	genManForCommand(root)

	// Walk through all commands
	var walkCommands func(*cobra.Command)
	walkCommands = func(c *cobra.Command) {
		for _, sub := range c.Commands() {
			// Skip hidden commands
			if sub.Hidden {
				continue
			}

			// Try to generate man page
			genManForCommand(sub)

			// Recurse into subcommands
			walkCommands(sub)
		}
	}

	walkCommands(root)

	fmt.Printf("✅ Generated %d man pages in %s", generatedCount, outputDir)
	if failedCount > 0 {
		fmt.Printf(" (%d skipped due to flag conflicts)\n", failedCount)
	} else {
		fmt.Println()
	}
	fmt.Println()

	fmt.Println("📖 Installation Instructions:")
	fmt.Println()
	fmt.Println("  1. Install system-wide (requires root):")
	fmt.Printf("     sudo cp %s/*.1 /usr/local/share/man/man1/\n", outputDir)
	fmt.Println("     sudo mandb")
	fmt.Println()
	fmt.Println("  2. Test locally (no installation):")
	fmt.Printf("     man -l %s/dbbackup.1\n", outputDir)
	fmt.Println()
	fmt.Println("  3. View installed pages:")
	fmt.Println("     man dbbackup")
	fmt.Println("     man dbbackup-backup")
	fmt.Println("     man dbbackup-restore")
	fmt.Println()

	// Show some example pages
	files, err := filepath.Glob(filepath.Join(outputDir, "*.1"))
	if err == nil && len(files) > 0 {
		fmt.Println("📋 Generated Pages (sample):")
		for i, file := range files {
			if i >= 5 {
				fmt.Printf("  ... and %d more\n", len(files)-5)
				break
			}
			fmt.Printf("  - %s\n", filepath.Base(file))
		}
		fmt.Println()
	}

	return nil
}
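The manual command walk with `recover()` exists because some subcommands register conflicting shorthand flags, and cobra's doc generation can panic when it merges them; generating page-by-page lets one bad command be skipped instead of aborting the whole run. For a project without such conflicts, the stock tree generator in `spf13/cobra/doc` does the same job in one call; a minimal sketch with a toy root command:

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "dbbackup", Short: "Database backup tool"}
	header := &doc.GenManHeader{Title: "DBBACKUP", Section: "1"}

	// The output directory must exist before generation.
	if err := os.MkdirAll("./man", 0o755); err != nil {
		log.Fatal(err)
	}

	// GenManTree writes one page per command into ./man,
	// replacing spaces in command paths with hyphens itself.
	if err := doc.GenManTree(root, header, "./man"); err != nil {
		log.Fatal(err)
	}
}
```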
cmd/version.go (new file, 168 lines)
@@ -0,0 +1,168 @@
// Package cmd - version command showing detailed build and system info
package cmd

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"runtime"
	"strings"

	"github.com/spf13/cobra"
)

var versionOutputFormat string

var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "Show detailed version and system information",
	Long: `Display comprehensive version information including:

  - dbbackup version, build time, and git commit
  - Go runtime version
  - Operating system and architecture
  - Installed database tool versions (pg_dump, mysqldump, etc.)
  - System information

Useful for troubleshooting and bug reports.

Examples:
  # Show version info
  dbbackup version

  # JSON output for scripts
  dbbackup version --format json

  # Short version only
  dbbackup version --format short`,
	Run: runVersionCmd,
}

func init() {
	rootCmd.AddCommand(versionCmd)
	versionCmd.Flags().StringVar(&versionOutputFormat, "format", "table", "Output format (table, json, short)")
}

type versionInfo struct {
	Version       string            `json:"version"`
	BuildTime     string            `json:"build_time"`
	GitCommit     string            `json:"git_commit"`
	GoVersion     string            `json:"go_version"`
	OS            string            `json:"os"`
	Arch          string            `json:"arch"`
	NumCPU        int               `json:"num_cpu"`
	DatabaseTools map[string]string `json:"database_tools"`
}

func runVersionCmd(cmd *cobra.Command, args []string) {
	info := collectVersionInfo()

	switch versionOutputFormat {
	case "json":
		outputVersionJSON(info)
	case "short":
		fmt.Printf("dbbackup %s\n", info.Version)
	default:
		outputTable(info)
	}
}

func collectVersionInfo() versionInfo {
	info := versionInfo{
		Version:       cfg.Version,
		BuildTime:     cfg.BuildTime,
		GitCommit:     cfg.GitCommit,
		GoVersion:     runtime.Version(),
		OS:            runtime.GOOS,
		Arch:          runtime.GOARCH,
		NumCPU:        runtime.NumCPU(),
		DatabaseTools: make(map[string]string),
	}

	// Check database tools
	tools := []struct {
		name    string
		command string
		args    []string
	}{
		{"pg_dump", "pg_dump", []string{"--version"}},
		{"pg_restore", "pg_restore", []string{"--version"}},
		{"psql", "psql", []string{"--version"}},
		{"mysqldump", "mysqldump", []string{"--version"}},
		{"mysql", "mysql", []string{"--version"}},
		{"mariadb-dump", "mariadb-dump", []string{"--version"}},
	}

	for _, tool := range tools {
		version := getToolVersion(tool.command, tool.args)
		if version != "" {
			info.DatabaseTools[tool.name] = version
		}
	}

	return info
}

func getToolVersion(command string, args []string) string {
	cmd := exec.Command(command, args...)
	output, err := cmd.Output()
	if err != nil {
		return ""
	}

	// Parse first line and extract version
	line := strings.Split(string(output), "\n")[0]
	line = strings.TrimSpace(line)

	// Try to extract just the version number
	// e.g., "pg_dump (PostgreSQL) 16.1" -> "16.1"
	// e.g., "mysqldump Ver 8.0.35" -> "8.0.35"
	parts := strings.Fields(line)
	if len(parts) > 0 {
		// Return last part which is usually the version
		return parts[len(parts)-1]
	}

	return line
}

func outputVersionJSON(info versionInfo) {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	enc.Encode(info)
}

func outputTable(info versionInfo) {
	fmt.Println()
	fmt.Println("╔═══════════════════════════════════════════════════════════════╗")
	fmt.Println("║                     dbbackup Version Info                     ║")
	fmt.Println("╠═══════════════════════════════════════════════════════════════╣")
	fmt.Printf("║ %-20s %-40s ║\n", "Version:", info.Version)
	fmt.Printf("║ %-20s %-40s ║\n", "Build Time:", info.BuildTime)

	// Truncate commit if too long
	commit := info.GitCommit
	if len(commit) > 40 {
		commit = commit[:40]
	}
	fmt.Printf("║ %-20s %-40s ║\n", "Git Commit:", commit)
	fmt.Println("╠═══════════════════════════════════════════════════════════════╣")
	fmt.Printf("║ %-20s %-40s ║\n", "Go Version:", info.GoVersion)
	fmt.Printf("║ %-20s %-40s ║\n", "OS/Arch:", fmt.Sprintf("%s/%s", info.OS, info.Arch))
	fmt.Printf("║ %-20s %-40d ║\n", "CPU Cores:", info.NumCPU)
	fmt.Println("╠═══════════════════════════════════════════════════════════════╣")
	fmt.Println("║ Database Tools                                                ║")
	fmt.Println("╟───────────────────────────────────────────────────────────────╢")

	if len(info.DatabaseTools) == 0 {
		fmt.Println("║ (none detected)                                               ║")
	} else {
		for tool, version := range info.DatabaseTools {
			fmt.Printf("║   %-18s %-41s ║\n", tool+":", version)
		}
	}

	fmt.Println("╚═══════════════════════════════════════════════════════════════╝")
	fmt.Println()
}
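`getToolVersion` relies on the version string's last whitespace-separated field, which works for the common banners but is intentionally rough. A quick standalone check of the heuristic (the sample strings are illustrative, not captured output):

```go
package main

import (
	"fmt"
	"strings"
)

// lastField mimics getToolVersion's parsing: take the final
// whitespace-separated token of the first output line.
func lastField(line string) string {
	parts := strings.Fields(strings.TrimSpace(line))
	if len(parts) == 0 {
		return line
	}
	return parts[len(parts)-1]
}

func main() {
	fmt.Println(lastField("pg_dump (PostgreSQL) 16.1")) // 16.1
	fmt.Println(lastField("mysqldump  Ver 8.0.35"))     // 8.0.35
	// Caveat: longer banners defeat the heuristic, e.g.
	// "mysql Ver 8.0.35 for Linux on x86_64 (MySQL Community Server)"
	// yields "Server)" rather than the version.
}
```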
go.mod (5 lines changed)
@@ -23,6 +23,7 @@ require (
	github.com/hashicorp/go-multierror v1.1.1
	github.com/jackc/pgx/v5 v5.7.6
	github.com/klauspost/pgzip v1.2.6
	github.com/mattn/go-isatty v0.0.20
	github.com/schollz/progressbar/v3 v3.19.0
	github.com/shirou/gopsutil/v3 v3.24.5
	github.com/sirupsen/logrus v1.9.3
@@ -69,6 +70,7 @@ require (
	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
	github.com/charmbracelet/x/term v0.2.1 // indirect
	github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
	github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
@@ -90,7 +92,6 @@ require (
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-localereader v0.0.1 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
@@ -102,6 +103,7 @@ require (
	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
	github.com/tklauser/go-sysconf v0.3.12 // indirect
	github.com/tklauser/numcpus v0.6.1 // indirect
@@ -130,6 +132,7 @@ require (
	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
	google.golang.org/grpc v1.76.0 // indirect
	google.golang.org/protobuf v1.36.10 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	modernc.org/libc v1.67.6 // indirect
	modernc.org/mathutil v1.7.1 // indirect
	modernc.org/memory v1.11.0 // indirect
go.sum (10 lines changed)
@@ -106,6 +106,7 @@ github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7m
github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY=
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -177,6 +178,10 @@ github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
@@ -216,6 +221,9 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/schollz/progressbar/v3 v3.19.0 h1:Ea18xuIRQXLAUidVDox3AbwfUhD0/1IvohyTutOIFoc=
github.com/schollz/progressbar/v3 v3.19.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
@@ -312,6 +320,8 @@ google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94U
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -15,68 +15,68 @@ import (

// SizeEstimate contains backup size estimation results
type SizeEstimate struct {
	DatabaseName        string        `json:"database_name"`
	EstimatedRawSize    int64         `json:"estimated_raw_size_bytes"`
	EstimatedCompressed int64         `json:"estimated_compressed_bytes"`
	CompressionRatio    float64       `json:"compression_ratio"`
	TableCount          int           `json:"table_count"`
	LargestTable        string        `json:"largest_table,omitempty"`
	LargestTableSize    int64         `json:"largest_table_size_bytes,omitempty"`
	EstimatedDuration   time.Duration `json:"estimated_duration"`
	RecommendedProfile  string        `json:"recommended_profile"`
	RequiredDiskSpace   int64         `json:"required_disk_space_bytes"`
	AvailableDiskSpace  int64         `json:"available_disk_space_bytes"`
	HasSufficientSpace  bool          `json:"has_sufficient_space"`
	EstimationTime      time.Duration `json:"estimation_time"`
}

// ClusterSizeEstimate contains cluster-wide size estimation
type ClusterSizeEstimate struct {
	TotalDatabases      int                      `json:"total_databases"`
	TotalRawSize        int64                    `json:"total_raw_size_bytes"`
	TotalCompressed     int64                    `json:"total_compressed_bytes"`
	LargestDatabase     string                   `json:"largest_database,omitempty"`
	LargestDatabaseSize int64                    `json:"largest_database_size_bytes,omitempty"`
	EstimatedDuration   time.Duration            `json:"estimated_duration"`
	RequiredDiskSpace   int64                    `json:"required_disk_space_bytes"`
	AvailableDiskSpace  int64                    `json:"available_disk_space_bytes"`
	HasSufficientSpace  bool                     `json:"has_sufficient_space"`
	DatabaseEstimates   map[string]*SizeEstimate `json:"database_estimates,omitempty"`
	EstimationTime      time.Duration            `json:"estimation_time"`
}

// EstimateBackupSize estimates the size of a single database backup
func EstimateBackupSize(ctx context.Context, cfg *config.Config, log logger.Logger, databaseName string) (*SizeEstimate, error) {
	startTime := time.Now()

	estimate := &SizeEstimate{
		DatabaseName: databaseName,
	}

	// Create database connection
	db, err := database.New(cfg, log)
	if err != nil {
		return nil, fmt.Errorf("failed to create database instance: %w", err)
	}
	defer db.Close()

	if err := db.Connect(ctx); err != nil {
		return nil, fmt.Errorf("failed to connect to database: %w", err)
	}

	// Get database size based on engine type
	rawSize, err := db.GetDatabaseSize(ctx, databaseName)
	if err != nil {
		return nil, fmt.Errorf("failed to get database size: %w", err)
	}
	estimate.EstimatedRawSize = rawSize

	// Get table statistics
	tables, err := db.ListTables(ctx, databaseName)
	if err == nil {
		estimate.TableCount = len(tables)
	}

	// For PostgreSQL and MySQL, get additional detailed statistics
	if cfg.IsPostgreSQL() {
		pg := db.(*database.PostgreSQL)
@@ -89,24 +89,24 @@ func EstimateBackupSize(ctx context.Context, cfg *config.Config, log logger.Logg
			log.Debug("Could not get detailed MySQL stats: %v", err)
		}
	}

	// Calculate compression ratio (typical: 70-80% for databases)
	estimate.CompressionRatio = 0.25 // Assume 75% compression (1/4 of original size)
	if cfg.CompressionLevel >= 6 {
		estimate.CompressionRatio = 0.20 // Better compression with higher levels
	}
	estimate.EstimatedCompressed = int64(float64(estimate.EstimatedRawSize) * estimate.CompressionRatio)

	// Estimate duration (rough: 50 MB/s for pg_dump, 100 MB/s for mysqldump)
	throughputMBps := 50.0
	if cfg.IsMySQL() {
		throughputMBps = 100.0
	}

	sizeGB := float64(estimate.EstimatedRawSize) / (1024 * 1024 * 1024)
	durationMinutes := (sizeGB * 1024) / throughputMBps / 60
	estimate.EstimatedDuration = time.Duration(durationMinutes * float64(time.Minute))

	// Recommend profile based on size
	if sizeGB < 1 {
		estimate.RecommendedProfile = "balanced"
@@ -117,10 +117,10 @@ func EstimateBackupSize(ctx context.Context, cfg *config.Config, log logger.Logg
	} else {
		estimate.RecommendedProfile = "conservative" // Large DB, be careful
	}

	// Calculate required disk space (3x compressed size for safety: temp + compressed + checksum)
	estimate.RequiredDiskSpace = estimate.EstimatedCompressed * 3

	// Check available disk space
	if cfg.BackupDir != "" {
		if usage, err := disk.Usage(cfg.BackupDir); err == nil {
@@ -128,7 +128,7 @@ func EstimateBackupSize(ctx context.Context, cfg *config.Config, log logger.Logg
			estimate.HasSufficientSpace = estimate.AvailableDiskSpace > estimate.RequiredDiskSpace
		}
	}

	estimate.EstimationTime = time.Since(startTime)
	return estimate, nil
}
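With the constants baked in above (25% compression ratio, 50 MB/s pg_dump throughput, 3x safety factor for disk headroom), the arithmetic for a hypothetical 10 GiB PostgreSQL database works out as follows; a self-contained sketch of the same calculation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const gib = 1024 * 1024 * 1024

	rawSize := int64(10 * gib) // hypothetical 10 GiB database
	ratio := 0.25              // assume 75% compression
	throughputMBps := 50.0     // rough pg_dump throughput

	compressed := int64(float64(rawSize) * ratio) // 2.5 GiB
	sizeGB := float64(rawSize) / gib
	durationMinutes := (sizeGB * 1024) / throughputMBps / 60 // ~3.4 min
	required := compressed * 3                               // ~7.5 GiB of headroom

	fmt.Printf("compressed ≈ %.1f GiB\n", float64(compressed)/gib)
	fmt.Printf("duration   ≈ %v\n", time.Duration(durationMinutes*float64(time.Minute)).Round(time.Second))
	fmt.Printf("disk need  ≈ %.1f GiB\n", float64(required)/gib)
}
```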
@@ -136,30 +136,30 @@ func EstimateBackupSize(ctx context.Context, cfg *config.Config, log logger.Logg
// EstimateClusterBackupSize estimates the size of a full cluster backup
func EstimateClusterBackupSize(ctx context.Context, cfg *config.Config, log logger.Logger) (*ClusterSizeEstimate, error) {
	startTime := time.Now()

	estimate := &ClusterSizeEstimate{
		DatabaseEstimates: make(map[string]*SizeEstimate),
	}

	// Create database connection
	db, err := database.New(cfg, log)
	if err != nil {
		return nil, fmt.Errorf("failed to create database instance: %w", err)
	}
	defer db.Close()

	if err := db.Connect(ctx); err != nil {
		return nil, fmt.Errorf("failed to connect to database: %w", err)
	}

	// List all databases
	databases, err := db.ListDatabases(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list databases: %w", err)
	}

	estimate.TotalDatabases = len(databases)

	// Estimate each database
	for _, dbName := range databases {
		dbEstimate, err := EstimateBackupSize(ctx, cfg, log, dbName)
@@ -167,36 +167,36 @@ func EstimateClusterBackupSize(ctx context.Context, cfg *config.Config, log logg
			log.Warn("Failed to estimate database size", "database", dbName, "error", err)
			continue
		}

		estimate.DatabaseEstimates[dbName] = dbEstimate
		estimate.TotalRawSize += dbEstimate.EstimatedRawSize
		estimate.TotalCompressed += dbEstimate.EstimatedCompressed

		// Track largest database
		if dbEstimate.EstimatedRawSize > estimate.LargestDatabaseSize {
			estimate.LargestDatabase = dbName
			estimate.LargestDatabaseSize = dbEstimate.EstimatedRawSize
		}
	}

	// Estimate total duration (assume some parallelism)
	parallelism := float64(cfg.Jobs)
	if parallelism < 1 {
		parallelism = 1
	}

	// Calculate serial duration first
	var serialDuration time.Duration
	for _, dbEst := range estimate.DatabaseEstimates {
		serialDuration += dbEst.EstimatedDuration
	}

	// Adjust for parallelism (not perfect but reasonable)
	estimate.EstimatedDuration = time.Duration(float64(serialDuration) / parallelism)

	// Calculate required disk space
	estimate.RequiredDiskSpace = estimate.TotalCompressed * 3

	// Check available disk space
	if cfg.BackupDir != "" {
		if usage, err := disk.Usage(cfg.BackupDir); err == nil {
@@ -204,7 +204,7 @@ func EstimateClusterBackupSize(ctx context.Context, cfg *config.Config, log logg
			estimate.HasSufficientSpace = estimate.AvailableDiskSpace > estimate.RequiredDiskSpace
		}
	}

	estimate.EstimationTime = time.Since(startTime)
	return estimate, nil
}
@@ -212,7 +212,7 @@ func EstimateClusterBackupSize(ctx context.Context, cfg *config.Config, log logg
// estimatePostgresSize gets detailed statistics from PostgreSQL
func estimatePostgresSize(ctx context.Context, conn *sql.DB, databaseName string, estimate *SizeEstimate) error {
	// Note: EstimatedRawSize and TableCount are already set by interface methods

	// Get largest table size
	largestQuery := `
		SELECT
@@ -229,14 +229,14 @@ func estimatePostgresSize(ctx context.Context, conn *sql.DB, databaseName string
		estimate.LargestTable = tableName
		estimate.LargestTableSize = tableSize
	}

	return nil
}

// estimateMySQLSize gets detailed statistics from MySQL/MariaDB
func estimateMySQLSize(ctx context.Context, conn *sql.DB, databaseName string, estimate *SizeEstimate) error {
	// Note: EstimatedRawSize and TableCount are already set by interface methods

	// Get largest table
	largestQuery := `
		SELECT
@@ -253,7 +253,7 @@ func estimateMySQLSize(ctx context.Context, conn *sql.DB, databaseName string, e
		estimate.LargestTable = tableName
		estimate.LargestTableSize = tableSize
	}

	return nil
}
@@ -339,8 +339,9 @@ func (p *PostgreSQL) BuildBackupCommand(database, outputFile string, options Bac
 		cmd = append(cmd, "--compress="+strconv.Itoa(options.Compression))
 	}

-	// Parallel jobs (only for directory format)
-	if options.Parallel > 1 && options.Format == "directory" {
+	// Parallel jobs (supported for directory and custom formats since PostgreSQL 9.3)
+	// NOTE: plain format does NOT support --jobs (it's single-threaded by design)
+	if options.Parallel > 1 && (options.Format == "directory" || options.Format == "custom") {
 		cmd = append(cmd, "--jobs="+strconv.Itoa(options.Parallel))
 	}
@@ -402,16 +402,22 @@ func (m RestorePreviewModel) View() string {
 	// Estimate RTO
 	profile := m.config.GetCurrentProfile()
 	if profile != nil {
-		extractTime := m.archive.Size / (500 * 1024 * 1024) // 500 MB/s extraction
-		if extractTime < 1 {
-			extractTime = 1
+		// Calculate extraction time in seconds (500 MB/s decompression speed)
+		extractSeconds := m.archive.Size / (500 * 1024 * 1024)
+		if extractSeconds < 1 {
+			extractSeconds = 1
 		}
-		restoreSpeed := int64(50 * 1024 * 1024 * int64(profile.Jobs)) // 50MB/s per job
-		restoreTime := uncompressedEst / restoreSpeed
-		if restoreTime < 1 {
-			restoreTime = 1
+		// Calculate restore time in seconds (50 MB/s per parallel job)
+		restoreSpeed := int64(50 * 1024 * 1024 * int64(profile.Jobs))
+		restoreSeconds := uncompressedEst / restoreSpeed
+		if restoreSeconds < 1 {
+			restoreSeconds = 1
 		}
-		totalMinutes := extractTime + restoreTime
+		// Convert total seconds to minutes
+		totalMinutes := (extractSeconds + restoreSeconds) / 60
+		if totalMinutes < 1 {
+			totalMinutes = 1
+		}
 		s.WriteString(fmt.Sprintf("  Estimated RTO: ~%dm (with %s profile)\n", totalMinutes, profile.Name))
 	}
 }
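Under the corrected units, the RTO estimate for a hypothetical 5 GiB archive that expands to 20 GiB, restored with 4 parallel jobs, comes out at roughly 112 seconds, which the integer math reports as ~1m; a standalone sketch of the same arithmetic (all sizes are assumptions for illustration):

```go
package main

import "fmt"

func main() {
	const mib = 1024 * 1024

	archiveSize := int64(5 * 1024 * mib)      // hypothetical 5 GiB compressed archive
	uncompressedEst := int64(20 * 1024 * mib) // expands to ~20 GiB
	jobs := int64(4)

	// 500 MB/s decompression, 50 MB/s restore per parallel job
	extractSeconds := archiveSize / (500 * mib) // 10 s
	if extractSeconds < 1 {
		extractSeconds = 1
	}
	restoreSeconds := uncompressedEst / (50 * mib * jobs) // 102 s
	if restoreSeconds < 1 {
		restoreSeconds = 1
	}
	totalMinutes := (extractSeconds + restoreSeconds) / 60 // 112 s -> 1 m
	if totalMinutes < 1 {
		totalMinutes = 1
	}
	fmt.Printf("Estimated RTO: ~%dm\n", totalMinutes)
}
```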