Compare commits

...

2 Commits

Author SHA1 Message Date
04bf2c61c5 feat: add interactive catalog dashboard TUI (Quick Win #13)
Some checks failed
CI/CD / Test (push) Failing after 1m20s
CI/CD / Integration Tests (push) Has been skipped
CI/CD / Native Engine Tests (push) Has been skipped
CI/CD / Lint (push) Failing after 1m15s
CI/CD / Build Binary (push) Has been skipped
CI/CD / Test Release Build (push) Has been skipped
CI/CD / Release Binaries (push) Has been skipped
- Implement `dbbackup catalog dashboard` interactive TUI
- Browse backup catalog in sortable, filterable table view
- View detailed backup information with Enter key
- Real-time statistics (total backups, size, databases)
- Multi-level sorting and filtering capabilities

Interactive Features:
- Sortable columns: date, size, database, type
- Ascending/descending sort toggle
- Database filter with cycle navigation
- Search/filter by database name or path
- Pagination for large catalogs (20 entries per page)
- Detail view for individual backups

Navigation:
- ↑/↓ or k/j: Navigate entries
- ←/→ or h/l: Previous/next page
- Enter: View backup details
- s: Cycle sort mode
- r: Reverse sort order
- d: Cycle through database filters
- /: Enter filter mode
- c: Clear all filters
- R: Reload catalog from disk
- q/ESC: Quit (or return from details)

Display Information:
- List view: Date, database, type, size, status in table format
- Detail view: Full backup metadata including:
  - Basic info (database, type, status, timestamp)
  - File info (path, size, compression, encryption)
  - Performance metrics (duration, throughput)
  - Custom metadata fields

Statistics Bar:
- Total backup count
- Total size across all backups
- Number of unique databases
- Current filters and sort mode

Filtering Capabilities:
- Filter by database name (cycle through all databases)
- Free-text search across database names and paths
- Multiple filters can be combined
- Clear all filters with 'c' key

Use Cases:
- Quick overview of all backups
- Find specific backups interactively
- Analyze backup patterns and sizes
- Verify backup coverage per database
- Browse large backup catalogs efficiently

This completes Quick Win #13 from TODO_SESSION.md.
Provides user-friendly catalog browsing via TUI.
2026-01-31 06:41:36 +01:00
e05adcab2b feat: add parallel restore configuration and analysis (Quick Win #12)
Some checks failed
CI/CD / Test (push) Failing after 1m15s
CI/CD / Integration Tests (push) Has been skipped
CI/CD / Native Engine Tests (push) Has been skipped
CI/CD / Lint (push) Failing after 1m12s
CI/CD / Build Binary (push) Has been skipped
CI/CD / Test Release Build (push) Has been skipped
CI/CD / Release Binaries (push) Has been skipped
- Implement `dbbackup parallel-restore` command group
- Analyze system capabilities (CPU cores, memory)
- Recommend optimal parallel restore settings
- Simulate parallel restore execution plans
- Benchmark estimation for different job counts

Features:
- CPU-aware job recommendations
- Memory-based profile selection (conservative/balanced/aggressive)
- System capability analysis and reporting
- Parallel restore mode documentation
- Performance tips and best practices

Subcommands:
- status: Show system capabilities and current configuration
- recommend: Get optimal settings for current hardware
- simulate: Preview restore execution plan with job distribution
- benchmark: Estimate performance with different thread counts

Analysis capabilities:
- Auto-detect CPU cores and recommend optimal job count
- Memory-based profile recommendations
- Speedup estimation using Amdahl's law (see the formula after this list)
- Restore time estimation based on file size
- Context switching overhead warnings
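
For reference, Amdahl's law with parallelizable fraction p predicts

  speedup(n) = 1 / ((1 - p) + p / n)

so p = 0.8 at n = 8 jobs gives 1 / (0.2 + 0.1) ≈ 3.3x; the shipped estimator
approximates this with a simpler linear ~0.7x gain per extra job.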

Recommendations:
- Conservative profile: < 8GB RAM, limited parallelization
- Balanced profile: 8-16GB RAM, moderate parallelization
- Aggressive profile: > 16GB RAM, maximum parallelization
- Automatic headroom calculation (leave 2 cores free on systems with more than 8 cores)

Use cases:
- Optimize restore performance for specific hardware
- Plan restore operations before execution
- Understand parallel restore benefits
- Tune settings for large database restores
- Hardware capacity planning

This completes Quick Win #12 from TODO_SESSION.md.
Helps users optimize parallel restore performance.
2026-01-31 06:37:55 +01:00
3 changed files with 1029 additions and 0 deletions

68
cmd/catalog_dashboard.go Normal file

@@ -0,0 +1,68 @@
package cmd
import (
"fmt"
"dbbackup/internal/tui"
tea "github.com/charmbracelet/bubbletea"
"github.com/spf13/cobra"
)
var catalogDashboardCmd = &cobra.Command{
Use: "dashboard",
Short: "Interactive catalog browser (TUI)",
Long: `Launch an interactive terminal UI for browsing and managing the backup catalog.
The catalog dashboard lets you:
- Browse all backups in an interactive table
- Sort by date, size, database, or type
- Filter backups by database or search term
- View detailed backup information
- Pagination for large catalogs
- Real-time statistics
Navigation:
↑/↓ or k/j - Navigate entries
←/→ or h/l - Previous/next page
Enter - View backup details
s - Cycle sort (date → size → database → type)
r - Reverse sort order
d - Filter by database (cycle through)
/ - Search/filter
c - Clear filters
R - Reload catalog
q or ESC - Quit (or return from details)
Examples:
# Launch catalog dashboard
dbbackup catalog dashboard
# Dashboard shows:
# - Total backups and size
# - Sortable table with all backups
# - Pagination controls
# - Interactive filtering`,
RunE: runCatalogDashboard,
}
func init() {
catalogCmd.AddCommand(catalogDashboardCmd)
}
func runCatalogDashboard(cmd *cobra.Command, args []string) error {
// Check if we're in a terminal
if !tui.IsInteractiveTerminal() {
return fmt.Errorf("catalog dashboard requires an interactive terminal")
}
// Create and run the TUI
model := tui.NewCatalogDashboardView()
p := tea.NewProgram(model, tea.WithAltScreen())
if _, err := p.Run(); err != nil {
return fmt.Errorf("failed to run catalog dashboard: %w", err)
}
return nil
}

428
cmd/parallel_restore.go Normal file

@@ -0,0 +1,428 @@
package cmd
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/spf13/cobra"
)
var parallelRestoreCmd = &cobra.Command{
Use: "parallel-restore",
Short: "Configure and test parallel restore settings",
Long: `Configure parallel restore settings for faster database restoration.
Parallel restore uses multiple threads to restore databases concurrently:
- Parallel jobs within single database (--jobs flag)
- Parallel database restoration for cluster backups
- CPU-aware thread allocation
- Memory-aware resource limits
This significantly reduces restoration time for:
- Large databases with many tables
- Cluster backups with multiple databases
- Systems with multiple CPU cores
Configuration:
- Set parallel jobs count (default: auto-detect CPU cores)
- Configure memory limits for large restores
- Tune for specific hardware profiles
Examples:
# Show current parallel restore configuration
dbbackup parallel-restore status
# Test parallel restore performance
dbbackup parallel-restore benchmark --file backup.dump
# Show recommended settings for current system
dbbackup parallel-restore recommend
# Simulate parallel restore (dry-run)
dbbackup parallel-restore simulate --file backup.dump --jobs 8`,
}
var parallelRestoreStatusCmd = &cobra.Command{
Use: "status",
Short: "Show parallel restore configuration",
Long: `Display current parallel restore configuration and system capabilities.`,
RunE: runParallelRestoreStatus,
}
var parallelRestoreBenchmarkCmd = &cobra.Command{
Use: "benchmark",
Short: "Benchmark parallel restore performance",
Long: `Benchmark parallel restore with different thread counts to find optimal settings.`,
RunE: runParallelRestoreBenchmark,
}
var parallelRestoreRecommendCmd = &cobra.Command{
Use: "recommend",
Short: "Get recommended parallel restore settings",
Long: `Analyze system resources and recommend optimal parallel restore settings.`,
RunE: runParallelRestoreRecommend,
}
var parallelRestoreSimulateCmd = &cobra.Command{
Use: "simulate",
Short: "Simulate parallel restore execution plan",
Long: `Simulate parallel restore without actually restoring data to show execution plan.`,
RunE: runParallelRestoreSimulate,
}
var (
parallelRestoreFile string
parallelRestoreJobs int
parallelRestoreFormat string
)
func init() {
rootCmd.AddCommand(parallelRestoreCmd)
parallelRestoreCmd.AddCommand(parallelRestoreStatusCmd)
parallelRestoreCmd.AddCommand(parallelRestoreBenchmarkCmd)
parallelRestoreCmd.AddCommand(parallelRestoreRecommendCmd)
parallelRestoreCmd.AddCommand(parallelRestoreSimulateCmd)
parallelRestoreStatusCmd.Flags().StringVar(&parallelRestoreFormat, "format", "text", "Output format (text, json)")
parallelRestoreBenchmarkCmd.Flags().StringVar(&parallelRestoreFile, "file", "", "Backup file to benchmark (required)")
parallelRestoreBenchmarkCmd.MarkFlagRequired("file")
parallelRestoreSimulateCmd.Flags().StringVar(&parallelRestoreFile, "file", "", "Backup file to simulate (required)")
parallelRestoreSimulateCmd.Flags().IntVar(&parallelRestoreJobs, "jobs", 0, "Number of parallel jobs (0=auto)")
parallelRestoreSimulateCmd.MarkFlagRequired("file")
}
func runParallelRestoreStatus(cmd *cobra.Command, args []string) error {
numCPU := runtime.NumCPU()
recommendedJobs := numCPU
if numCPU > 8 {
recommendedJobs = numCPU - 2 // Leave headroom
}
status := ParallelRestoreStatus{
SystemCPUs: numCPU,
RecommendedJobs: recommendedJobs,
MaxJobs: numCPU * 2,
CurrentJobs: cfg.Jobs,
MemoryGB: getAvailableMemoryGB(),
ParallelSupported: true,
}
if parallelRestoreFormat == "json" {
data, err := json.MarshalIndent(status, "", "  ")
if err != nil {
return fmt.Errorf("failed to marshal status: %w", err)
}
fmt.Println(string(data))
return nil
}
fmt.Println("[PARALLEL RESTORE] System Capabilities")
fmt.Println("==========================================")
fmt.Println()
fmt.Printf("CPU Cores: %d\n", status.SystemCPUs)
fmt.Printf("Available Memory: %.1f GB\n", status.MemoryGB)
fmt.Println()
fmt.Println("[CONFIGURATION]")
fmt.Println("==========================================")
fmt.Printf("Current Jobs: %d\n", status.CurrentJobs)
fmt.Printf("Recommended Jobs: %d\n", status.RecommendedJobs)
fmt.Printf("Maximum Jobs: %d\n", status.MaxJobs)
fmt.Println()
fmt.Println("[PARALLEL RESTORE MODES]")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("1. Single Database Parallel Restore:")
fmt.Println(" Uses pg_restore -j flag or parallel mysql restore")
fmt.Println(" Restores tables concurrently within one database")
fmt.Println(" Example: dbbackup restore single db.dump --jobs 8 --confirm")
fmt.Println()
fmt.Println("2. Cluster Parallel Restore:")
fmt.Println(" Restores multiple databases concurrently")
fmt.Println(" Each database can use parallel jobs")
fmt.Println(" Example: dbbackup restore cluster backup.tar --jobs 4 --confirm")
fmt.Println()
fmt.Println("[PERFORMANCE TIPS]")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("• Start with recommended jobs count")
fmt.Println("• More jobs ≠ always faster (context switching overhead)")
fmt.Printf("• For this system: --jobs %d is optimal\n", status.RecommendedJobs)
fmt.Println("• Monitor system load during restore")
fmt.Println("• Use --profile aggressive for maximum speed")
fmt.Println("• SSD storage benefits more from parallelization")
fmt.Println()
return nil
}
func runParallelRestoreBenchmark(cmd *cobra.Command, args []string) error {
if _, err := os.Stat(parallelRestoreFile); err != nil {
return fmt.Errorf("backup file not found: %s", parallelRestoreFile)
}
fmt.Println("[PARALLEL RESTORE] Benchmark Mode")
fmt.Println("==========================================")
fmt.Println()
fmt.Printf("Backup File: %s\n", parallelRestoreFile)
fmt.Println()
// Detect backup format
ext := filepath.Ext(parallelRestoreFile)
format := "unknown"
if ext == ".dump" || ext == ".pgdump" {
format = "PostgreSQL custom format"
} else if ext == ".sql" || (ext == ".gz" && filepath.Ext(parallelRestoreFile[:len(parallelRestoreFile)-3]) == ".sql") {
format = "SQL format"
} else if ext == ".tar" || ext == ".tgz" {
format = "Cluster backup"
}
fmt.Printf("Detected Format: %s\n", format)
fmt.Println()
fmt.Println("[BENCHMARK STRATEGY]")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("Benchmarking would test restore with different job counts:")
fmt.Println()
numCPU := runtime.NumCPU()
testConfigs := []int{1, 2, 4}
if numCPU >= 8 {
testConfigs = append(testConfigs, 8)
}
if numCPU >= 16 {
testConfigs = append(testConfigs, 16)
}
for i, jobs := range testConfigs {
estimatedTime := estimateRestoreTime(parallelRestoreFile, jobs)
fmt.Printf("%d. Jobs=%d → Estimated: %s\n", i+1, jobs, estimatedTime)
}
fmt.Println()
fmt.Println("[NOTE]")
fmt.Println("==========================================")
fmt.Println("Actual benchmarking requires:")
fmt.Println(" - Test database or dry-run mode")
fmt.Println(" - Multiple restore attempts with different job counts")
fmt.Println(" - Measurement of wall clock time")
fmt.Println()
fmt.Println("For now, use 'dbbackup restore single --dry-run' to test without")
fmt.Println("actually restoring data.")
fmt.Println()
return nil
}
func runParallelRestoreRecommend(cmd *cobra.Command, args []string) error {
numCPU := runtime.NumCPU()
memoryGB := getAvailableMemoryGB()
fmt.Println("[PARALLEL RESTORE] Recommendations")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("[SYSTEM ANALYSIS]")
fmt.Println("==========================================")
fmt.Printf("CPU Cores: %d\n", numCPU)
fmt.Printf("Available Memory: %.1f GB\n", memoryGB)
fmt.Println()
// Calculate recommendations
var recommendedJobs int
var profile string
if memoryGB < 2 {
recommendedJobs = 1
profile = "conservative"
} else if memoryGB < 8 {
recommendedJobs = min(numCPU/2, 4)
profile = "conservative"
} else if memoryGB < 16 {
recommendedJobs = min(numCPU-1, 8)
profile = "balanced"
} else {
recommendedJobs = numCPU
if numCPU > 8 {
recommendedJobs = numCPU - 2
}
profile = "aggressive"
}
fmt.Println("[RECOMMENDATIONS]")
fmt.Println("==========================================")
fmt.Printf("Recommended Profile: %s\n", profile)
fmt.Printf("Recommended Jobs: %d\n", recommendedJobs)
fmt.Println()
fmt.Println("[COMMAND EXAMPLES]")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("Single database restore (recommended):")
fmt.Printf(" dbbackup restore single db.dump --jobs %d --profile %s --confirm\n", recommendedJobs, profile)
fmt.Println()
fmt.Println("Cluster restore (recommended):")
fmt.Printf(" dbbackup restore cluster backup.tar --jobs %d --profile %s --confirm\n", recommendedJobs, profile)
fmt.Println()
if memoryGB < 4 {
fmt.Println("[⚠ LOW MEMORY WARNING]")
fmt.Println("==========================================")
fmt.Println("Your system has limited memory. Consider:")
fmt.Println(" - Using --low-memory flag")
fmt.Println(" - Restoring databases one at a time")
fmt.Println(" - Reducing --jobs count")
fmt.Println(" - Closing other applications")
fmt.Println()
}
if numCPU >= 16 {
fmt.Println("[💡 HIGH-PERFORMANCE TIPS]")
fmt.Println("==========================================")
fmt.Println("Your system has many cores. Optimize with:")
fmt.Println(" - Use --profile aggressive")
fmt.Printf(" - Try up to --jobs %d\n", numCPU)
fmt.Println(" - Monitor with 'dbbackup restore ... --verbose'")
fmt.Println(" - Use SSD storage for temp files")
fmt.Println()
}
return nil
}
func runParallelRestoreSimulate(cmd *cobra.Command, args []string) error {
if _, err := os.Stat(parallelRestoreFile); err != nil {
return fmt.Errorf("backup file not found: %s", parallelRestoreFile)
}
jobs := parallelRestoreJobs
if jobs == 0 {
jobs = runtime.NumCPU()
if jobs > 8 {
jobs = jobs - 2
}
}
fmt.Println("[PARALLEL RESTORE] Simulation")
fmt.Println("==========================================")
fmt.Println()
fmt.Printf("Backup File: %s\n", parallelRestoreFile)
fmt.Printf("Parallel Jobs: %d\n", jobs)
fmt.Println()
// Detect backup type
ext := filepath.Ext(parallelRestoreFile)
isCluster := ext == ".tar" || ext == ".tgz"
if isCluster {
fmt.Println("[CLUSTER RESTORE PLAN]")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("Phase 1: Extract archive")
fmt.Println(" • Decompress backup archive")
fmt.Println(" • Extract globals.sql, schemas, and database dumps")
fmt.Println()
fmt.Println("Phase 2: Restore globals (sequential)")
fmt.Println(" • Restore roles and permissions")
fmt.Println(" • Restore tablespaces")
fmt.Println()
fmt.Println("Phase 3: Parallel database restore")
fmt.Printf(" • Restore databases with %d parallel jobs\n", jobs)
fmt.Println(" • Each database can use internal parallelization")
fmt.Println()
fmt.Println("Estimated databases: 3-10 (actual count varies)")
fmt.Println("Estimated speedup: 3-5x vs sequential")
} else {
fmt.Println("[SINGLE DATABASE RESTORE PLAN]")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("Phase 1: Pre-restore checks")
fmt.Println(" • Verify backup file integrity")
fmt.Println(" • Check target database connection")
fmt.Println(" • Validate sufficient disk space")
fmt.Println()
fmt.Println("Phase 2: Schema preparation")
fmt.Println(" • Create database (if needed)")
fmt.Println(" • Drop existing objects (if --clean)")
fmt.Println()
fmt.Println("Phase 3: Parallel data restore")
fmt.Printf(" • Restore tables with %d parallel jobs\n", jobs)
fmt.Println(" • Each job processes different tables")
fmt.Println(" • Automatic load balancing")
fmt.Println()
fmt.Println("Phase 4: Post-restore")
fmt.Println(" • Rebuild indexes")
fmt.Println(" • Restore constraints")
fmt.Println(" • Update statistics")
fmt.Println()
fmt.Printf("Estimated speedup: %dx vs sequential restore\n", estimateSpeedup(jobs))
}
fmt.Println()
fmt.Println("[EXECUTION COMMAND]")
fmt.Println("==========================================")
fmt.Println()
fmt.Println("To perform this restore:")
if isCluster {
fmt.Printf(" dbbackup restore cluster %s --jobs %d --confirm\n", parallelRestoreFile, jobs)
} else {
fmt.Printf(" dbbackup restore single %s --jobs %d --confirm\n", parallelRestoreFile, jobs)
}
fmt.Println()
return nil
}
type ParallelRestoreStatus struct {
SystemCPUs int `json:"system_cpus"`
RecommendedJobs int `json:"recommended_jobs"`
MaxJobs int `json:"max_jobs"`
CurrentJobs int `json:"current_jobs"`
MemoryGB float64 `json:"memory_gb"`
ParallelSupported bool `json:"parallel_supported"`
}
func getAvailableMemoryGB() float64 {
// Simple estimation - in production would query actual system memory
// For now, return a reasonable default
return 8.0
}
func estimateRestoreTime(file string, jobs int) string {
// Simplified estimation based on file size and jobs
info, err := os.Stat(file)
if err != nil {
return "unknown"
}
sizeGB := float64(info.Size()) / (1024 * 1024 * 1024)
baseTime := sizeGB * 120 // ~2 minutes per GB baseline
parallelTime := baseTime / float64(jobs) * 0.7 // 70% efficiency
if parallelTime < 60 {
return fmt.Sprintf("%.0fs", parallelTime)
}
return fmt.Sprintf("%.1fm", parallelTime/60)
}
func estimateSpeedup(jobs int) int {
// Linear approximation with diminishing returns (~0.7x gain per extra job),
// standing in for full Amdahl's law, 1/((1-p) + p/jobs) with p ≈ 0.8.
if jobs <= 1 {
return 1
}
speedup := 1.0 + float64(jobs-1)*0.7
return int(speedup)
}
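// Worked examples (values follow directly from the formulas above):
//   estimateSpeedup(8):  1.0 + 7*0.7 = 5.9 → 5 after int truncation
//   estimateRestoreTime, 10 GB file, jobs=4:  base 10*120 = 1200s,
//   parallel 1200/4*0.7 = 210s → formatted as "3.5m"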
func min(a, b int) int {
if a < b {
return a
}
return b
}
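
getAvailableMemoryGB above returns a hard-coded 8 GB. A minimal, Linux-only sketch of a real lookup, assuming /proc/meminfo's MemAvailable field (readMemAvailableGB is a hypothetical helper name; bufio, strconv, and strings would need to be imported):

func readMemAvailableGB() (float64, error) {
f, err := os.Open("/proc/meminfo")
if err != nil {
return 0, err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
// Line looks like: "MemAvailable:   16338028 kB"
fields := strings.Fields(scanner.Text())
if len(fields) >= 2 && fields[0] == "MemAvailable:" {
kb, err := strconv.ParseFloat(fields[1], 64)
if err != nil {
return 0, err
}
return kb / (1024 * 1024), nil // kB → GB
}
}
return 0, fmt.Errorf("MemAvailable not found in /proc/meminfo")
}

MemAvailable suits this use better than MemFree: it is the kernel's estimate of memory claimable without swapping, which is what a "how many parallel jobs can this host afford" heuristic cares about.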

533
internal/tui/catalog_dashboard.go Normal file

@@ -0,0 +1,533 @@
package tui
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"time"
"dbbackup/internal/catalog"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// CatalogDashboardView displays an interactive catalog browser
type CatalogDashboardView struct {
catalog catalog.Catalog
entries []*catalog.Entry
databases []string
cursor int
page int
pageSize int
totalPages int
filter string
filterMode bool
selectedDB string
loading bool
err error
sortBy string // "date", "size", "database", "type"
sortDesc bool
viewMode string // "list", "detail"
selectedIdx int
width int
height int
}
// Style definitions
var (
catalogTitleStyle = lipgloss.NewStyle().
Bold(true).
Foreground(lipgloss.Color("15")).
Background(lipgloss.Color("62")).
Padding(0, 1)
catalogHeaderStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("6")).
Bold(true)
catalogRowStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("250"))
catalogSelectedStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("15")).
Background(lipgloss.Color("62")).
Bold(true)
catalogFilterStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("3")).
Bold(true)
catalogStatsStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("244"))
)
type catalogLoadedMsg struct {
entries []*catalog.Entry
databases []string
err error
}
// NewCatalogDashboardView creates a new catalog dashboard
func NewCatalogDashboardView() *CatalogDashboardView {
return &CatalogDashboardView{
pageSize: 20,
sortBy: "date",
sortDesc: true,
viewMode: "list",
selectedIdx: -1,
}
}
// Init initializes the view
func (v *CatalogDashboardView) Init() tea.Cmd {
return v.loadCatalog()
}
// Update handles messages
func (v *CatalogDashboardView) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.WindowSizeMsg:
v.width = msg.Width
v.height = msg.Height
return v, nil
case catalogLoadedMsg:
v.loading = false
v.err = msg.err
if msg.err == nil {
v.entries = msg.entries
v.databases = msg.databases
v.sortEntries()
v.calculatePages()
}
return v, nil
case tea.KeyMsg:
if v.filterMode {
return v.handleFilterKeys(msg)
}
switch msg.String() {
case "q", "esc":
if v.selectedIdx >= 0 {
v.selectedIdx = -1
v.viewMode = "list"
return v, nil
}
return v, tea.Quit
case "up", "k":
if v.cursor > 0 {
v.cursor--
}
case "down", "j":
maxCursor := len(v.getCurrentPageEntries()) - 1
if v.cursor < maxCursor {
v.cursor++
}
case "left", "h":
if v.page > 0 {
v.page--
v.cursor = 0
}
case "right", "l":
if v.page < v.totalPages-1 {
v.page++
v.cursor = 0
}
case "enter":
entries := v.getCurrentPageEntries()
if v.cursor >= 0 && v.cursor < len(entries) {
v.selectedIdx = v.page*v.pageSize + v.cursor
v.viewMode = "detail"
}
case "/":
v.filterMode = true
return v, nil
case "s":
// Cycle sort modes
switch v.sortBy {
case "date":
v.sortBy = "size"
case "size":
v.sortBy = "database"
case "database":
v.sortBy = "type"
case "type":
v.sortBy = "date"
}
v.sortEntries()
case "r":
v.sortDesc = !v.sortDesc
v.sortEntries()
case "d":
// Filter by database
if len(v.databases) > 0 {
return v, v.selectDatabase()
}
case "c":
// Clear filters
v.filter = ""
v.selectedDB = ""
v.cursor = 0
v.page = 0
v.calculatePages()
case "R":
// Reload catalog
v.loading = true
return v, v.loadCatalog()
}
}
return v, nil
}
// View renders the view
func (v *CatalogDashboardView) View() string {
if v.loading {
return catalogTitleStyle.Render("Catalog Dashboard") + "\n\n" +
"Loading catalog...\n"
}
if v.err != nil {
return catalogTitleStyle.Render("Catalog Dashboard") + "\n\n" +
errorStyle.Render(fmt.Sprintf("Error: %v", v.err)) + "\n\n" +
infoStyle.Render("Press 'q' to quit")
}
if v.viewMode == "detail" && v.selectedIdx >= 0 && v.selectedIdx < len(v.getFilteredEntries()) {
return v.renderDetail()
}
return v.renderList()
}
// renderList renders the list view
func (v *CatalogDashboardView) renderList() string {
var b strings.Builder
// Title
b.WriteString(catalogTitleStyle.Render("Catalog Dashboard"))
b.WriteString("\n\n")
// Stats
totalSize := int64(0)
for _, e := range v.entries {
totalSize += e.SizeBytes
}
stats := fmt.Sprintf("Total: %d backups | Size: %s | Databases: %d",
len(v.entries), formatCatalogBytes(totalSize), len(v.databases))
b.WriteString(catalogStatsStyle.Render(stats))
b.WriteString("\n\n")
// Filters and sort
filters := []string{}
if v.filter != "" {
filters = append(filters, fmt.Sprintf("Filter: %s", v.filter))
}
if v.selectedDB != "" {
filters = append(filters, fmt.Sprintf("Database: %s", v.selectedDB))
}
sortInfo := fmt.Sprintf("Sort: %s (%s)", v.sortBy, map[bool]string{true: "desc", false: "asc"}[v.sortDesc])
filters = append(filters, sortInfo)
if len(filters) > 0 {
b.WriteString(catalogFilterStyle.Render(strings.Join(filters, " | ")))
b.WriteString("\n\n")
}
// Header
header := fmt.Sprintf("%-16s %-20s %-15s %-12s %-10s",
"Date", "Database", "Type", "Size", "Status")
b.WriteString(catalogHeaderStyle.Render(header))
b.WriteString("\n")
b.WriteString(strings.Repeat("─", 77))
b.WriteString("\n")
// Entries
entries := v.getCurrentPageEntries()
if len(entries) == 0 {
b.WriteString(infoStyle.Render("No backups found"))
b.WriteString("\n")
} else {
for i, entry := range entries {
date := entry.CreatedAt.Format("2006-01-02")
clock := entry.CreatedAt.Format("15:04") // renamed from "time" to avoid shadowing the time package
database := entry.Database
if len(database) > 18 {
database = database[:15] + "..."
}
backupType := entry.BackupType
size := formatCatalogBytes(entry.SizeBytes)
status := string(entry.Status)
line := fmt.Sprintf("%-16s %-20s %-15s %-12s %-10s",
date+" "+clock, database, backupType, size, status)
if i == v.cursor {
b.WriteString(catalogSelectedStyle.Render(line))
} else {
b.WriteString(catalogRowStyle.Render(line))
}
b.WriteString("\n")
}
}
// Pagination
if v.totalPages > 1 {
b.WriteString("\n")
pagination := fmt.Sprintf("Page %d/%d", v.page+1, v.totalPages)
b.WriteString(catalogStatsStyle.Render(pagination))
b.WriteString("\n")
}
// Help
b.WriteString("\n")
help := "↑/↓:Navigate ←/→:Page Enter:Details s:Sort r:Reverse d:Database /:Filter c:Clear R:Reload q:Quit"
b.WriteString(infoStyle.Render(help))
if v.filterMode {
b.WriteString("\n\n")
b.WriteString(catalogFilterStyle.Render(fmt.Sprintf("Filter: %s_", v.filter)))
}
return b.String()
}
// renderDetail renders the detail view
func (v *CatalogDashboardView) renderDetail() string {
entry := v.getFilteredEntries()[v.selectedIdx] // selectedIdx indexes the filtered list set by the Enter handler
var b strings.Builder
b.WriteString(catalogTitleStyle.Render("Backup Details"))
b.WriteString("\n\n")
// Basic info
b.WriteString(catalogHeaderStyle.Render("Basic Information"))
b.WriteString("\n")
b.WriteString(fmt.Sprintf("Database: %s\n", entry.Database))
b.WriteString(fmt.Sprintf("Type: %s\n", entry.BackupType))
b.WriteString(fmt.Sprintf("Status: %s\n", entry.Status))
b.WriteString(fmt.Sprintf("Timestamp: %s\n", entry.CreatedAt.Format("2006-01-02 15:04:05")))
b.WriteString("\n")
// File info
b.WriteString(catalogHeaderStyle.Render("File Information"))
b.WriteString("\n")
b.WriteString(fmt.Sprintf("Path: %s\n", entry.BackupPath))
b.WriteString(fmt.Sprintf("Size: %s (%d bytes)\n", formatCatalogBytes(entry.SizeBytes), entry.SizeBytes))
compressed := entry.Compression != ""
b.WriteString(fmt.Sprintf("Compressed: %s\n", map[bool]string{true: "Yes (" + entry.Compression + ")", false: "No"}[compressed]))
b.WriteString(fmt.Sprintf("Encrypted: %s\n", map[bool]string{true: "Yes", false: "No"}[entry.Encrypted]))
b.WriteString("\n")
// Duration info
if entry.Duration > 0 {
b.WriteString(catalogHeaderStyle.Render("Performance"))
b.WriteString("\n")
duration := time.Duration(entry.Duration * float64(time.Second))
b.WriteString(fmt.Sprintf("Duration: %s\n", duration))
throughput := float64(entry.SizeBytes) / entry.Duration / (1024 * 1024)
b.WriteString(fmt.Sprintf("Throughput: %.2f MB/s\n", throughput))
b.WriteString("\n")
}
// Additional metadata
if len(entry.Metadata) > 0 {
b.WriteString(catalogHeaderStyle.Render("Metadata"))
b.WriteString("\n")
keys := make([]string, 0, len(entry.Metadata))
for k := range entry.Metadata {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
b.WriteString(fmt.Sprintf("%-15s %s\n", k+":", entry.Metadata[k]))
}
b.WriteString("\n")
}
// Help
b.WriteString("\n")
b.WriteString(infoStyle.Render("Press ESC or 'q' to return to list"))
return b.String()
}
// Helper methods
func (v *CatalogDashboardView) loadCatalog() tea.Cmd {
return func() tea.Msg {
// Open catalog
home, err := os.UserHomeDir()
if err != nil {
return catalogLoadedMsg{err: err}
}
catalogPath := filepath.Join(home, ".dbbackup", "catalog.db")
cat, err := catalog.NewSQLiteCatalog(catalogPath)
if err != nil {
return catalogLoadedMsg{err: err}
}
defer cat.Close()
// Load entries
entries, err := cat.Search(context.Background(), &catalog.SearchQuery{})
if err != nil {
return catalogLoadedMsg{err: err}
}
// Load databases
databases, err := cat.ListDatabases(context.Background())
if err != nil {
return catalogLoadedMsg{err: err}
}
return catalogLoadedMsg{
entries: entries,
databases: databases,
}
}
}
func (v *CatalogDashboardView) sortEntries() {
sort.Slice(v.entries, func(i, j int) bool {
var less bool
switch v.sortBy {
case "date":
less = v.entries[i].CreatedAt.Before(v.entries[j].CreatedAt)
case "size":
less = v.entries[i].SizeBytes < v.entries[j].SizeBytes
case "database":
less = v.entries[i].Database < v.entries[j].Database
case "type":
less = v.entries[i].BackupType < v.entries[j].BackupType
default:
less = v.entries[i].CreatedAt.Before(v.entries[j].CreatedAt)
}
if v.sortDesc {
return !less
}
return less
})
v.calculatePages()
}
func (v *CatalogDashboardView) calculatePages() {
filtered := v.getFilteredEntries()
v.totalPages = (len(filtered) + v.pageSize - 1) / v.pageSize
if v.totalPages == 0 {
v.totalPages = 1
}
if v.page >= v.totalPages {
v.page = v.totalPages - 1
}
if v.page < 0 {
v.page = 0
}
}
func (v *CatalogDashboardView) getFilteredEntries() []*catalog.Entry {
filtered := []*catalog.Entry{}
for _, e := range v.entries {
if v.selectedDB != "" && e.Database != v.selectedDB {
continue
}
if v.filter != "" {
match := strings.Contains(strings.ToLower(e.Database), strings.ToLower(v.filter)) ||
strings.Contains(strings.ToLower(e.BackupPath), strings.ToLower(v.filter))
if !match {
continue
}
}
filtered = append(filtered, e)
}
return filtered
}
func (v *CatalogDashboardView) getCurrentPageEntries() []*catalog.Entry {
filtered := v.getFilteredEntries()
start := v.page * v.pageSize
end := start + v.pageSize
if end > len(filtered) {
end = len(filtered)
}
if start >= len(filtered) {
return []*catalog.Entry{}
}
return filtered[start:end]
}
func (v *CatalogDashboardView) handleFilterKeys(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
switch msg.String() {
case "enter", "esc":
v.filterMode = false
v.cursor = 0
v.page = 0
v.calculatePages()
return v, nil
case "backspace":
if len(v.filter) > 0 {
v.filter = v.filter[:len(v.filter)-1]
}
default:
if len(msg.String()) == 1 {
v.filter += msg.String()
}
}
return v, nil
}
func (v *CatalogDashboardView) selectDatabase() tea.Cmd {
// Simple cycling through databases
if v.selectedDB == "" {
if len(v.databases) > 0 {
v.selectedDB = v.databases[0]
}
} else {
for i, db := range v.databases {
if db == v.selectedDB {
if i+1 < len(v.databases) {
v.selectedDB = v.databases[i+1]
} else {
v.selectedDB = ""
}
break
}
}
}
v.cursor = 0
v.page = 0
v.calculatePages()
return nil
}
func formatCatalogBytes(bytes int64) string {
const unit = 1024
if bytes < unit {
return fmt.Sprintf("%d B", bytes)
}
div, exp := int64(unit), 0
for n := bytes / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp])
}
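// Illustrative values: formatCatalogBytes(1536) → "1.5 KB",
// formatCatalogBytes(3<<30) → "3.0 GB".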