Compare commits

5 Commits

| SHA1 |
|---|
| 09a917766f |
| eeacbfa007 |
| 7711a206ab |
| ba6e8a2b39 |
| ec5e89eab7 |

CHANGELOG.md (+30 lines)
@@ -5,6 +5,36 @@ All notable changes to dbbackup will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [3.42.35] - 2026-01-15 "TUI Detailed Progress"
+
+### Added - Enhanced TUI Progress Display
+
+- **Detailed progress bar in TUI restore** - schollz-style progress bar with:
+  - Byte progress display (e.g., `245 MB / 1.2 GB`)
+  - Transfer speed calculation (e.g., `45 MB/s`)
+  - ETA prediction for long operations
+  - Unicode block-based visual bar
+- **Real-time extraction progress** - Archive extraction now reports actual bytes processed
+- **Go-native tar extraction** - Uses Go's `archive/tar` + `compress/gzip` when a progress callback is set
+- **New `DetailedProgress` component** in the TUI package:
+  - `NewDetailedProgress(total, description)` - Byte-based progress
+  - `NewDetailedProgressItems(total, description)` - Item-count progress
+  - `NewDetailedProgressSpinner(description)` - Indeterminate spinner
+  - `RenderProgressBar(width)` - Generate schollz-style output
+- **Progress callback API** in the restore engine:
+  - `SetProgressCallback(func(current, total int64, description string))`
+  - Allows the TUI to receive real-time progress updates from restore operations
+- **Shared progress state** pattern for Bubble Tea integration
+
+### Changed
+
+- TUI restore execution now shows detailed byte progress during archive extraction
+- Cluster restore shows extraction progress instead of just a spinner
+- Falls back to the shell `tar` command when no progress callback is set (faster)
+
+### Technical Details
+
+- `progressReader` wrapper tracks bytes read through the gzip/tar pipeline
+- Progress updates are throttled (every 100 ms) to avoid flooding the UI
+- Thread-safe shared state pattern for cross-goroutine progress updates
+
## [3.42.34] - 2026-01-14 "Filesystem Abstraction"

### Added - spf13/afero for Filesystem Abstraction
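
Taken together, the entries above describe a callback-driven progress pipeline: the restore engine pushes byte counts through `SetProgressCallback`, and the TUI renders them with `DetailedProgress`. A minimal sketch of that wiring, assuming the module's `internal/restore` and `internal/tui` package paths and an already-constructed engine (the helper name is invented for illustration):

```go
package example // illustrative only, not part of this change set

import (
	"dbbackup/internal/restore" // assumed package path
	"dbbackup/internal/tui"
)

// wireRestoreProgress connects the restore engine's byte-level callback to a
// DetailedProgress tracker that a Bubble Tea View() can render each frame.
func wireRestoreProgress(eng *restore.Engine) *tui.DetailedProgress {
	prog := tui.NewDetailedProgress(0, "Extracting archive") // total filled in once known

	eng.SetProgressCallback(func(current, total int64, desc string) {
		prog.SetTotal(total)      // archive size reported by the engine
		prog.Set(current)         // bytes processed so far
		prog.SetDescription(desc) // e.g. "Extracting archive"
	})
	return prog
}

// A View() method could then print prog.GetState().RenderProgressBar(80).
```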
@@ -56,7 +56,7 @@ Download from [releases](https://git.uuxo.net/UUXO/dbbackup/releases):

```bash
# Linux x86_64
-wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.1/dbbackup-linux-amd64
+wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.35/dbbackup-linux-amd64
chmod +x dbbackup-linux-amd64
sudo mv dbbackup-linux-amd64 /usr/local/bin/dbbackup
```
@@ -4,8 +4,8 @@ This directory contains pre-compiled binaries for the DB Backup Tool across mult

## Build Information
- **Version**: 3.42.34
-- **Build Time**: 2026-01-14_15:24:20_UTC
+- **Build Time**: 2026-01-15_14:16:33_UTC
-- **Git Commit**: 721e53f
+- **Git Commit**: eeacbfa

## Recent Updates (v1.1.0)
- ✅ Fixed TUI progress display with line-by-line output
@@ -28,6 +28,12 @@ import (
	"dbbackup/internal/swap"
)

+// ProgressCallback is called with byte-level progress updates during backup operations
+type ProgressCallback func(current, total int64, description string)
+
+// DatabaseProgressCallback is called with database count progress during cluster backup
+type DatabaseProgressCallback func(done, total int, dbName string)
+
// Engine handles backup operations
type Engine struct {
	cfg *config.Config
@@ -36,6 +42,8 @@ type Engine struct {
	progress           progress.Indicator
	detailedReporter   *progress.DetailedReporter
	silent             bool // Silent mode for TUI
+	progressCallback   ProgressCallback
+	dbProgressCallback DatabaseProgressCallback
}

// New creates a new backup engine
@@ -86,6 +94,30 @@ func NewSilent(cfg *config.Config, log logger.Logger, db database.Database, prog
	}
}

+// SetProgressCallback sets a callback for detailed progress reporting (for TUI mode)
+func (e *Engine) SetProgressCallback(cb ProgressCallback) {
+	e.progressCallback = cb
+}
+
+// SetDatabaseProgressCallback sets a callback for database count progress during cluster backup
+func (e *Engine) SetDatabaseProgressCallback(cb DatabaseProgressCallback) {
+	e.dbProgressCallback = cb
+}
+
+// reportProgress reports progress to the callback if set
+func (e *Engine) reportProgress(current, total int64, description string) {
+	if e.progressCallback != nil {
+		e.progressCallback(current, total, description)
+	}
+}
+
+// reportDatabaseProgress reports database count progress to the callback if set
+func (e *Engine) reportDatabaseProgress(done, total int, dbName string) {
+	if e.dbProgressCallback != nil {
+		e.dbProgressCallback(done, total, dbName)
+	}
+}
+
// loggerAdapter adapts our logger to the progress.Logger interface
type loggerAdapter struct {
	logger logger.Logger
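
For orientation, a hedged sketch of a plain (non-TUI) consumer of the two new backup-engine callbacks; the `internal/backup` package path and the way the engine is obtained are assumptions, and only `SetProgressCallback` / `SetDatabaseProgressCallback` come from the hunk above:

```go
package example // illustrative only, not part of this change set

import (
	"fmt"

	"dbbackup/internal/backup" // assumed package path
)

// logBackupProgress prints progress to stdout instead of feeding a TUI.
func logBackupProgress(eng *backup.Engine) {
	eng.SetDatabaseProgressCallback(func(done, total int, dbName string) {
		fmt.Printf("[%d/%d] backing up %s\n", done, total, dbName) // per-database progress
	})
	eng.SetProgressCallback(func(current, total int64, desc string) {
		fmt.Printf("%s: %d / %d bytes\n", desc, current, total) // byte-level progress
	})
}
```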
@@ -465,6 +497,8 @@ func (e *Engine) BackupCluster(ctx context.Context) error {
		estimator.UpdateProgress(idx)
		e.printf(" [%d/%d] Backing up database: %s\n", idx+1, len(databases), name)
		quietProgress.Update(fmt.Sprintf("Backing up database %d/%d: %s", idx+1, len(databases), name))
+		// Report database progress to TUI callback
+		e.reportDatabaseProgress(idx+1, len(databases), name)
		mu.Unlock()

		// Check database size and warn if very large
@@ -368,7 +368,7 @@ func (d *Diagnoser) diagnoseSQLScript(filePath string, compressed bool, result *
		}

		// Store last line for termination check
-		if lineNumber > 0 && (lineNumber%100000 == 0) && d.verbose {
+		if lineNumber > 0 && (lineNumber%100000 == 0) && d.verbose && d.log != nil {
			d.log.Debug("Scanning SQL file", "lines_processed", lineNumber)
		}
	}
@@ -430,9 +430,11 @@ func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResu
		}
	}

+	if d.log != nil {
		d.log.Info("Verifying cluster archive integrity",
			"size", fmt.Sprintf("%.1f GB", float64(result.FileSize)/(1024*1024*1024)),
			"timeout", fmt.Sprintf("%d min", timeoutMinutes))
+	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute)
	defer cancel()
@@ -561,7 +563,7 @@ func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResu
	}

	// For verbose mode, diagnose individual dumps inside the archive
-	if d.verbose && len(dumpFiles) > 0 {
+	if d.verbose && len(dumpFiles) > 0 && d.log != nil {
		d.log.Info("Cluster archive contains databases", "count", len(dumpFiles))
		for _, df := range dumpFiles {
			d.log.Info(" - " + df)
@@ -684,9 +686,11 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
		}
	}

+	if d.log != nil {
		d.log.Info("Listing cluster archive contents",
			"size", fmt.Sprintf("%.1f GB", float64(archiveInfo.Size())/(1024*1024*1024)),
			"timeout", fmt.Sprintf("%d min", timeoutMinutes))
+	}

	listCtx, listCancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute)
	defer listCancel()
@@ -766,7 +770,9 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
		return []*DiagnoseResult{errResult}, nil
	}

+	if d.log != nil {
		d.log.Debug("Archive listing streamed successfully", "total_files", fileCount, "relevant_files", len(files))
+	}

	// Check if we have enough disk space (estimate 4x archive size needed)
	// archiveInfo already obtained at function start
@@ -781,7 +787,9 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
		testCancel()
	}

+	if d.log != nil {
		d.log.Info("Archive listing successful", "files", len(files))
+	}

	// Try full extraction - NO TIMEOUT here as large archives can take a long time
	// Use a generous timeout (30 minutes) for very large archives
@@ -870,11 +878,15 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*Diagno
		}

		dumpPath := filepath.Join(dumpsDir, name)
+		if d.log != nil {
			d.log.Info("Diagnosing dump file", "file", name)
+		}

		result, err := d.DiagnoseFile(dumpPath)
		if err != nil {
+			if d.log != nil {
				d.log.Warn("Failed to diagnose file", "file", name, "error", err)
+			}
			continue
		}
		results = append(results, result)
@@ -1,9 +1,12 @@
package restore

import (
+	"archive/tar"
+	"compress/gzip"
	"context"
	"database/sql"
	"fmt"
+	"io"
	"os"
	"os/exec"
	"path/filepath"
@@ -24,6 +27,13 @@ import (
	_ "github.com/jackc/pgx/v5/stdlib" // PostgreSQL driver
)

+// ProgressCallback is called with progress updates during long operations
+// Parameters: current bytes/items done, total bytes/items, description
+type ProgressCallback func(current, total int64, description string)
+
+// DatabaseProgressCallback is called with database count progress during cluster restore
+type DatabaseProgressCallback func(done, total int, dbName string)
+
// Engine handles database restore operations
type Engine struct {
	cfg *config.Config
@@ -33,6 +43,10 @@ type Engine struct {
	detailedReporter *progress.DetailedReporter
	dryRun           bool
	debugLogPath     string // Path to save debug log on error
+
+	// TUI progress callback for detailed progress reporting
+	progressCallback   ProgressCallback
+	dbProgressCallback DatabaseProgressCallback
}

// New creates a new restore engine
@@ -88,6 +102,30 @@ func (e *Engine) SetDebugLogPath(path string) {
	e.debugLogPath = path
}

+// SetProgressCallback sets a callback for detailed progress reporting (for TUI mode)
+func (e *Engine) SetProgressCallback(cb ProgressCallback) {
+	e.progressCallback = cb
+}
+
+// SetDatabaseProgressCallback sets a callback for database count progress during cluster restore
+func (e *Engine) SetDatabaseProgressCallback(cb DatabaseProgressCallback) {
+	e.dbProgressCallback = cb
+}
+
+// reportProgress safely calls the progress callback if set
+func (e *Engine) reportProgress(current, total int64, description string) {
+	if e.progressCallback != nil {
+		e.progressCallback(current, total, description)
+	}
+}
+
+// reportDatabaseProgress safely calls the database progress callback if set
+func (e *Engine) reportDatabaseProgress(done, total int, dbName string) {
+	if e.dbProgressCallback != nil {
+		e.dbProgressCallback(done, total, dbName)
+	}
+}
+
// loggerAdapter adapts our logger to the progress.Logger interface
type loggerAdapter struct {
	logger logger.Logger
@@ -1040,6 +1078,8 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
			statusMsg := fmt.Sprintf("Restoring database %s (%d/%d)", dbName, idx+1, totalDBs)
			e.progress.Update(statusMsg)
			e.log.Info("Restoring database", "name", dbName, "file", dumpFile, "progress", dbProgress)
+			// Report database progress for TUI
+			e.reportDatabaseProgress(idx, totalDBs, dbName)
			mu.Unlock()

			// STEP 1: Drop existing database completely (clean slate)
@@ -1146,8 +1186,144 @@ func (e *Engine) RestoreCluster(ctx context.Context, archivePath string) error {
	return nil
}

-// extractArchive extracts a tar.gz archive
+// extractArchive extracts a tar.gz archive with progress reporting
func (e *Engine) extractArchive(ctx context.Context, archivePath, destDir string) error {
+	// If progress callback is set, use Go's archive/tar for progress tracking
+	if e.progressCallback != nil {
+		return e.extractArchiveWithProgress(ctx, archivePath, destDir)
+	}
+
+	// Otherwise use fast shell tar (no progress)
+	return e.extractArchiveShell(ctx, archivePath, destDir)
+}
+
+// extractArchiveWithProgress extracts using Go's archive/tar with detailed progress reporting
+func (e *Engine) extractArchiveWithProgress(ctx context.Context, archivePath, destDir string) error {
+	// Get archive size for progress calculation
+	archiveInfo, err := os.Stat(archivePath)
+	if err != nil {
+		return fmt.Errorf("failed to stat archive: %w", err)
+	}
+	totalSize := archiveInfo.Size()
+
+	// Open the archive file
+	file, err := os.Open(archivePath)
+	if err != nil {
+		return fmt.Errorf("failed to open archive: %w", err)
+	}
+	defer file.Close()
+
+	// Wrap with progress reader
+	progressReader := &progressReader{
+		reader:    file,
+		totalSize: totalSize,
+		callback:  e.progressCallback,
+		desc:      "Extracting archive",
+	}
+
+	// Create gzip reader
+	gzReader, err := gzip.NewReader(progressReader)
+	if err != nil {
+		return fmt.Errorf("failed to create gzip reader: %w", err)
+	}
+	defer gzReader.Close()
+
+	// Create tar reader
+	tarReader := tar.NewReader(gzReader)
+
+	// Extract files
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		header, err := tarReader.Next()
+		if err == io.EOF {
+			break // End of archive
+		}
+		if err != nil {
+			return fmt.Errorf("failed to read tar header: %w", err)
+		}
+
+		// Sanitize and validate path
+		targetPath := filepath.Join(destDir, header.Name)
+
+		// Security check: ensure path is within destDir (prevent path traversal)
+		if !strings.HasPrefix(filepath.Clean(targetPath), filepath.Clean(destDir)) {
+			e.log.Warn("Skipping potentially malicious path in archive", "path", header.Name)
+			continue
+		}
+
+		switch header.Typeflag {
+		case tar.TypeDir:
+			if err := os.MkdirAll(targetPath, 0755); err != nil {
+				return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
+			}
+		case tar.TypeReg:
+			// Ensure parent directory exists
+			if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
+				return fmt.Errorf("failed to create parent directory: %w", err)
+			}
+
+			// Create the file
+			outFile, err := os.OpenFile(targetPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode))
+			if err != nil {
+				return fmt.Errorf("failed to create file %s: %w", targetPath, err)
+			}
+
+			// Copy file contents
+			if _, err := io.Copy(outFile, tarReader); err != nil {
+				outFile.Close()
+				return fmt.Errorf("failed to write file %s: %w", targetPath, err)
+			}
+			outFile.Close()
+		case tar.TypeSymlink:
+			// Handle symlinks (common in some archives)
+			if err := os.Symlink(header.Linkname, targetPath); err != nil {
+				// Ignore symlink errors (may already exist or not supported)
+				e.log.Debug("Could not create symlink", "path", targetPath, "target", header.Linkname)
+			}
+		}
+	}
+
+	// Final progress update
+	e.reportProgress(totalSize, totalSize, "Extraction complete")
+	return nil
+}
+
+// progressReader wraps an io.Reader to report read progress
+type progressReader struct {
+	reader      io.Reader
+	totalSize   int64
+	bytesRead   int64
+	callback    ProgressCallback
+	desc        string
+	lastReport  time.Time
+	reportEvery time.Duration
+}
+
+func (pr *progressReader) Read(p []byte) (n int, err error) {
+	n, err = pr.reader.Read(p)
+	pr.bytesRead += int64(n)
+
+	// Throttle progress reporting to every 100ms
+	if pr.reportEvery == 0 {
+		pr.reportEvery = 100 * time.Millisecond
+	}
+	if time.Since(pr.lastReport) > pr.reportEvery {
+		if pr.callback != nil {
+			pr.callback(pr.bytesRead, pr.totalSize, pr.desc)
+		}
+		pr.lastReport = time.Now()
+	}
+
+	return n, err
+}
+
+// extractArchiveShell extracts using shell tar command (faster but no progress)
+func (e *Engine) extractArchiveShell(ctx context.Context, archivePath, destDir string) error {
	cmd := exec.CommandContext(ctx, "tar", "-xzf", archivePath, "-C", destDir)

	// Stream stderr to avoid memory issues - tar can produce lots of output for large archives
@@ -255,7 +255,9 @@ func (s *Safety) CheckDiskSpaceAt(archivePath string, checkDir string, multiplie
	// Get available disk space
	availableSpace, err := getDiskSpace(checkDir)
	if err != nil {
+		if s.log != nil {
			s.log.Warn("Cannot check disk space", "error", err)
+		}
		return nil // Don't fail if we can't check
	}

@@ -278,10 +280,12 @@ func (s *Safety) CheckDiskSpaceAt(archivePath string, checkDir string, multiplie
			checkDir)
	}

+	if s.log != nil {
		s.log.Info("Disk space check passed",
			"location", checkDir,
			"required", FormatBytes(requiredSpace),
			"available", FormatBytes(availableSpace))
+	}

	return nil
}

internal/tui/backup_exec.go (124 lines changed; file mode changed: executable → normal)
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"strings"
+	"sync"
	"time"

	tea "github.com/charmbracelet/bubbletea"
@@ -33,6 +34,56 @@ type BackupExecutionModel struct {
	startTime    time.Time
	details      []string
	spinnerFrame int
+
+	// Database count progress (for cluster backup)
+	dbTotal int
+	dbDone  int
+	dbName  string // Current database being backed up
+}
+
+// sharedBackupProgressState holds progress state that can be safely accessed from callbacks
+type sharedBackupProgressState struct {
+	mu        sync.Mutex
+	dbTotal   int
+	dbDone    int
+	dbName    string
+	hasUpdate bool
+}
+
+// Package-level shared progress state for backup operations
+var (
+	currentBackupProgressMu    sync.Mutex
+	currentBackupProgressState *sharedBackupProgressState
+)
+
+func setCurrentBackupProgress(state *sharedBackupProgressState) {
+	currentBackupProgressMu.Lock()
+	defer currentBackupProgressMu.Unlock()
+	currentBackupProgressState = state
+}
+
+func clearCurrentBackupProgress() {
+	currentBackupProgressMu.Lock()
+	defer currentBackupProgressMu.Unlock()
+	currentBackupProgressState = nil
+}
+
+func getCurrentBackupProgress() (dbTotal, dbDone int, dbName string, hasUpdate bool) {
+	currentBackupProgressMu.Lock()
+	defer currentBackupProgressMu.Unlock()
+
+	if currentBackupProgressState == nil {
+		return 0, 0, "", false
+	}
+
+	currentBackupProgressState.mu.Lock()
+	defer currentBackupProgressState.mu.Unlock()
+
+	hasUpdate = currentBackupProgressState.hasUpdate
+	currentBackupProgressState.hasUpdate = false
+
+	return currentBackupProgressState.dbTotal, currentBackupProgressState.dbDone,
+		currentBackupProgressState.dbName, hasUpdate
}

func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model, ctx context.Context, backupType, dbName string, ratio int) BackupExecutionModel {
@@ -55,7 +106,6 @@ func NewBackupExecution(cfg *config.Config, log logger.Logger, parent tea.Model,
}

func (m BackupExecutionModel) Init() tea.Cmd {
-	// TUI handles all display through View() - no progress callbacks needed
	return tea.Batch(
		executeBackupWithTUIProgress(m.ctx, m.config, m.logger, m.backupType, m.databaseName, m.ratio),
		backupTickCmd(),
@@ -91,6 +141,11 @@ func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config,

	start := time.Now()

+	// Setup shared progress state for TUI polling
+	progressState := &sharedBackupProgressState{}
+	setCurrentBackupProgress(progressState)
+	defer clearCurrentBackupProgress()
+
	dbClient, err := database.New(cfg, log)
	if err != nil {
		return backupCompleteMsg{
@@ -110,6 +165,16 @@ func executeBackupWithTUIProgress(parentCtx context.Context, cfg *config.Config,
	// Pass nil as indicator - TUI itself handles all display, no stdout printing
	engine := backup.NewSilent(cfg, log, dbClient, nil)

+	// Set database progress callback for cluster backups
+	engine.SetDatabaseProgressCallback(func(done, total int, currentDB string) {
+		progressState.mu.Lock()
+		progressState.dbDone = done
+		progressState.dbTotal = total
+		progressState.dbName = currentDB
+		progressState.hasUpdate = true
+		progressState.mu.Unlock()
+	})
+
	var backupErr error
	switch backupType {
	case "single":
@@ -157,10 +222,21 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
		// Increment spinner frame for smooth animation
		m.spinnerFrame = (m.spinnerFrame + 1) % len(spinnerFrames)

-		// Update status based on elapsed time to show progress
+		// Poll for database progress updates from callbacks
+		dbTotal, dbDone, dbName, hasUpdate := getCurrentBackupProgress()
+		if hasUpdate {
+			m.dbTotal = dbTotal
+			m.dbDone = dbDone
+			m.dbName = dbName
+		}
+
+		// Update status based on progress and elapsed time
		elapsedSec := int(time.Since(m.startTime).Seconds())

-		if elapsedSec < 2 {
+		if m.dbTotal > 0 && m.dbDone > 0 {
+			// We have real progress from cluster backup
+			m.status = fmt.Sprintf("Backing up database: %s", m.dbName)
+		} else if elapsedSec < 2 {
			m.status = "Initializing backup..."
		} else if elapsedSec < 5 {
			if m.backupType == "cluster" {
@@ -234,6 +310,34 @@ func (m BackupExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
		return m, nil
	}

+// renderDatabaseProgressBar renders a progress bar for database count progress
+func renderBackupDatabaseProgressBar(done, total int, dbName string, width int) string {
+	if total == 0 {
+		return ""
+	}
+
+	// Calculate progress percentage
+	percent := float64(done) / float64(total)
+	if percent > 1.0 {
+		percent = 1.0
+	}
+
+	// Calculate filled width
+	barWidth := width - 20 // Leave room for label and percentage
+	if barWidth < 10 {
+		barWidth = 10
+	}
+	filled := int(float64(barWidth) * percent)
+	if filled > barWidth {
+		filled = barWidth
+	}
+
+	// Build progress bar
+	bar := strings.Repeat("█", filled) + strings.Repeat("░", barWidth-filled)
+
+	return fmt.Sprintf(" Database: [%s] %d/%d", bar, done, total)
+}
+
func (m BackupExecutionModel) View() string {
	var s strings.Builder
	s.Grow(512) // Pre-allocate estimated capacity for better performance
@@ -255,12 +359,24 @@ func (m BackupExecutionModel) View() string {
	s.WriteString(fmt.Sprintf(" %-10s %s\n", "Duration:", time.Since(m.startTime).Round(time.Second)))
	s.WriteString("\n")

-	// Status with spinner
+	// Status display
	if !m.done {
+		// Show database progress bar if we have progress data (cluster backup)
+		if m.dbTotal > 0 && m.dbDone > 0 {
+			// Show progress bar instead of spinner when we have real progress
+			progressBar := renderBackupDatabaseProgressBar(m.dbDone, m.dbTotal, m.dbName, 50)
+			s.WriteString(progressBar + "\n")
+			s.WriteString(fmt.Sprintf(" %s\n", m.status))
+		} else {
+			// Show spinner during initial phases
		if m.cancelling {
			s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
		} else {
			s.WriteString(fmt.Sprintf(" %s %s\n", spinnerFrames[m.spinnerFrame], m.status))
+		}
+		}
+
+		if !m.cancelling {
			s.WriteString("\n [KEY] Press Ctrl+C or ESC to cancel\n")
		}
	} else {

internal/tui/detailed_progress.go (new file, 406 lines)
@@ -0,0 +1,406 @@
package tui

import (
	"fmt"
	"strings"
	"sync"
	"time"
)

// DetailedProgress provides schollz-like progress information for TUI rendering
// This is a data structure that can be queried by Bubble Tea's View() method
type DetailedProgress struct {
	mu sync.RWMutex

	// Core progress
	Total   int64 // Total bytes or items
	Current int64 // Current bytes or items done

	// Display info
	Description string // What operation is happening
	Unit        string // "bytes", "files", "databases", etc.

	// Timing for ETA/speed calculation
	StartTime   time.Time
	LastUpdate  time.Time
	SpeedWindow []speedSample // Rolling window for speed calculation

	// State
	IsIndeterminate bool // True if total is unknown (spinner mode)
	IsComplete      bool
	IsFailed        bool
	ErrorMessage    string
}

type speedSample struct {
	timestamp time.Time
	bytes     int64
}

// NewDetailedProgress creates a progress tracker with known total
func NewDetailedProgress(total int64, description string) *DetailedProgress {
	return &DetailedProgress{
		Total:           total,
		Description:     description,
		Unit:            "bytes",
		StartTime:       time.Now(),
		LastUpdate:      time.Now(),
		SpeedWindow:     make([]speedSample, 0, 20),
		IsIndeterminate: total <= 0,
	}
}

// NewDetailedProgressItems creates a progress tracker for item counts
func NewDetailedProgressItems(total int, description string) *DetailedProgress {
	return &DetailedProgress{
		Total:           int64(total),
		Description:     description,
		Unit:            "items",
		StartTime:       time.Now(),
		LastUpdate:      time.Now(),
		SpeedWindow:     make([]speedSample, 0, 20),
		IsIndeterminate: total <= 0,
	}
}

// NewDetailedProgressSpinner creates an indeterminate progress tracker
func NewDetailedProgressSpinner(description string) *DetailedProgress {
	return &DetailedProgress{
		Total:           -1,
		Description:     description,
		Unit:            "",
		StartTime:       time.Now(),
		LastUpdate:      time.Now(),
		SpeedWindow:     make([]speedSample, 0, 20),
		IsIndeterminate: true,
	}
}

// Add adds to the current progress
func (dp *DetailedProgress) Add(n int64) {
	dp.mu.Lock()
	defer dp.mu.Unlock()

	dp.Current += n
	dp.LastUpdate = time.Now()

	// Add speed sample
	dp.SpeedWindow = append(dp.SpeedWindow, speedSample{
		timestamp: dp.LastUpdate,
		bytes:     dp.Current,
	})

	// Keep only last 20 samples for speed calculation
	if len(dp.SpeedWindow) > 20 {
		dp.SpeedWindow = dp.SpeedWindow[len(dp.SpeedWindow)-20:]
	}
}

// Set sets the current progress to a specific value
func (dp *DetailedProgress) Set(n int64) {
	dp.mu.Lock()
	defer dp.mu.Unlock()

	dp.Current = n
	dp.LastUpdate = time.Now()

	// Add speed sample
	dp.SpeedWindow = append(dp.SpeedWindow, speedSample{
		timestamp: dp.LastUpdate,
		bytes:     dp.Current,
	})

	if len(dp.SpeedWindow) > 20 {
		dp.SpeedWindow = dp.SpeedWindow[len(dp.SpeedWindow)-20:]
	}
}

// SetTotal updates the total (useful when total becomes known during operation)
func (dp *DetailedProgress) SetTotal(total int64) {
	dp.mu.Lock()
	defer dp.mu.Unlock()

	dp.Total = total
	dp.IsIndeterminate = total <= 0
}

// SetDescription updates the description
func (dp *DetailedProgress) SetDescription(desc string) {
	dp.mu.Lock()
	defer dp.mu.Unlock()
	dp.Description = desc
}

// Complete marks the progress as complete
func (dp *DetailedProgress) Complete() {
	dp.mu.Lock()
	defer dp.mu.Unlock()

	dp.IsComplete = true
	dp.Current = dp.Total
}

// Fail marks the progress as failed
func (dp *DetailedProgress) Fail(errMsg string) {
	dp.mu.Lock()
	defer dp.mu.Unlock()

	dp.IsFailed = true
	dp.ErrorMessage = errMsg
}

// GetPercent returns the progress percentage (0-100)
func (dp *DetailedProgress) GetPercent() int {
	dp.mu.RLock()
	defer dp.mu.RUnlock()

	if dp.IsIndeterminate || dp.Total <= 0 {
		return 0
	}
	percent := int((dp.Current * 100) / dp.Total)
	if percent > 100 {
		return 100
	}
	return percent
}

// GetSpeed returns the current transfer speed in bytes/second
func (dp *DetailedProgress) GetSpeed() float64 {
	dp.mu.RLock()
	defer dp.mu.RUnlock()

	if len(dp.SpeedWindow) < 2 {
		return 0
	}

	// Use first and last samples in window for smoothed speed
	first := dp.SpeedWindow[0]
	last := dp.SpeedWindow[len(dp.SpeedWindow)-1]

	elapsed := last.timestamp.Sub(first.timestamp).Seconds()
	if elapsed <= 0 {
		return 0
	}

	bytesTransferred := last.bytes - first.bytes
	return float64(bytesTransferred) / elapsed
}

// GetETA returns the estimated time remaining
func (dp *DetailedProgress) GetETA() time.Duration {
	dp.mu.RLock()
	defer dp.mu.RUnlock()

	if dp.IsIndeterminate || dp.Total <= 0 || dp.Current >= dp.Total {
		return 0
	}

	speed := dp.getSpeedLocked()
	if speed <= 0 {
		return 0
	}

	remaining := dp.Total - dp.Current
	seconds := float64(remaining) / speed
	return time.Duration(seconds) * time.Second
}

func (dp *DetailedProgress) getSpeedLocked() float64 {
	if len(dp.SpeedWindow) < 2 {
		return 0
	}

	first := dp.SpeedWindow[0]
	last := dp.SpeedWindow[len(dp.SpeedWindow)-1]

	elapsed := last.timestamp.Sub(first.timestamp).Seconds()
	if elapsed <= 0 {
		return 0
	}

	bytesTransferred := last.bytes - first.bytes
	return float64(bytesTransferred) / elapsed
}

// GetElapsed returns the elapsed time since start
func (dp *DetailedProgress) GetElapsed() time.Duration {
	dp.mu.RLock()
	defer dp.mu.RUnlock()
	return time.Since(dp.StartTime)
}

// GetState returns a snapshot of the current state for rendering
func (dp *DetailedProgress) GetState() DetailedProgressState {
	dp.mu.RLock()
	defer dp.mu.RUnlock()

	return DetailedProgressState{
		Description:     dp.Description,
		Current:         dp.Current,
		Total:           dp.Total,
		Percent:         dp.getPercentLocked(),
		Speed:           dp.getSpeedLocked(),
		ETA:             dp.getETALocked(),
		Elapsed:         time.Since(dp.StartTime),
		Unit:            dp.Unit,
		IsIndeterminate: dp.IsIndeterminate,
		IsComplete:      dp.IsComplete,
		IsFailed:        dp.IsFailed,
		ErrorMessage:    dp.ErrorMessage,
	}
}

func (dp *DetailedProgress) getPercentLocked() int {
	if dp.IsIndeterminate || dp.Total <= 0 {
		return 0
	}
	percent := int((dp.Current * 100) / dp.Total)
	if percent > 100 {
		return 100
	}
	return percent
}

func (dp *DetailedProgress) getETALocked() time.Duration {
	if dp.IsIndeterminate || dp.Total <= 0 || dp.Current >= dp.Total {
		return 0
	}

	speed := dp.getSpeedLocked()
	if speed <= 0 {
		return 0
	}

	remaining := dp.Total - dp.Current
	seconds := float64(remaining) / speed
	return time.Duration(seconds) * time.Second
}

// DetailedProgressState is an immutable snapshot for rendering
type DetailedProgressState struct {
	Description     string
	Current         int64
	Total           int64
	Percent         int
	Speed           float64 // bytes/sec
	ETA             time.Duration
	Elapsed         time.Duration
	Unit            string
	IsIndeterminate bool
	IsComplete      bool
	IsFailed        bool
	ErrorMessage    string
}

// RenderProgressBar renders a TUI-friendly progress bar string
// Returns something like: "Extracting archive [████████░░░░░░░░░░░░] 45% 12.5 MB/s ETA: 2m 30s"
func (s DetailedProgressState) RenderProgressBar(width int) string {
	if s.IsIndeterminate {
		return s.renderIndeterminate()
	}

	// Progress bar
	barWidth := 30
	if width < 80 {
		barWidth = 20
	}
	filled := (s.Percent * barWidth) / 100
	if filled > barWidth {
		filled = barWidth
	}

	bar := strings.Repeat("█", filled) + strings.Repeat("░", barWidth-filled)

	// Format bytes
	currentStr := FormatBytes(s.Current)
	totalStr := FormatBytes(s.Total)

	// Format speed
	speedStr := ""
	if s.Speed > 0 {
		speedStr = fmt.Sprintf("%s/s", FormatBytes(int64(s.Speed)))
	}

	// Format ETA
	etaStr := ""
	if s.ETA > 0 && !s.IsComplete {
		etaStr = fmt.Sprintf("ETA: %s", FormatDurationShort(s.ETA))
	}

	// Build the line
	parts := []string{
		fmt.Sprintf("[%s]", bar),
		fmt.Sprintf("%3d%%", s.Percent),
	}

	if s.Unit == "bytes" && s.Total > 0 {
		parts = append(parts, fmt.Sprintf("%s/%s", currentStr, totalStr))
	} else if s.Total > 0 {
		parts = append(parts, fmt.Sprintf("%d/%d", s.Current, s.Total))
	}

	if speedStr != "" {
		parts = append(parts, speedStr)
	}
	if etaStr != "" {
		parts = append(parts, etaStr)
	}

	return strings.Join(parts, " ")
}

func (s DetailedProgressState) renderIndeterminate() string {
	elapsed := FormatDurationShort(s.Elapsed)
	return fmt.Sprintf("[spinner] %s Elapsed: %s", s.Description, elapsed)
}

// RenderCompact renders a compact single-line progress string
func (s DetailedProgressState) RenderCompact() string {
	if s.IsComplete {
		return fmt.Sprintf("[OK] %s completed in %s", s.Description, FormatDurationShort(s.Elapsed))
	}
	if s.IsFailed {
		return fmt.Sprintf("[FAIL] %s: %s", s.Description, s.ErrorMessage)
	}
	if s.IsIndeterminate {
		return fmt.Sprintf("[...] %s (%s)", s.Description, FormatDurationShort(s.Elapsed))
	}

	return fmt.Sprintf("[%3d%%] %s - %s/%s", s.Percent, s.Description,
		FormatBytes(s.Current), FormatBytes(s.Total))
}

// FormatBytes formats bytes in human-readable format
func FormatBytes(b int64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp])
}

// FormatDurationShort formats duration in short form
func FormatDurationShort(d time.Duration) string {
	if d < time.Second {
		return "<1s"
	}
	if d < time.Minute {
		return fmt.Sprintf("%ds", int(d.Seconds()))
	}
	if d < time.Hour {
		m := int(d.Minutes())
		s := int(d.Seconds()) % 60
		if s > 0 {
			return fmt.Sprintf("%dm %ds", m, s)
		}
		return fmt.Sprintf("%dm", m)
	}
	h := int(d.Hours())
	m := int(d.Minutes()) % 60
	return fmt.Sprintf("%dh %dm", h, m)
}
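
A short, hypothetical usage of the component added above (sizes and width are invented; in real use the restore engine's callback drives `Set`):

```go
package tui

import "fmt"

// exampleDetailedProgress is illustrative only and not part of this change set.
func exampleDetailedProgress() {
	dp := NewDetailedProgress(1<<30, "Extracting archive") // 1 GiB total
	dp.Set(256 << 20)                                      // 256 MiB done
	dp.Set(512 << 20)                                      // a later update

	fmt.Println(dp.GetState().RenderProgressBar(100))
	// e.g. "[███████████████░░░░░░░░░░░░░░░] 50% 512.0 MB/1.0 GB ..."
	// (speed and ETA are derived from the rolling sample window)
	fmt.Println(dp.GetState().RenderCompact())
	// e.g. "[ 50%] Extracting archive - 512.0 MB/1.0 GB"
}
```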
@@ -204,132 +204,111 @@ func (m DiagnoseViewModel) View() string {
func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) string {
	var s strings.Builder

-	// Status Box
-	s.WriteString("+--[ VALIDATION STATUS ]" + strings.Repeat("-", 37) + "+\n")
+	// Validation Status
+	s.WriteString(diagnoseHeaderStyle.Render("[STATUS] Validation"))
+	s.WriteString("\n")
	if result.IsValid {
-		s.WriteString("| " + diagnosePassStyle.Render("[OK] VALID - Archive passed all checks") + strings.Repeat(" ", 18) + "|\n")
+		s.WriteString(diagnosePassStyle.Render(" [OK] VALID - Archive passed all checks"))
+		s.WriteString("\n")
	} else {
-		s.WriteString("| " + diagnoseFailStyle.Render("[FAIL] INVALID - Archive has problems") + strings.Repeat(" ", 19) + "|\n")
+		s.WriteString(diagnoseFailStyle.Render(" [FAIL] INVALID - Archive has problems"))
+		s.WriteString("\n")
	}

	if result.IsTruncated {
-		s.WriteString("| " + diagnoseFailStyle.Render("[!] TRUNCATED - File is incomplete") + strings.Repeat(" ", 22) + "|\n")
+		s.WriteString(diagnoseFailStyle.Render(" [!] TRUNCATED - File is incomplete"))
+		s.WriteString("\n")
	}

	if result.IsCorrupted {
-		s.WriteString("| " + diagnoseFailStyle.Render("[!] CORRUPTED - File structure damaged") + strings.Repeat(" ", 18) + "|\n")
+		s.WriteString(diagnoseFailStyle.Render(" [!] CORRUPTED - File structure damaged"))
+		s.WriteString("\n")
	}

-	s.WriteString("+" + strings.Repeat("-", 60) + "+\n\n")
+	s.WriteString("\n")

-	// Details Box
+	// Details
	if result.Details != nil {
-		s.WriteString("+--[ DETAILS ]" + strings.Repeat("-", 46) + "+\n")
+		s.WriteString(diagnoseHeaderStyle.Render("[INFO] Details"))
+		s.WriteString("\n")

		if result.Details.HasPGDMPSignature {
-			s.WriteString("| " + diagnosePassStyle.Render("[+]") + " PostgreSQL custom format (PGDMP)" + strings.Repeat(" ", 20) + "|\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " PostgreSQL custom format (PGDMP)\n")
		}

		if result.Details.HasSQLHeader {
-			s.WriteString("| " + diagnosePassStyle.Render("[+]") + " PostgreSQL SQL header found" + strings.Repeat(" ", 25) + "|\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " PostgreSQL SQL header found\n")
		}

		if result.Details.GzipValid {
-			s.WriteString("| " + diagnosePassStyle.Render("[+]") + " Gzip compression valid" + strings.Repeat(" ", 30) + "|\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " Gzip compression valid\n")
		}

		if result.Details.PgRestoreListable {
-			tableInfo := fmt.Sprintf(" (%d tables)", result.Details.TableCount)
-			padding := 36 - len(tableInfo)
-			if padding < 0 {
-				padding = 0
-			}
-			s.WriteString("| " + diagnosePassStyle.Render("[+]") + " pg_restore can list contents" + tableInfo + strings.Repeat(" ", padding) + "|\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + fmt.Sprintf(" pg_restore can list contents (%d tables)\n", result.Details.TableCount))
		}

		if result.Details.CopyBlockCount > 0 {
-			blockInfo := fmt.Sprintf("%d COPY blocks found", result.Details.CopyBlockCount)
-			padding := 50 - len(blockInfo)
-			if padding < 0 {
-				padding = 0
-			}
-			s.WriteString("| [-] " + blockInfo + strings.Repeat(" ", padding) + "|\n")
+			s.WriteString(fmt.Sprintf(" [-] %d COPY blocks found\n", result.Details.CopyBlockCount))
		}

		if result.Details.UnterminatedCopy {
-			s.WriteString("| " + diagnoseFailStyle.Render("[-]") + " Unterminated COPY: " + truncate(result.Details.LastCopyTable, 30) + strings.Repeat(" ", 5) + "|\n")
+			s.WriteString(diagnoseFailStyle.Render(" [-]") + " Unterminated COPY: " + truncate(result.Details.LastCopyTable, 30) + "\n")
		}

		if result.Details.ProperlyTerminated {
-			s.WriteString("| " + diagnosePassStyle.Render("[+]") + " All COPY blocks properly terminated" + strings.Repeat(" ", 17) + "|\n")
+			s.WriteString(diagnosePassStyle.Render(" [+]") + " All COPY blocks properly terminated\n")
		}

		if result.Details.ExpandedSize > 0 {
-			sizeInfo := fmt.Sprintf("Expanded: %s (%.1fx)", formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio)
-			padding := 50 - len(sizeInfo)
-			if padding < 0 {
-				padding = 0
-			}
-			s.WriteString("| [-] " + sizeInfo + strings.Repeat(" ", padding) + "|\n")
+			s.WriteString(fmt.Sprintf(" [-] Expanded: %s (%.1fx)\n", formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio))
		}

-		s.WriteString("+" + strings.Repeat("-", 60) + "+\n")
+		s.WriteString("\n")
	}

-	// Errors Box
+	// Errors
	if len(result.Errors) > 0 {
-		s.WriteString("\n+--[ ERRORS ]" + strings.Repeat("-", 47) + "+\n")
+		s.WriteString(diagnoseFailStyle.Render("[FAIL] Errors"))
+		s.WriteString("\n")
		for i, e := range result.Errors {
			if i >= 5 {
-				remaining := fmt.Sprintf("... and %d more errors", len(result.Errors)-5)
-				padding := 56 - len(remaining)
-				s.WriteString("| " + remaining + strings.Repeat(" ", padding) + "|\n")
+				s.WriteString(fmt.Sprintf(" ... and %d more errors\n", len(result.Errors)-5))
				break
			}
-			errText := truncate(e, 54)
-			padding := 56 - len(errText)
-			if padding < 0 {
-				padding = 0
+			s.WriteString(" " + truncate(e, 60) + "\n")
		}
-			s.WriteString("| " + errText + strings.Repeat(" ", padding) + "|\n")
-		}
-		s.WriteString("+" + strings.Repeat("-", 60) + "+\n")
+		s.WriteString("\n")
	}

-	// Warnings Box
+	// Warnings
	if len(result.Warnings) > 0 {
-		s.WriteString("\n+--[ WARNINGS ]" + strings.Repeat("-", 45) + "+\n")
+		s.WriteString(diagnoseWarnStyle.Render("[WARN] Warnings"))
+		s.WriteString("\n")
		for i, w := range result.Warnings {
			if i >= 3 {
-				remaining := fmt.Sprintf("... and %d more warnings", len(result.Warnings)-3)
-				padding := 56 - len(remaining)
-				s.WriteString("| " + remaining + strings.Repeat(" ", padding) + "|\n")
+				s.WriteString(fmt.Sprintf(" ... and %d more warnings\n", len(result.Warnings)-3))
				break
			}
-			warnText := truncate(w, 54)
-			padding := 56 - len(warnText)
-			if padding < 0 {
-				padding = 0
+			s.WriteString(" " + truncate(w, 60) + "\n")
		}
-			s.WriteString("| " + warnText + strings.Repeat(" ", padding) + "|\n")
-		}
-		s.WriteString("+" + strings.Repeat("-", 60) + "+\n")
+		s.WriteString("\n")
	}

-	// Recommendations Box
+	// Recommendations
	if !result.IsValid {
-		s.WriteString("\n+--[ RECOMMENDATIONS ]" + strings.Repeat("-", 38) + "+\n")
+		s.WriteString(diagnoseInfoStyle.Render("[HINT] Recommendations"))
+		s.WriteString("\n")
		if result.IsTruncated {
-			s.WriteString("| 1. Re-run backup with current version (v3.42.12+) |\n")
-			s.WriteString("| 2. Check disk space on backup server |\n")
-			s.WriteString("| 3. Verify network stability for remote backups |\n")
+			s.WriteString(" 1. Re-run backup with current version (v3.42+)\n")
+			s.WriteString(" 2. Check disk space on backup server\n")
+			s.WriteString(" 3. Verify network stability for remote backups\n")
		}
		if result.IsCorrupted {
-			s.WriteString("| 1. Verify backup was transferred completely |\n")
-			s.WriteString("| 2. Try restoring from a previous backup |\n")
+			s.WriteString(" 1. Verify backup was transferred completely\n")
+			s.WriteString(" 2. Try restoring from a previous backup\n")
		}
-		s.WriteString("+" + strings.Repeat("-", 60) + "+\n")
	}

	return s.String()
@@ -349,10 +328,8 @@ func (m DiagnoseViewModel) renderClusterResults() string {
		}
	}

-	s.WriteString(strings.Repeat("-", 60))
	s.WriteString("\n")
-	s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("CLUSTER SUMMARY: %d databases\n", len(m.results))))
+	s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("[STATS] Cluster Summary: %d databases", len(m.results))))
-	s.WriteString(strings.Repeat("-", 60))
	s.WriteString("\n\n")

	if invalidCount == 0 {
@@ -364,7 +341,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// List all dumps with status
|
// List all dumps with status
|
||||||
s.WriteString(diagnoseHeaderStyle.Render("Database Dumps:"))
|
s.WriteString(diagnoseHeaderStyle.Render("[LIST] Database Dumps"))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
// Show visible range based on cursor
|
// Show visible range based on cursor
|
||||||
@@ -413,9 +390,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
|
|||||||
if m.cursor < len(m.results) {
|
if m.cursor < len(m.results) {
|
||||||
selected := m.results[m.cursor]
|
selected := m.results[m.cursor]
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(strings.Repeat("-", 60))
|
s.WriteString(diagnoseHeaderStyle.Render("[INFO] Selected: " + selected.FileName))
|
||||||
s.WriteString("\n")
|
|
||||||
s.WriteString(diagnoseHeaderStyle.Render("Selected: " + selected.FileName))
|
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
// Show condensed details for selected
|
// Show condensed details for selected
|
||||||
|
|||||||
@@ -334,13 +334,13 @@ func (m *MenuModel) View() string {

 // handleSingleBackup opens database selector for single backup
 func (m *MenuModel) handleSingleBackup() (tea.Model, tea.Cmd) {
-    selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[DB] Single Database Backup", "single")
+    selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[SELECT] Single Database Backup", "single")
     return selector, selector.Init()
 }

 // handleSampleBackup opens database selector for sample backup
 func (m *MenuModel) handleSampleBackup() (tea.Model, tea.Cmd) {
-    selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[STATS] Sample Database Backup", "sample")
+    selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[SELECT] Sample Database Backup", "sample")
     return selector, selector.Init()
 }

@@ -356,7 +356,7 @@ func (m *MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
         return executor, executor.Init()
     }
     confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
-        "[DB] Cluster Backup",
+        "[CHECK] Cluster Backup",
         "This will backup ALL databases in the cluster. Continue?",
         func() (tea.Model, tea.Cmd) {
             executor := NewBackupExecution(m.config, m.logger, m, m.ctx, "cluster", "", 0)
@@ -6,6 +6,7 @@ import (
     "os/exec"
     "path/filepath"
     "strings"
+    "sync"
     "time"

     tea "github.com/charmbracelet/bubbletea"
@@ -45,6 +46,17 @@ type RestoreExecutionModel struct {
     spinnerFrame int
     spinnerFrames []string

+    // Detailed byte progress for schollz-style display
+    bytesTotal int64
+    bytesDone int64
+    description string
+    showBytes bool // True when we have real byte progress to show
+    speed float64 // Rolling window speed in bytes/sec
+
+    // Database count progress (for cluster restore)
+    dbTotal int
+    dbDone int
+
     // Results
     done bool
     cancelling bool // True when user has requested cancellation
@@ -101,6 +113,9 @@ type restoreProgressMsg struct {
     phase string
     progress int
     detail string
+    bytesTotal int64
+    bytesDone int64
+    description string
 }

 type restoreCompleteMsg struct {
@@ -109,6 +124,102 @@ type restoreCompleteMsg struct {
     elapsed time.Duration
 }

+// sharedProgressState holds progress state that can be safely accessed from callbacks
+type sharedProgressState struct {
+    mu sync.Mutex
+    bytesTotal int64
+    bytesDone int64
+    description string
+    hasUpdate bool
+
+    // Database count progress (for cluster restore)
+    dbTotal int
+    dbDone int
+
+    // Rolling window for speed calculation
+    speedSamples []restoreSpeedSample
+}
+
+type restoreSpeedSample struct {
+    timestamp time.Time
+    bytes int64
+}
+
+// Package-level shared progress state for restore operations
+var (
+    currentRestoreProgressMu sync.Mutex
+    currentRestoreProgressState *sharedProgressState
+)
+
+func setCurrentRestoreProgress(state *sharedProgressState) {
+    currentRestoreProgressMu.Lock()
+    defer currentRestoreProgressMu.Unlock()
+    currentRestoreProgressState = state
+}
+
+func clearCurrentRestoreProgress() {
+    currentRestoreProgressMu.Lock()
+    defer currentRestoreProgressMu.Unlock()
+    currentRestoreProgressState = nil
+}
+
+func getCurrentRestoreProgress() (bytesTotal, bytesDone int64, description string, hasUpdate bool, dbTotal, dbDone int, speed float64) {
+    currentRestoreProgressMu.Lock()
+    defer currentRestoreProgressMu.Unlock()
+
+    if currentRestoreProgressState == nil {
+        return 0, 0, "", false, 0, 0, 0
+    }
+
+    currentRestoreProgressState.mu.Lock()
+    defer currentRestoreProgressState.mu.Unlock()
+
+    // Calculate rolling window speed
+    speed = calculateRollingSpeed(currentRestoreProgressState.speedSamples)
+
+    return currentRestoreProgressState.bytesTotal, currentRestoreProgressState.bytesDone,
+        currentRestoreProgressState.description, currentRestoreProgressState.hasUpdate,
+        currentRestoreProgressState.dbTotal, currentRestoreProgressState.dbDone, speed
+}
+
+// calculateRollingSpeed calculates speed from recent samples (last 5 seconds)
+func calculateRollingSpeed(samples []restoreSpeedSample) float64 {
+    if len(samples) < 2 {
+        return 0
+    }
+
+    // Use samples from last 5 seconds for smoothed speed
+    now := time.Now()
+    cutoff := now.Add(-5 * time.Second)
+
+    var firstInWindow, lastInWindow *restoreSpeedSample
+    for i := range samples {
+        if samples[i].timestamp.After(cutoff) {
+            if firstInWindow == nil {
+                firstInWindow = &samples[i]
+            }
+            lastInWindow = &samples[i]
+        }
+    }
+
+    // Fall back to first and last if window is empty
+    if firstInWindow == nil || lastInWindow == nil || firstInWindow == lastInWindow {
+        firstInWindow = &samples[0]
+        lastInWindow = &samples[len(samples)-1]
+    }
+
+    elapsed := lastInWindow.timestamp.Sub(firstInWindow.timestamp).Seconds()
+    if elapsed <= 0 {
+        return 0
+    }
+
+    bytesTransferred := lastInWindow.bytes - firstInWindow.bytes
+    return float64(bytesTransferred) / elapsed
+}
+
+// restoreProgressChannel allows sending progress updates from the restore goroutine
+type restoreProgressChannel chan restoreProgressMsg
+
 func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config, log logger.Logger, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string, saveDebugLog bool) tea.Cmd {
     return func() tea.Msg {
         // NO TIMEOUT for restore operations - a restore takes as long as it takes
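The speed shown next to the progress bar is not a plain bytes-over-elapsed average: `calculateRollingSpeed` only considers samples from the last five seconds, so a slow start or a brief stall stops dominating the displayed rate after a few ticks. A minimal, self-contained sketch of the same windowed calculation (the names `sample` and `rollingSpeed` are illustrative, not the tool's API):

```go
package main

import (
	"fmt"
	"time"
)

type sample struct {
	t time.Time
	b int64 // cumulative bytes read so far
}

// rollingSpeed returns bytes/sec computed from the samples inside the window.
func rollingSpeed(samples []sample, window time.Duration, now time.Time) float64 {
	cutoff := now.Add(-window)
	var first, last *sample
	for i := range samples {
		if samples[i].t.After(cutoff) {
			if first == nil {
				first = &samples[i]
			}
			last = &samples[i]
		}
	}
	if first == nil || last == nil || first == last {
		return 0
	}
	dt := last.t.Sub(first.t).Seconds()
	if dt <= 0 {
		return 0
	}
	return float64(last.b-first.b) / dt
}

func main() {
	now := time.Now()
	samples := []sample{
		{now.Add(-8 * time.Second), 0},         // outside the 5s window, ignored
		{now.Add(-4 * time.Second), 100 << 20}, // 100 MiB
		{now.Add(-1 * time.Second), 190 << 20}, // 190 MiB
	}
	// 90 MiB transferred over 3s inside the window -> prints "30 MiB/s"
	fmt.Printf("%.0f MiB/s\n", rollingSpeed(samples, 5*time.Second, now)/(1<<20))
}
```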
@@ -156,6 +267,48 @@ func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config
         // STEP 2: Create restore engine with silent progress (no stdout interference with TUI)
         engine := restore.NewSilent(cfg, log, dbClient)

+        // Set up progress callback for detailed progress reporting
+        // We use a shared pointer that can be queried by the TUI ticker
+        progressState := &sharedProgressState{
+            speedSamples: make([]restoreSpeedSample, 0, 100),
+        }
+        engine.SetProgressCallback(func(current, total int64, description string) {
+            progressState.mu.Lock()
+            defer progressState.mu.Unlock()
+            progressState.bytesDone = current
+            progressState.bytesTotal = total
+            progressState.description = description
+            progressState.hasUpdate = true
+
+            // Add speed sample for rolling window calculation
+            progressState.speedSamples = append(progressState.speedSamples, restoreSpeedSample{
+                timestamp: time.Now(),
+                bytes: current,
+            })
+            // Keep only last 100 samples
+            if len(progressState.speedSamples) > 100 {
+                progressState.speedSamples = progressState.speedSamples[len(progressState.speedSamples)-100:]
+            }
+        })
+
+        // Set up database progress callback for cluster restore
+        engine.SetDatabaseProgressCallback(func(done, total int, dbName string) {
+            progressState.mu.Lock()
+            defer progressState.mu.Unlock()
+            progressState.dbDone = done
+            progressState.dbTotal = total
+            progressState.description = fmt.Sprintf("Restoring %s", dbName)
+            progressState.hasUpdate = true
+            // Clear byte progress when switching to db progress
+            progressState.bytesTotal = 0
+            progressState.bytesDone = 0
+        })
+
+        // Store progress state in a package-level variable for the ticker to access
+        // This is a workaround because tea messages can't be sent from callbacks
+        setCurrentRestoreProgress(progressState)
+        defer clearCurrentRestoreProgress()
+
         // Enable debug logging if requested
         if saveDebugLog {
             // Generate debug log path using configured WorkDir
@@ -165,9 +318,6 @@ func executeRestoreWithTUIProgress(parentCtx context.Context, cfg *config.Config
             log.Info("Debug logging enabled", "path", debugLogPath)
         }

-        // Set up progress callback (but it won't work in goroutine - progress is already sent via logs)
-        // The TUI will just use spinner animation to show activity
-
         // STEP 3: Execute restore based on type
         var restoreErr error
         if restoreType == "restore-cluster" {
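For the byte counts passed to `SetProgressCallback` to mean anything, something inside the extraction pipeline has to count them as they stream through. A common pattern is an `io.Reader` wrapper that reports cumulative bytes before handing data on to `compress/gzip` and `archive/tar`. The sketch below only illustrates that idea; `countingReader`, `listArchive`, the archive path, and the throttling in `main` are assumptions, not code from this repository:

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
)

// countingReader wraps an io.Reader and reports the cumulative bytes read.
type countingReader struct {
	r          io.Reader
	read       int64
	onProgress func(read int64)
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.read += int64(n)
	if c.onProgress != nil {
		c.onProgress(c.read)
	}
	return n, err
}

// listArchive walks a .tar.gz and reports compressed bytes consumed as it goes.
func listArchive(path string, onProgress func(read int64)) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	cr := &countingReader{r: f, onProgress: onProgress}
	gz, err := gzip.NewReader(cr)
	if err != nil {
		return err
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	for {
		if _, err := tr.Next(); err == io.EOF {
			return nil
		} else if err != nil {
			return err
		}
		// A real restore would copy the entry contents here.
	}
}

func main() {
	var last int64
	_ = listArchive("backup.tar.gz", func(read int64) {
		// Throttle: only report every 10 MiB so the UI is not flooded.
		if read-last >= 10<<20 {
			last = read
			os.Stderr.WriteString("progress update\n")
		}
	})
}
```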
@@ -206,7 +356,29 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
         m.spinnerFrame = (m.spinnerFrame + 1) % len(m.spinnerFrames)
         m.elapsed = time.Since(m.startTime)

-        // Update status based on elapsed time to show progress
+        // Poll shared progress state for real-time updates
+        bytesTotal, bytesDone, description, hasUpdate, dbTotal, dbDone, speed := getCurrentRestoreProgress()
+        if hasUpdate && bytesTotal > 0 {
+            m.bytesTotal = bytesTotal
+            m.bytesDone = bytesDone
+            m.description = description
+            m.showBytes = true
+            m.speed = speed
+
+            // Update status to reflect actual progress
+            m.status = description
+            m.phase = "Extracting"
+            m.progress = int((bytesDone * 100) / bytesTotal)
+        } else if hasUpdate && dbTotal > 0 {
+            // Database count progress for cluster restore
+            m.dbTotal = dbTotal
+            m.dbDone = dbDone
+            m.showBytes = false
+            m.status = fmt.Sprintf("Restoring database %d of %d...", dbDone+1, dbTotal)
+            m.phase = "Restore"
+            m.progress = int((dbDone * 100) / dbTotal)
+        } else {
+            // Fallback: Update status based on elapsed time to show progress
         // This provides visual feedback even though we don't have real-time progress
         elapsedSec := int(m.elapsed.Seconds())

@@ -241,6 +413,7 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
                 m.phase = "Restore"
             }
         }
+        }

         return m, restoreTickCmd()
     }
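This polling only happens while ticks keep arriving; `restoreTickCmd()` is referenced here but not shown in the diff. In Bubble Tea the usual shape is a `tea.Tick` command that `Update` re-issues on every tick, roughly as in this sketch (the package name and the `restoreTickMsg` type are assumptions, not taken from the source):

```go
package tui

import (
	"time"

	tea "github.com/charmbracelet/bubbletea"
)

// restoreTickMsg is a hypothetical message type carrying the tick time.
type restoreTickMsg time.Time

// restoreTickCmd schedules the next poll of the shared progress state.
// Re-issued from Update on every tick, so polling continues until the
// restore completes; 100ms keeps redraws cheap while staying responsive.
func restoreTickCmd() tea.Cmd {
	return tea.Tick(100*time.Millisecond, func(t time.Time) tea.Msg {
		return restoreTickMsg(t)
	})
}
```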
@@ -250,6 +423,15 @@ func (m RestoreExecutionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
         m.status = msg.status
         m.phase = msg.phase
         m.progress = msg.progress
+
+        // Update byte-level progress if available
+        if msg.bytesTotal > 0 {
+            m.bytesTotal = msg.bytesTotal
+            m.bytesDone = msg.bytesDone
+            m.description = msg.description
+            m.showBytes = true
+        }
+
         if msg.detail != "" {
             m.details = append(m.details, msg.detail)
             // Keep only last 5 details
@@ -356,19 +538,39 @@ func (m RestoreExecutionModel) View() string {
     // Show progress
     s.WriteString(fmt.Sprintf("Phase: %s\n", m.phase))

-    // Show status with rotating spinner (unified indicator for all operations)
+    // Show detailed progress bar when we have byte-level information
+    // In this case, hide the spinner for cleaner display
+    if m.showBytes && m.bytesTotal > 0 {
+        // Status line without spinner (progress bar provides activity indication)
+        s.WriteString(fmt.Sprintf("Status: %s\n", m.status))
+        s.WriteString("\n")
+
+        // Render schollz-style progress bar with bytes, rolling speed, ETA
+        s.WriteString(renderDetailedProgressBarWithSpeed(m.bytesDone, m.bytesTotal, m.speed))
+        s.WriteString("\n\n")
+    } else if m.dbTotal > 0 {
+        // Database count progress for cluster restore
+        spinner := m.spinnerFrames[m.spinnerFrame]
+        s.WriteString(fmt.Sprintf("Status: %s %s\n", spinner, m.status))
+        s.WriteString("\n")
+
+        // Show database progress bar
+        s.WriteString(renderDatabaseProgressBar(m.dbDone, m.dbTotal))
+        s.WriteString("\n\n")
+    } else {
+        // Show status with rotating spinner (for phases without detailed progress)
     spinner := m.spinnerFrames[m.spinnerFrame]
     s.WriteString(fmt.Sprintf("Status: %s %s\n", spinner, m.status))
     s.WriteString("\n")

-    // Only show progress bar for single database restore
-    // Cluster restore uses spinner only (consistent with CLI behavior)
     if m.restoreType == "restore-single" {
+        // Fallback to simple progress bar for single database restore
         progressBar := renderProgressBar(m.progress)
         s.WriteString(progressBar)
         s.WriteString(fmt.Sprintf(" %d%%\n", m.progress))
         s.WriteString("\n")
     }
+    }

     // Elapsed time
     s.WriteString(fmt.Sprintf("Elapsed: %s\n", formatDuration(m.elapsed)))
@@ -390,6 +592,92 @@ func renderProgressBar(percent int) string {
     return successStyle.Render(bar) + infoStyle.Render(empty)
 }

+// renderDetailedProgressBar renders a schollz-style progress bar with bytes, speed, and ETA
+// Uses elapsed time for speed calculation (fallback)
+func renderDetailedProgressBar(done, total int64, elapsed time.Duration) string {
+    speed := 0.0
+    if elapsed.Seconds() > 0 {
+        speed = float64(done) / elapsed.Seconds()
+    }
+    return renderDetailedProgressBarWithSpeed(done, total, speed)
+}
+
+// renderDetailedProgressBarWithSpeed renders a schollz-style progress bar with pre-calculated rolling speed
+func renderDetailedProgressBarWithSpeed(done, total int64, speed float64) string {
+    var s strings.Builder
+
+    // Calculate percentage
+    percent := 0
+    if total > 0 {
+        percent = int((done * 100) / total)
+        if percent > 100 {
+            percent = 100
+        }
+    }
+
+    // Render progress bar
+    width := 30
+    filled := (percent * width) / 100
+    barFilled := strings.Repeat("█", filled)
+    barEmpty := strings.Repeat("░", width-filled)
+
+    s.WriteString(successStyle.Render("["))
+    s.WriteString(successStyle.Render(barFilled))
+    s.WriteString(infoStyle.Render(barEmpty))
+    s.WriteString(successStyle.Render("]"))
+
+    // Percentage
+    s.WriteString(fmt.Sprintf(" %3d%%", percent))
+
+    // Bytes progress
+    s.WriteString(fmt.Sprintf(" %s / %s", FormatBytes(done), FormatBytes(total)))
+
+    // Speed display (using rolling window speed)
+    if speed > 0 {
+        s.WriteString(fmt.Sprintf(" %s/s", FormatBytes(int64(speed))))
+
+        // ETA calculation based on rolling speed
+        if done < total {
+            remaining := total - done
+            etaSeconds := float64(remaining) / speed
+            eta := time.Duration(etaSeconds) * time.Second
+            s.WriteString(fmt.Sprintf(" ETA: %s", FormatDurationShort(eta)))
+        }
+    }
+
+    return s.String()
+}
+
+// renderDatabaseProgressBar renders a progress bar for database count (cluster restore)
+func renderDatabaseProgressBar(done, total int) string {
+    var s strings.Builder
+
+    // Calculate percentage
+    percent := 0
+    if total > 0 {
+        percent = (done * 100) / total
+        if percent > 100 {
+            percent = 100
+        }
+    }
+
+    // Render progress bar
+    width := 30
+    filled := (percent * width) / 100
+    barFilled := strings.Repeat("█", filled)
+    barEmpty := strings.Repeat("░", width-filled)
+
+    s.WriteString(successStyle.Render("["))
+    s.WriteString(successStyle.Render(barFilled))
+    s.WriteString(infoStyle.Render(barEmpty))
+    s.WriteString(successStyle.Render("]"))
+
+    // Count and percentage
+    s.WriteString(fmt.Sprintf(" %3d%% %d / %d databases", percent, done, total))
+
+    return s.String()
+}
+
 // formatDuration formats duration in human readable format
 func formatDuration(d time.Duration) string {
     if d < time.Minute {
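The percentage, bar fill, and ETA above are plain integer arithmetic. A small standalone example with the same formulas, using unstyled strings and raw `fmt` output in place of `FormatBytes`/`FormatDurationShort`:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	var done, total int64 = 245 << 20, 1 << 30 // 245 MiB of 1 GiB
	speed := 45.0 * (1 << 20)                  // 45 MiB/s from the rolling window

	percent := int((done * 100) / total) // 23
	width := 30
	filled := (percent * width) / 100 // 6 of 30 cells filled

	bar := "[" + strings.Repeat("█", filled) + strings.Repeat("░", width-filled) + "]"
	eta := time.Duration(float64(total-done)/speed) * time.Second // ~17s remaining

	fmt.Printf("%s %3d%% %d / %d MiB %.0f MiB/s ETA: %s\n",
		bar, percent, done>>20, total>>20, speed/(1<<20), eta)
	// Output: [██████░░░░░░░░░░░░░░░░░░░░░░░░]  23% 245 / 1024 MiB 45 MiB/s ETA: 17s
}
```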
@@ -747,7 +747,7 @@ func (m SettingsModel) View() string {
     // Current configuration summary
     if !m.editing {
         b.WriteString("\n")
-        b.WriteString(infoStyle.Render("[LOG] Current Configuration:"))
+        b.WriteString(infoStyle.Render("[INFO] Current Configuration"))
         b.WriteString("\n")

         summary := []string{
@@ -173,7 +173,7 @@ func (m StatusViewModel) View() string {
         s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v\n", m.err)))
         s.WriteString("\n")
     } else {
-        s.WriteString("Connection Status:\n")
+        s.WriteString("[CONN] Connection Status\n")
         if m.connected {
             s.WriteString(successStyle.Render(" [+] Connected\n"))
         } else {
@@ -181,11 +181,12 @@ func (m StatusViewModel) View() string {
         }
         s.WriteString("\n")

-        s.WriteString(fmt.Sprintf("Database Type: %s (%s)\n", m.config.DisplayDatabaseType(), m.config.DatabaseType))
-        s.WriteString(fmt.Sprintf("Host: %s:%d\n", m.config.Host, m.config.Port))
-        s.WriteString(fmt.Sprintf("User: %s\n", m.config.User))
-        s.WriteString(fmt.Sprintf("Backup Directory: %s\n", m.config.BackupDir))
-        s.WriteString(fmt.Sprintf("Version: %s\n\n", m.dbVersion))
+        s.WriteString("[INFO] Server Details\n")
+        s.WriteString(fmt.Sprintf(" Database Type: %s (%s)\n", m.config.DisplayDatabaseType(), m.config.DatabaseType))
+        s.WriteString(fmt.Sprintf(" Host: %s:%d\n", m.config.Host, m.config.Port))
+        s.WriteString(fmt.Sprintf(" User: %s\n", m.config.User))
+        s.WriteString(fmt.Sprintf(" Backup Directory: %s\n", m.config.BackupDir))
+        s.WriteString(fmt.Sprintf(" Version: %s\n\n", m.dbVersion))

         if m.dbCount > 0 {
             s.WriteString(fmt.Sprintf("Databases Found: %s\n", successStyle.Render(fmt.Sprintf("%d", m.dbCount))))