feat(tui): add Work Directory setting for large archive operations
- Added WorkDir to Config for custom temp directory
- TUI Settings: new 'Work Directory' option to set alternative temp location
- Restore Preview: press 'w' to toggle work directory (uses backup dir as default)
- Diagnose View: now uses configured WorkDir for cluster extraction
- Config persistence: WorkDir saved to .dbbackup.conf

This fixes diagnosis/restore failures when /tmp is too small for large archives.
Use cases: servers with limited /tmp, 70GB+ archives needing 280GB+ extraction space.
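For reference, this is roughly what the persisted setting looks like in .dbbackup.conf once a work directory has been configured. The key names match what SaveLocalConfig writes in the diff below; the paths are illustrative only:

    backup_dir = /srv/backups
    work_dir = /srv/backups/work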
@@ -64,6 +64,9 @@ type Config struct {
     // Cluster parallelism
     ClusterParallelism int // Number of concurrent databases during cluster operations (0 = sequential)

+    // Working directory for large operations (extraction, diagnosis)
+    WorkDir string // Alternative temp directory for large operations (default: system temp)
+
     // Swap file management (for large backups)
     SwapFilePath   string // Path to temporary swap file
     SwapFileSizeGB int    // Size in GB (0 = disabled)
@@ -22,6 +22,7 @@ type LocalConfig struct {

     // Backup settings
     BackupDir   string
+    WorkDir     string // Working directory for large operations
     Compression int
     Jobs        int
     DumpJobs    int

@@ -97,6 +98,8 @@ func LoadLocalConfig() (*LocalConfig, error) {
         switch key {
         case "backup_dir":
             cfg.BackupDir = value
+        case "work_dir":
+            cfg.WorkDir = value
         case "compression":
             if c, err := strconv.Atoi(value); err == nil {
                 cfg.Compression = c

@@ -174,6 +177,9 @@ func SaveLocalConfig(cfg *LocalConfig) error {
     if cfg.BackupDir != "" {
         sb.WriteString(fmt.Sprintf("backup_dir = %s\n", cfg.BackupDir))
     }
+    if cfg.WorkDir != "" {
+        sb.WriteString(fmt.Sprintf("work_dir = %s\n", cfg.WorkDir))
+    }
     if cfg.Compression != 0 {
         sb.WriteString(fmt.Sprintf("compression = %d\n", cfg.Compression))
     }

@@ -244,6 +250,9 @@ func ApplyLocalConfig(cfg *Config, local *LocalConfig) {
     if local.BackupDir != "" {
         cfg.BackupDir = local.BackupDir
     }
+    if local.WorkDir != "" {
+        cfg.WorkDir = local.WorkDir
+    }
     if cfg.CompressionLevel == 6 && local.Compression != 0 {
         cfg.CompressionLevel = local.Compression
     }

@@ -280,6 +289,7 @@ func ConfigFromConfig(cfg *Config) *LocalConfig {
         Database:    cfg.Database,
         SSLMode:     cfg.SSLMode,
         BackupDir:   cfg.BackupDir,
+        WorkDir:     cfg.WorkDir,
         Compression: cfg.CompressionLevel,
         Jobs:        cfg.Jobs,
         DumpJobs:    cfg.DumpJobs,
@@ -88,8 +88,8 @@ func runDiagnosis(cfg *config.Config, log logger.Logger, archive ArchiveInfo) te

     // For cluster archives, we can do deep analysis
     if archive.Format.IsClusterBackup() {
-        // Create temp directory
-        tempDir, err := createTempDir("dbbackup-diagnose-*")
+        // Create temp directory (use WorkDir if configured for large archives)
+        tempDir, err := createTempDirIn(cfg.WorkDir, "dbbackup-diagnose-*")
         if err != nil {
             return diagnoseCompleteMsg{err: fmt.Errorf("failed to create temp dir: %w", err)}
         }

@@ -445,6 +445,17 @@ func createTempDir(pattern string) (string, error) {
     return os.MkdirTemp("", pattern)
 }

+func createTempDirIn(baseDir, pattern string) (string, error) {
+    if baseDir == "" {
+        return os.MkdirTemp("", pattern)
+    }
+    // Ensure base directory exists
+    if err := os.MkdirAll(baseDir, 0755); err != nil {
+        return "", fmt.Errorf("cannot create work directory: %w", err)
+    }
+    return os.MkdirTemp(baseDir, pattern)
+}
+
 func removeTempDir(path string) error {
     return os.RemoveAll(path)
 }
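As a sanity check on the new helper, here is a small self-contained Go sketch of the fallback behavior: an empty base directory keeps the current behavior (os.MkdirTemp's system default, typically $TMPDIR or /tmp), while a configured work directory is created on demand and used as the parent for extraction. The helper body is copied from the hunk above; the main function and the example path are illustrative only.

    package main

    import (
        "fmt"
        "os"
    )

    // Copied from the diff: an empty baseDir falls back to the system temp
    // directory, otherwise baseDir is created (if missing) and used as the
    // parent for the temporary extraction directory.
    func createTempDirIn(baseDir, pattern string) (string, error) {
        if baseDir == "" {
            return os.MkdirTemp("", pattern)
        }
        if err := os.MkdirAll(baseDir, 0755); err != nil {
            return "", fmt.Errorf("cannot create work directory: %w", err)
        }
        return os.MkdirTemp(baseDir, pattern)
    }

    func main() {
        // Default: the extraction directory lands under the system temp dir.
        sysTmp, err := createTempDirIn("", "dbbackup-diagnose-*")
        fmt.Println(sysTmp, err)

        // With a configured WorkDir (illustrative path): extraction lands on a
        // filesystem with enough space for large cluster archives.
        workTmp, err := createTempDirIn("/srv/backups/work", "dbbackup-diagnose-*")
        fmt.Println(workTmp, err)

        // Clean up the illustrative directories.
        os.RemoveAll(sysTmp)
        os.RemoveAll(workTmp)
    }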
@@ -32,6 +32,7 @@ type RestoreExecutionModel struct {
     cleanClusterFirst bool     // Drop all user databases before cluster restore
     existingDBs       []string // List of databases to drop
     saveDebugLog      bool     // Save detailed error report on failure
+    workDir           string   // Custom work directory for extraction

     // Progress tracking
     status string

@@ -50,7 +51,7 @@ type RestoreExecutionModel struct {
 }

 // NewRestoreExecution creates a new restore execution model
-func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model, ctx context.Context, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string, saveDebugLog bool) RestoreExecutionModel {
+func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model, ctx context.Context, archive ArchiveInfo, targetDB string, cleanFirst, createIfMissing bool, restoreType string, cleanClusterFirst bool, existingDBs []string, saveDebugLog bool, workDir string) RestoreExecutionModel {
     return RestoreExecutionModel{
         config: cfg,
         logger: log,

@@ -64,6 +65,7 @@ func NewRestoreExecution(cfg *config.Config, log logger.Logger, parent tea.Model
         cleanClusterFirst: cleanClusterFirst,
         existingDBs:       existingDBs,
         saveDebugLog:      saveDebugLog,
+        workDir:           workDir,
         status:            "Initializing...",
         phase:             "Starting",
         startTime:         time.Now(),
@@ -60,6 +60,7 @@ type RestorePreviewModel struct {
     canProceed   bool
     message      string
     saveDebugLog bool   // Save detailed error report on failure
+    workDir      string // Custom work directory for extraction
 }

 // NewRestorePreview creates a new restore preview

@@ -81,6 +82,7 @@ func NewRestorePreview(cfg *config.Config, log logger.Logger, parent tea.Model,
         cleanFirst:      false,
         createIfMissing: true,
         checking:        true,
+        workDir:         cfg.WorkDir, // Use configured work directory
         safetyChecks: []SafetyCheck{
             {Name: "Archive integrity", Status: "pending", Critical: true},
             {Name: "Dump validity", Status: "pending", Critical: true},

@@ -280,6 +282,18 @@ func (m RestorePreviewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
                 m.message = "Debug log: disabled"
             }

+        case "w":
+            // Toggle/set work directory
+            if m.workDir == "" {
+                // Set to backup directory as default alternative
+                m.workDir = m.config.BackupDir
+                m.message = infoStyle.Render(fmt.Sprintf("📁 Work directory set to: %s", m.workDir))
+            } else {
+                // Clear work directory (use system temp)
+                m.workDir = ""
+                m.message = "Work directory: using system temp"
+            }
+
         case "enter", " ":
             if m.checking {
                 m.message = "Please wait for safety checks to complete..."

@@ -292,7 +306,7 @@ func (m RestorePreviewModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
             }

             // Proceed to restore execution
-            exec := NewRestoreExecution(m.config, m.logger, m.parent, m.ctx, m.archive, m.targetDB, m.cleanFirst, m.createIfMissing, m.mode, m.cleanClusterFirst, m.existingDBs, m.saveDebugLog)
+            exec := NewRestoreExecution(m.config, m.logger, m.parent, m.ctx, m.archive, m.targetDB, m.cleanFirst, m.createIfMissing, m.mode, m.cleanClusterFirst, m.existingDBs, m.saveDebugLog, m.workDir)
             return exec, exec.Init()
         }
     }

@@ -430,6 +444,24 @@ func (m RestorePreviewModel) View() string {
     // Advanced Options
     s.WriteString(archiveHeaderStyle.Render("⚙️ Advanced Options"))
     s.WriteString("\n")
+
+    // Work directory option
+    workDirIcon := "✗"
+    workDirStyle := infoStyle
+    workDirValue := "(system temp)"
+    if m.workDir != "" {
+        workDirIcon = "✓"
+        workDirStyle = checkPassedStyle
+        workDirValue = m.workDir
+    }
+    s.WriteString(workDirStyle.Render(fmt.Sprintf(" %s Work Dir: %s (press 'w' to toggle)", workDirIcon, workDirValue)))
+    s.WriteString("\n")
+    if m.workDir == "" {
+        s.WriteString(infoStyle.Render(" ⚠️ Large archives need more space than /tmp may have"))
+        s.WriteString("\n")
+    }
+
+    // Debug log option
     debugIcon := "✗"
     debugStyle := infoStyle
     if m.saveDebugLog {

@@ -457,15 +489,15 @@ func (m RestorePreviewModel) View() string {
         s.WriteString(successStyle.Render("✅ Ready to restore"))
         s.WriteString("\n")
         if m.mode == "restore-single" {
-            s.WriteString(infoStyle.Render("⌨️ t: Clean-first | c: Create | d: Debug log | Enter: Proceed | Esc: Cancel"))
+            s.WriteString(infoStyle.Render("⌨️ t: Clean-first | c: Create | w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
         } else if m.mode == "restore-cluster" {
             if m.existingDBCount > 0 {
-                s.WriteString(infoStyle.Render("⌨️ c: Cleanup | d: Debug log | Enter: Proceed | Esc: Cancel"))
+                s.WriteString(infoStyle.Render("⌨️ c: Cleanup | w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
             } else {
-                s.WriteString(infoStyle.Render("⌨️ d: Debug log | Enter: Proceed | Esc: Cancel"))
+                s.WriteString(infoStyle.Render("⌨️ w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
             }
         } else {
-            s.WriteString(infoStyle.Render("⌨️ d: Debug log | Enter: Proceed | Esc: Cancel"))
+            s.WriteString(infoStyle.Render("⌨️ w: WorkDir | d: Debug | Enter: Proceed | Esc: Cancel"))
         }
     } else {
         s.WriteString(errorStyle.Render("❌ Cannot proceed - please fix errors above"))
@@ -115,6 +115,26 @@ func NewSettingsModel(cfg *config.Config, log logger.Logger, parent tea.Model) S
             Type:        "path",
             Description: "Directory where backup files will be stored",
         },
+        {
+            Key:         "work_dir",
+            DisplayName: "Work Directory",
+            Value: func(c *config.Config) string {
+                if c.WorkDir == "" {
+                    return "(system temp)"
+                }
+                return c.WorkDir
+            },
+            Update: func(c *config.Config, v string) error {
+                if v == "" || v == "(system temp)" {
+                    c.WorkDir = ""
+                    return nil
+                }
+                c.WorkDir = filepath.Clean(v)
+                return nil
+            },
+            Type:        "path",
+            Description: "Working directory for large operations (extraction, diagnosis). Use when /tmp is too small.",
+        },
         {
             Key:         "compression_level",
             DisplayName: "Compression Level",
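The Value/Update closures above carry a small convention worth noting: a blank entry or the "(system temp)" placeholder clears the override, and any other path is normalized with filepath.Clean before being stored. A standalone sketch of that logic, using a stand-in Config type rather than the real dbbackup packages:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // Stand-in for config.Config; only the field exercised here.
    type Config struct{ WorkDir string }

    // Mirrors the Update closure of the new "work_dir" settings entry.
    func updateWorkDir(c *Config, v string) error {
        if v == "" || v == "(system temp)" {
            c.WorkDir = ""
            return nil
        }
        c.WorkDir = filepath.Clean(v)
        return nil
    }

    func main() {
        c := &Config{}

        _ = updateWorkDir(c, "/srv/backups/work/") // trailing slash is cleaned away
        fmt.Println(c.WorkDir)                     // /srv/backups/work

        _ = updateWorkDir(c, "(system temp)") // placeholder resets the override
        fmt.Println(c.WorkDir == "")          // true: system temp is used again
    }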