Compare commits
2 Commits
| Author | SHA1 | Date |
|---|---|---|
| | dc12a8e4b0 | |
| | f69a8e374b | |
443
cmd/forecast.go
Normal file
@@ -0,0 +1,443 @@
package cmd

import (
    "context"
    "encoding/json"
    "fmt"
    "math"
    "os"
    "strings"
    "text/tabwriter"
    "time"

    "dbbackup/internal/catalog"

    "github.com/spf13/cobra"
)

var forecastCmd = &cobra.Command{
    Use:   "forecast [database]",
    Short: "Predict future disk space requirements",
    Long: `Analyze backup growth patterns and predict future disk space needs.

This command helps with:
- Capacity planning (when will we run out of space?)
- Budget forecasting (how much storage to provision?)
- Growth trend analysis (is growth accelerating?)
- Alert thresholds (when to add capacity?)

Uses historical backup data to calculate:
- Average daily growth rate
- Growth acceleration/deceleration
- Time until space limit reached
- Projected size at future dates

Examples:
  # Forecast for specific database
  dbbackup forecast mydb

  # Forecast all databases
  dbbackup forecast --all

  # Show projection for 90 days
  dbbackup forecast mydb --days 90

  # Set capacity limit (alert when approaching)
  dbbackup forecast mydb --limit 100GB

  # JSON output for automation
  dbbackup forecast mydb --format json`,
    Args: cobra.MaximumNArgs(1),
    RunE: runForecast,
}

var (
    forecastFormat    string
    forecastAll       bool
    forecastDays      int
    forecastLimitSize string
)

type ForecastResult struct {
    Database     string    `json:"database"`
    CurrentSize  int64     `json:"current_size_bytes"`
    TotalBackups int       `json:"total_backups"`
    OldestBackup time.Time `json:"oldest_backup"`
    NewestBackup time.Time `json:"newest_backup"`
    // Note: time.Duration encodes as nanoseconds in JSON, despite the tag name.
    ObservationPeriod time.Duration        `json:"observation_period_seconds"`
    DailyGrowthRate   float64              `json:"daily_growth_bytes"`
    DailyGrowthPct    float64              `json:"daily_growth_percent"`
    Projections       []ForecastProjection `json:"projections"`
    TimeToLimit       *time.Duration       `json:"time_to_limit_seconds,omitempty"`
    SizeAtLimit       *time.Time           `json:"date_reaching_limit,omitempty"`
    Confidence        string               `json:"confidence"` // "high", "medium", "low"
}

type ForecastProjection struct {
    Days          int       `json:"days_from_now"`
    Date          time.Time `json:"date"`
    PredictedSize int64     `json:"predicted_size_bytes"`
    Confidence    float64   `json:"confidence_percent"`
}

func init() {
    rootCmd.AddCommand(forecastCmd)

    forecastCmd.Flags().StringVar(&forecastFormat, "format", "table", "Output format (table, json)")
    forecastCmd.Flags().BoolVar(&forecastAll, "all", false, "Show forecast for all databases")
    forecastCmd.Flags().IntVar(&forecastDays, "days", 90, "Days to project into future")
    forecastCmd.Flags().StringVar(&forecastLimitSize, "limit", "", "Capacity limit (e.g., '100GB', '1TB')")
}

func runForecast(cmd *cobra.Command, args []string) error {
    cat, err := openCatalog()
    if err != nil {
        return err
    }
    defer cat.Close()

    ctx := context.Background()

    var forecasts []*ForecastResult

    if forecastAll || len(args) == 0 {
        // Get all databases
        databases, err := cat.ListDatabases(ctx)
        if err != nil {
            return err
        }

        for _, db := range databases {
            forecast, err := calculateForecast(ctx, cat, db)
            if err != nil {
                return err
            }
            if forecast != nil {
                forecasts = append(forecasts, forecast)
            }
        }
    } else {
        database := args[0]
        forecast, err := calculateForecast(ctx, cat, database)
        if err != nil {
            return err
        }
        if forecast != nil {
            forecasts = append(forecasts, forecast)
        }
    }

    if len(forecasts) == 0 {
        fmt.Println("No forecast data available.")
        fmt.Println("\nRun 'dbbackup catalog sync <directory>' to import backups.")
        return nil
    }

    // Parse limit if provided
    var limitBytes int64
    if forecastLimitSize != "" {
        limitBytes, err = parseSize(forecastLimitSize)
        if err != nil {
            return fmt.Errorf("invalid limit size: %w", err)
        }
    }

    // Output results
    if forecastFormat == "json" {
        enc := json.NewEncoder(os.Stdout)
        enc.SetIndent("", "  ")
        return enc.Encode(forecasts)
    }

    // Table output
    for i, forecast := range forecasts {
        if i > 0 {
            fmt.Println()
        }
        printForecast(forecast, limitBytes)
    }

    return nil
}

func calculateForecast(ctx context.Context, cat *catalog.SQLiteCatalog, database string) (*ForecastResult, error) {
    // Get all backups for this database, oldest first
    query := &catalog.SearchQuery{
        Database:  database,
        Limit:     1000,
        OrderBy:   "created_at",
        OrderDesc: false,
    }

    entries, err := cat.Search(ctx, query)
    if err != nil {
        return nil, err
    }

    if len(entries) < 2 {
        return nil, nil // Need at least 2 backups for growth rate
    }

    // Calculate metrics
    var totalSize int64
    oldest := entries[0].CreatedAt
    newest := entries[len(entries)-1].CreatedAt

    for _, entry := range entries {
        totalSize += entry.SizeBytes
    }

    // Calculate observation period
    observationPeriod := newest.Sub(oldest)
    if observationPeriod == 0 {
        return nil, nil
    }

    // Calculate daily growth rate
    firstSize := entries[0].SizeBytes
    lastSize := entries[len(entries)-1].SizeBytes
    sizeDelta := float64(lastSize - firstSize)

    daysObserved := observationPeriod.Hours() / 24
    dailyGrowthRate := sizeDelta / daysObserved

    // Calculate daily growth percentage
    var dailyGrowthPct float64
    if firstSize > 0 {
        dailyGrowthPct = (dailyGrowthRate / float64(firstSize)) * 100
    }

    // Determine confidence based on sample size and consistency
    confidence := determineConfidence(entries, dailyGrowthRate)

    // Generate projections
    projections := make([]ForecastProjection, 0)
    projectionDates := []int{7, 30, 60, 90, 180, 365}

    if forecastDays > 0 {
        // Use user-specified days
        projectionDates = []int{forecastDays}
        if forecastDays > 30 {
            projectionDates = []int{7, 30, forecastDays}
        }
    }

    for _, days := range projectionDates {
        if days > 365 && forecastDays == 90 {
            continue // Skip longer projections unless explicitly requested
        }

        predictedSize := lastSize + int64(dailyGrowthRate*float64(days))
        if predictedSize < 0 {
            predictedSize = 0
        }

        // Confidence decreases with time
        confidencePct := calculateConfidence(days, confidence)

        projections = append(projections, ForecastProjection{
            Days:          days,
            Date:          newest.Add(time.Duration(days) * 24 * time.Hour),
            PredictedSize: predictedSize,
            Confidence:    confidencePct,
        })
    }

    result := &ForecastResult{
        Database:          database,
        CurrentSize:       lastSize,
        TotalBackups:      len(entries),
        OldestBackup:      oldest,
        NewestBackup:      newest,
        ObservationPeriod: observationPeriod,
        DailyGrowthRate:   dailyGrowthRate,
        DailyGrowthPct:    dailyGrowthPct,
        Projections:       projections,
        Confidence:        confidence,
    }

    return result, nil
}

func determineConfidence(entries []*catalog.Entry, avgGrowth float64) string {
    if len(entries) < 5 {
        return "low"
    }
    if len(entries) < 15 {
        return "medium"
    }

    // Calculate variance in growth rates between consecutive backups
    var variance float64
    for i := 1; i < len(entries); i++ {
        timeDiff := entries[i].CreatedAt.Sub(entries[i-1].CreatedAt).Hours() / 24
        if timeDiff == 0 {
            continue
        }
        sizeDiff := float64(entries[i].SizeBytes - entries[i-1].SizeBytes)
        growthRate := sizeDiff / timeDiff
        variance += math.Pow(growthRate-avgGrowth, 2)
    }
    variance /= float64(len(entries) - 1)
    stdDev := math.Sqrt(variance)

    // If the standard deviation exceeds 50% of average growth, drop to medium confidence
    if stdDev > math.Abs(avgGrowth)*0.5 {
        return "medium"
    }

    return "high"
}

func calculateConfidence(daysAhead int, baseConfidence string) float64 {
    var base float64
    switch baseConfidence {
    case "high":
        base = 95.0
    case "medium":
        base = 75.0
    case "low":
        base = 50.0
    }

    // Decay confidence over time (10 points per 30 days), floored at 30
    decay := float64(daysAhead) / 30.0 * 10.0
    confidence := base - decay

    if confidence < 30 {
        confidence = 30
    }
    return confidence
}

func printForecast(f *ForecastResult, limitBytes int64) {
    fmt.Printf("[FORECAST] %s\n", f.Database)
    fmt.Println(strings.Repeat("=", 60))

    fmt.Printf("\n[CURRENT STATE]\n")
    fmt.Printf("  Size: %s\n", catalog.FormatSize(f.CurrentSize))
    fmt.Printf("  Backups: %d backups\n", f.TotalBackups)
    fmt.Printf("  Observed: %s (%.0f days)\n",
        formatForecastDuration(f.ObservationPeriod),
        f.ObservationPeriod.Hours()/24)

    fmt.Printf("\n[GROWTH RATE]\n")
    if f.DailyGrowthRate > 0 {
        fmt.Printf("  Daily: +%s/day (%.2f%%/day)\n",
            catalog.FormatSize(int64(f.DailyGrowthRate)), f.DailyGrowthPct)
        fmt.Printf("  Weekly: +%s/week\n", catalog.FormatSize(int64(f.DailyGrowthRate*7)))
        fmt.Printf("  Monthly: +%s/month\n", catalog.FormatSize(int64(f.DailyGrowthRate*30)))
        fmt.Printf("  Annual: +%s/year\n", catalog.FormatSize(int64(f.DailyGrowthRate*365)))
    } else if f.DailyGrowthRate < 0 {
        fmt.Printf("  Daily: %s/day (shrinking)\n", catalog.FormatSize(int64(f.DailyGrowthRate)))
    } else {
        fmt.Printf("  Daily: No growth detected\n")
    }
    fmt.Printf("  Confidence: %s (%d samples)\n", f.Confidence, f.TotalBackups)

    if len(f.Projections) > 0 {
        fmt.Printf("\n[PROJECTIONS]\n")
        w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
        fmt.Fprintf(w, "  Days\tDate\tPredicted Size\tConfidence\n")
        fmt.Fprintf(w, "  ----\t----\t--------------\t----------\n")

        for _, proj := range f.Projections {
            fmt.Fprintf(w, "  %d\t%s\t%s\t%.0f%%\n",
                proj.Days,
                proj.Date.Format("2006-01-02"),
                catalog.FormatSize(proj.PredictedSize),
                proj.Confidence)
        }
        w.Flush()
    }

    // Check against limit
    if limitBytes > 0 {
        fmt.Printf("\n[CAPACITY LIMIT]\n")
        fmt.Printf("  Limit: %s\n", catalog.FormatSize(limitBytes))

        currentPct := float64(f.CurrentSize) / float64(limitBytes) * 100
        fmt.Printf("  Current: %.1f%% used\n", currentPct)

        if f.CurrentSize >= limitBytes {
            fmt.Printf("  Status: [WARN] LIMIT EXCEEDED\n")
        } else if currentPct >= 80 {
            fmt.Printf("  Status: [WARN] Approaching limit\n")
        } else {
            fmt.Printf("  Status: [OK] Within limit\n")
        }

        // Calculate when the limit will be reached
        if f.DailyGrowthRate > 0 {
            remaining := limitBytes - f.CurrentSize
            daysToLimit := float64(remaining) / f.DailyGrowthRate

            if daysToLimit > 0 && daysToLimit < 1000 {
                dateAtLimit := f.NewestBackup.Add(time.Duration(daysToLimit*24) * time.Hour)
                fmt.Printf("  Estimated: Limit reached in %.0f days (%s)\n",
                    daysToLimit, dateAtLimit.Format("2006-01-02"))

                if daysToLimit < 30 {
                    fmt.Printf("  Alert: [CRITICAL] Less than 30 days remaining!\n")
                } else if daysToLimit < 90 {
                    fmt.Printf("  Alert: [WARN] Less than 90 days remaining\n")
                }
            }
        }
    }

    fmt.Println()
}

func formatForecastDuration(d time.Duration) string {
    hours := d.Hours()
    if hours < 24 {
        return fmt.Sprintf("%.1f hours", hours)
    }
    days := hours / 24
    if days < 7 {
        return fmt.Sprintf("%.1f days", days)
    }
    weeks := days / 7
    if weeks < 4 {
        return fmt.Sprintf("%.1f weeks", weeks)
    }
    months := days / 30
    if months < 12 {
        return fmt.Sprintf("%.1f months", months)
    }
    years := days / 365
    return fmt.Sprintf("%.1f years", years)
}

func parseSize(s string) (int64, error) {
    // Simple size parser (supports KB, MB, GB, TB)
    s = strings.ToUpper(strings.TrimSpace(s))

    var multiplier int64 = 1
    var numStr string

    if strings.HasSuffix(s, "TB") {
        multiplier = 1024 * 1024 * 1024 * 1024
        numStr = strings.TrimSuffix(s, "TB")
    } else if strings.HasSuffix(s, "GB") {
        multiplier = 1024 * 1024 * 1024
        numStr = strings.TrimSuffix(s, "GB")
    } else if strings.HasSuffix(s, "MB") {
        multiplier = 1024 * 1024
        numStr = strings.TrimSuffix(s, "MB")
    } else if strings.HasSuffix(s, "KB") {
        multiplier = 1024
        numStr = strings.TrimSuffix(s, "KB")
    } else {
        numStr = s
    }

    var num float64
    _, err := fmt.Sscanf(numStr, "%f", &num)
    if err != nil {
        return 0, fmt.Errorf("invalid size format: %s", s)
    }

    return int64(num * float64(multiplier)), nil
}
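The forecast itself is plain linear extrapolation: daily growth is the size delta between the first and last backup divided by the days observed, the projected size is the latest size plus that rate times the horizon, and the reported confidence decays by 10 points per 30 days with a floor of 30%. A minimal standalone sketch of that arithmetic, using hypothetical sizes rather than anything from this diff:

package main

import "fmt"

func main() {
    // Hypothetical observations: first backup 10 GiB, latest 13 GiB, 30 days apart.
    const gib = 1024 * 1024 * 1024
    firstSize, lastSize := 10.0*gib, 13.0*gib
    daysObserved := 30.0

    // Same arithmetic as calculateForecast above.
    dailyGrowth := (lastSize - firstSize) / daysObserved // ~0.1 GiB/day
    predicted := lastSize + dailyGrowth*90               // linear projection, 90 days out: 22 GiB

    // Same decay as calculateConfidence: "high" starts at 95, loses 10 points per 30 days.
    confidence := 95.0 - 90.0/30.0*10.0 // 65
    if confidence < 30 {
        confidence = 30 // floor applied for very long horizons
    }

    fmt.Printf("growth: %.0f B/day, size in 90 days: %.0f B, confidence: %.0f%%\n",
        dailyGrowth, predicted, confidence)
}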
540
cmd/validate.go
Normal file
@@ -0,0 +1,540 @@
package cmd

import (
    "encoding/json"
    "fmt"
    "net"
    "os"
    "os/exec"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "dbbackup/internal/config"

    "github.com/spf13/cobra"
)

var validateCmd = &cobra.Command{
    Use:   "validate",
    Short: "Validate configuration and environment",
    Long: `Validate dbbackup configuration file and runtime environment.

This command performs comprehensive validation:
- Configuration file syntax and structure
- Database connection parameters
- Directory paths and permissions
- External tool availability (pg_dump, mysqldump)
- Cloud storage credentials (if configured)
- Encryption setup (if enabled)
- Resource limits and system requirements
- Port accessibility

Helps identify configuration issues before running backups.

Examples:
  # Validate default config (.dbbackup.conf)
  dbbackup validate

  # Validate specific config file
  dbbackup validate --config /etc/dbbackup/prod.conf

  # Quick validation (skip connectivity tests)
  dbbackup validate --quick

  # JSON output for automation
  dbbackup validate --format json`,
    RunE: runValidate,
}

var (
    validateFormat string
    validateQuick  bool
)

type ValidationResult struct {
    Valid    bool              `json:"valid"`
    Issues   []ValidationIssue `json:"issues"`
    Warnings []ValidationIssue `json:"warnings"`
    Checks   []ValidationCheck `json:"checks"`
    Summary  string            `json:"summary"`
}

type ValidationIssue struct {
    Category    string `json:"category"`
    Description string `json:"description"`
    Suggestion  string `json:"suggestion,omitempty"`
}

type ValidationCheck struct {
    Name    string `json:"name"`
    Status  string `json:"status"` // "pass", "warn", "fail"
    Message string `json:"message,omitempty"`
}

func init() {
    rootCmd.AddCommand(validateCmd)

    validateCmd.Flags().StringVar(&validateFormat, "format", "table", "Output format (table, json)")
    validateCmd.Flags().BoolVar(&validateQuick, "quick", false, "Quick validation (skip connectivity tests)")
}

func runValidate(cmd *cobra.Command, args []string) error {
    result := &ValidationResult{
        Valid:    true,
        Issues:   []ValidationIssue{},
        Warnings: []ValidationIssue{},
        Checks:   []ValidationCheck{},
    }

    // Validate configuration file
    validateConfigFile(cfg, result)

    // Validate database settings
    validateDatabase(cfg, result)

    // Validate paths
    validatePaths(cfg, result)

    // Validate external tools
    validateTools(cfg, result)

    // Validate cloud storage (if enabled)
    if cfg.CloudEnabled {
        validateCloud(cfg, result)
    }

    // Validate encryption (if enabled)
    if cfg.PITREnabled && cfg.WALEncryption {
        validateEncryption(cfg, result)
    }

    // Validate resource limits
    validateResources(cfg, result)

    // Connectivity tests (unless --quick)
    if !validateQuick {
        validateConnectivity(cfg, result)
    }

    // Determine overall validity
    result.Valid = len(result.Issues) == 0

    // Generate summary
    if result.Valid {
        if len(result.Warnings) > 0 {
            result.Summary = fmt.Sprintf("Configuration valid with %d warning(s)", len(result.Warnings))
        } else {
            result.Summary = "Configuration valid - all checks passed"
        }
    } else {
        result.Summary = fmt.Sprintf("Configuration invalid - %d issue(s) found", len(result.Issues))
    }

    // Output results
    if validateFormat == "json" {
        enc := json.NewEncoder(os.Stdout)
        enc.SetIndent("", "  ")
        return enc.Encode(result)
    }

    printValidationResult(result)

    if !result.Valid {
        return fmt.Errorf("validation failed")
    }

    return nil
}

func validateConfigFile(cfg *config.Config, result *ValidationResult) {
    check := ValidationCheck{Name: "Configuration File"}

    if cfg.ConfigPath == "" {
        check.Status = "warn"
        check.Message = "No config file specified (using defaults)"
        result.Warnings = append(result.Warnings, ValidationIssue{
            Category:    "config",
            Description: "No configuration file found",
            Suggestion:  "Run 'dbbackup backup' to create .dbbackup.conf",
        })
    } else {
        if _, err := os.Stat(cfg.ConfigPath); err != nil {
            check.Status = "warn"
            check.Message = "Config file not found"
            result.Warnings = append(result.Warnings, ValidationIssue{
                Category:    "config",
                Description: fmt.Sprintf("Config file not accessible: %s", cfg.ConfigPath),
                Suggestion:  "Check file path and permissions",
            })
        } else {
            check.Status = "pass"
            check.Message = fmt.Sprintf("Loaded from %s", cfg.ConfigPath)
        }
    }

    result.Checks = append(result.Checks, check)
}

func validateDatabase(cfg *config.Config, result *ValidationResult) {
    // Database type
    check := ValidationCheck{Name: "Database Type"}
    if cfg.DatabaseType != "postgres" && cfg.DatabaseType != "mysql" && cfg.DatabaseType != "mariadb" {
        check.Status = "fail"
        check.Message = fmt.Sprintf("Invalid: %s", cfg.DatabaseType)
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "database",
            Description: fmt.Sprintf("Invalid database type: %s", cfg.DatabaseType),
            Suggestion:  "Use 'postgres', 'mysql', or 'mariadb'",
        })
    } else {
        check.Status = "pass"
        check.Message = cfg.DatabaseType
    }
    result.Checks = append(result.Checks, check)

    // Host
    check = ValidationCheck{Name: "Database Host"}
    if cfg.Host == "" {
        check.Status = "fail"
        check.Message = "Not configured"
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "database",
            Description: "Database host not specified",
            Suggestion:  "Set --host flag or host in config file",
        })
    } else {
        check.Status = "pass"
        check.Message = cfg.Host
    }
    result.Checks = append(result.Checks, check)

    // Port
    check = ValidationCheck{Name: "Database Port"}
    if cfg.Port <= 0 || cfg.Port > 65535 {
        check.Status = "fail"
        check.Message = fmt.Sprintf("Invalid: %d", cfg.Port)
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "database",
            Description: fmt.Sprintf("Invalid port number: %d", cfg.Port),
            Suggestion:  "Use valid port (1-65535)",
        })
    } else {
        check.Status = "pass"
        check.Message = strconv.Itoa(cfg.Port)
    }
    result.Checks = append(result.Checks, check)

    // User
    check = ValidationCheck{Name: "Database User"}
    if cfg.User == "" {
        check.Status = "warn"
        check.Message = "Not configured (using current user)"
        result.Warnings = append(result.Warnings, ValidationIssue{
            Category:    "database",
            Description: "Database user not specified",
            Suggestion:  "Set --user flag or user in config file",
        })
    } else {
        check.Status = "pass"
        check.Message = cfg.User
    }
    result.Checks = append(result.Checks, check)
}

func validatePaths(cfg *config.Config, result *ValidationResult) {
    // Backup directory
    check := ValidationCheck{Name: "Backup Directory"}
    if cfg.BackupDir == "" {
        check.Status = "fail"
        check.Message = "Not configured"
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "paths",
            Description: "Backup directory not specified",
            Suggestion:  "Set --backup-dir flag or backup_dir in config",
        })
    } else {
        info, err := os.Stat(cfg.BackupDir)
        if err != nil {
            check.Status = "warn"
            check.Message = "Does not exist (will be created)"
            result.Warnings = append(result.Warnings, ValidationIssue{
                Category:    "paths",
                Description: fmt.Sprintf("Backup directory does not exist: %s", cfg.BackupDir),
                Suggestion:  "Directory will be created automatically",
            })
        } else if !info.IsDir() {
            check.Status = "fail"
            check.Message = "Not a directory"
            result.Issues = append(result.Issues, ValidationIssue{
                Category:    "paths",
                Description: fmt.Sprintf("Backup path is not a directory: %s", cfg.BackupDir),
                Suggestion:  "Specify a valid directory path",
            })
        } else {
            // Check write permissions by creating a throwaway file
            testFile := filepath.Join(cfg.BackupDir, ".dbbackup-test")
            if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
                check.Status = "fail"
                check.Message = "Not writable"
                result.Issues = append(result.Issues, ValidationIssue{
                    Category:    "paths",
                    Description: fmt.Sprintf("Cannot write to backup directory: %s", cfg.BackupDir),
                    Suggestion:  "Check directory permissions",
                })
            } else {
                os.Remove(testFile)
                check.Status = "pass"
                check.Message = cfg.BackupDir
            }
        }
    }
    result.Checks = append(result.Checks, check)

    // WAL archive directory (if PITR enabled)
    if cfg.PITREnabled {
        check = ValidationCheck{Name: "WAL Archive Directory"}
        if cfg.WALArchiveDir == "" {
            check.Status = "warn"
            check.Message = "Not configured"
            result.Warnings = append(result.Warnings, ValidationIssue{
                Category:    "pitr",
                Description: "PITR enabled but WAL archive directory not set",
                Suggestion:  "Set --wal-archive-dir for PITR functionality",
            })
        } else {
            check.Status = "pass"
            check.Message = cfg.WALArchiveDir
        }
        result.Checks = append(result.Checks, check)
    }
}

func validateTools(cfg *config.Config, result *ValidationResult) {
    // Skip if using native engine
    if cfg.UseNativeEngine {
        check := ValidationCheck{
            Name:    "External Tools",
            Status:  "pass",
            Message: "Using native Go engine (no external tools required)",
        }
        result.Checks = append(result.Checks, check)
        return
    }

    // Check for database tools
    var requiredTools []string
    if cfg.DatabaseType == "postgres" {
        requiredTools = []string{"pg_dump", "pg_restore", "psql"}
    } else if cfg.DatabaseType == "mysql" || cfg.DatabaseType == "mariadb" {
        requiredTools = []string{"mysqldump", "mysql"}
    }

    for _, tool := range requiredTools {
        check := ValidationCheck{Name: fmt.Sprintf("Tool: %s", tool)}
        path, err := exec.LookPath(tool)
        if err != nil {
            check.Status = "fail"
            check.Message = "Not found in PATH"
            result.Issues = append(result.Issues, ValidationIssue{
                Category:    "tools",
                Description: fmt.Sprintf("Required tool not found: %s", tool),
                Suggestion:  fmt.Sprintf("Install %s or use --native flag", tool),
            })
        } else {
            check.Status = "pass"
            check.Message = path
        }
        result.Checks = append(result.Checks, check)
    }
}

func validateCloud(cfg *config.Config, result *ValidationResult) {
    check := ValidationCheck{Name: "Cloud Storage"}

    if cfg.CloudProvider == "" {
        check.Status = "fail"
        check.Message = "Provider not configured"
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "cloud",
            Description: "Cloud enabled but provider not specified",
            Suggestion:  "Set --cloud-provider (s3, gcs, azure, minio, b2)",
        })
    } else {
        check.Status = "pass"
        check.Message = cfg.CloudProvider
    }
    result.Checks = append(result.Checks, check)

    // Bucket
    check = ValidationCheck{Name: "Cloud Bucket"}
    if cfg.CloudBucket == "" {
        check.Status = "fail"
        check.Message = "Not configured"
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "cloud",
            Description: "Cloud bucket/container not specified",
            Suggestion:  "Set --cloud-bucket",
        })
    } else {
        check.Status = "pass"
        check.Message = cfg.CloudBucket
    }
    result.Checks = append(result.Checks, check)

    // Credentials
    check = ValidationCheck{Name: "Cloud Credentials"}
    if cfg.CloudAccessKey == "" || cfg.CloudSecretKey == "" {
        check.Status = "warn"
        check.Message = "Credentials not in config (may use env vars)"
        result.Warnings = append(result.Warnings, ValidationIssue{
            Category:    "cloud",
            Description: "Cloud credentials not in config file",
            Suggestion:  "Ensure AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY or similar env vars are set",
        })
    } else {
        check.Status = "pass"
        check.Message = "Configured"
    }
    result.Checks = append(result.Checks, check)
}

func validateEncryption(cfg *config.Config, result *ValidationResult) {
    check := ValidationCheck{Name: "Encryption"}

    // Check for openssl
    if _, err := exec.LookPath("openssl"); err != nil {
        check.Status = "fail"
        check.Message = "openssl not found"
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "encryption",
            Description: "Encryption enabled but openssl not available",
            Suggestion:  "Install openssl or disable WAL encryption",
        })
    } else {
        check.Status = "pass"
        check.Message = "openssl available"
    }

    result.Checks = append(result.Checks, check)
}

func validateResources(cfg *config.Config, result *ValidationResult) {
    // CPU cores
    check := ValidationCheck{Name: "CPU Cores"}
    if cfg.MaxCores < 1 {
        check.Status = "fail"
        check.Message = "Invalid core count"
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "resources",
            Description: "Invalid max cores setting",
            Suggestion:  "Set --max-cores to positive value",
        })
    } else {
        check.Status = "pass"
        check.Message = fmt.Sprintf("%d cores", cfg.MaxCores)
    }
    result.Checks = append(result.Checks, check)

    // Jobs
    check = ValidationCheck{Name: "Parallel Jobs"}
    if cfg.Jobs < 1 {
        check.Status = "fail"
        check.Message = "Invalid job count"
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "resources",
            Description: "Invalid jobs setting",
            Suggestion:  "Set --jobs to positive value",
        })
    } else if cfg.Jobs > cfg.MaxCores*2 {
        check.Status = "warn"
        check.Message = fmt.Sprintf("%d jobs (high)", cfg.Jobs)
        result.Warnings = append(result.Warnings, ValidationIssue{
            Category:    "resources",
            Description: "Jobs count is more than double the configured CPU cores",
            Suggestion:  "Consider reducing --jobs for better performance",
        })
    } else {
        check.Status = "pass"
        check.Message = fmt.Sprintf("%d jobs", cfg.Jobs)
    }
    result.Checks = append(result.Checks, check)
}

func validateConnectivity(cfg *config.Config, result *ValidationResult) {
    check := ValidationCheck{Name: "Database Connectivity"}

    // Try to connect to the database port with a 5-second timeout
    address := net.JoinHostPort(cfg.Host, strconv.Itoa(cfg.Port))
    conn, err := net.DialTimeout("tcp", address, 5*time.Second)
    if err != nil {
        check.Status = "fail"
        check.Message = fmt.Sprintf("Cannot connect to %s", address)
        result.Issues = append(result.Issues, ValidationIssue{
            Category:    "connectivity",
            Description: fmt.Sprintf("Cannot connect to database: %v", err),
            Suggestion:  "Check host, port, and network connectivity",
        })
    } else {
        conn.Close()
        check.Status = "pass"
        check.Message = fmt.Sprintf("Connected to %s", address)
    }

    result.Checks = append(result.Checks, check)
}

func printValidationResult(result *ValidationResult) {
    fmt.Println("\n[VALIDATION REPORT]")
    fmt.Println(strings.Repeat("=", 60))

    // Print checks
    fmt.Println("\n[CHECKS]")
    for _, check := range result.Checks {
        var status string
        switch check.Status {
        case "pass":
            status = "[PASS]"
        case "warn":
            status = "[WARN]"
        case "fail":
            status = "[FAIL]"
        }

        fmt.Printf("  %-25s %s", check.Name+":", status)
        if check.Message != "" {
            fmt.Printf(" %s", check.Message)
        }
        fmt.Println()
    }

    // Print issues
    if len(result.Issues) > 0 {
        fmt.Println("\n[ISSUES]")
        for i, issue := range result.Issues {
            fmt.Printf("  %d. [%s] %s\n", i+1, strings.ToUpper(issue.Category), issue.Description)
            if issue.Suggestion != "" {
                fmt.Printf("     → %s\n", issue.Suggestion)
            }
        }
    }

    // Print warnings
    if len(result.Warnings) > 0 {
        fmt.Println("\n[WARNINGS]")
        for i, warning := range result.Warnings {
            fmt.Printf("  %d. [%s] %s\n", i+1, strings.ToUpper(warning.Category), warning.Description)
            if warning.Suggestion != "" {
                fmt.Printf("     → %s\n", warning.Suggestion)
            }
        }
    }

    // Print summary
    fmt.Println("\n" + strings.Repeat("=", 60))
    if result.Valid {
        fmt.Printf("[OK] %s\n\n", result.Summary)
    } else {
        fmt.Printf("[FAIL] %s\n\n", result.Summary)
    }
}
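Because `--format json` emits the `ValidationResult` structure defined above, automation can gate on the `valid` field instead of parsing the table output. A minimal consumer sketch: the field names come from the struct tags above, while piping `dbbackup validate --format json` into it is an assumed usage, not something this diff prescribes.

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// Mirrors just the ValidationResult fields this check needs;
// tag names match the struct definitions above.
type validation struct {
    Valid   bool   `json:"valid"`
    Summary string `json:"summary"`
}

func main() {
    // Assumed invocation: dbbackup validate --format json | this-program
    var v validation
    if err := json.NewDecoder(os.Stdin).Decode(&v); err != nil {
        fmt.Fprintln(os.Stderr, "decode:", err)
        os.Exit(2)
    }
    if !v.Valid {
        fmt.Fprintln(os.Stderr, v.Summary)
        os.Exit(1)
    }
    fmt.Println(v.Summary)
}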