Compare commits

10 Commits

| SHA1 |
|---|
| b2e0c739e0 |
| ad23abdf4e |
| 390b830976 |
| 7e53950967 |
| 59d2094241 |
| b1f8c6d646 |
| b05c2be19d |
| ec33959e3e |
| 92402f0fdb |
| 682510d1bc |
@@ -56,7 +56,7 @@ jobs:
       - name: Install and run golangci-lint
         run: |
-          go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.62.2
+          go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.8.0
           golangci-lint run --timeout=5m ./...

   build-and-release:
@@ -1,16 +1,16 @@
 # golangci-lint configuration - relaxed for existing codebase
+version: "2"

 run:
   timeout: 5m
-  tests: false

 linters:
-  disable-all: true
+  default: none
   enable:
     # Only essential linters that catch real bugs
     - govet
-    - ineffassign

-linters-settings:
+settings:
   govet:
     disable:
       - fieldalignment
SYSTEMD.md (21 changes)
@@ -116,8 +116,9 @@ sudo chmod 755 /usr/local/bin/dbbackup
 ### Step 2: Create Configuration

 ```bash
-# Main configuration
-sudo tee /etc/dbbackup/dbbackup.conf << 'EOF'
+# Main configuration in working directory (where service runs from)
+# dbbackup reads .dbbackup.conf from WorkingDirectory
+sudo tee /var/lib/dbbackup/.dbbackup.conf << 'EOF'
 # DBBackup Configuration
 db-type=postgres
 host=localhost
@@ -128,6 +129,8 @@ compression=6
 retention-days=30
 min-backups=7
 EOF
+sudo chown dbbackup:dbbackup /var/lib/dbbackup/.dbbackup.conf
+sudo chmod 600 /var/lib/dbbackup/.dbbackup.conf

 # Instance credentials (secure permissions)
 sudo tee /etc/dbbackup/env.d/cluster.conf << 'EOF'
@@ -157,13 +160,15 @@ Group=dbbackup
 # Load configuration
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf

-# Working directory
+# Working directory (config is loaded from .dbbackup.conf here)
 WorkingDirectory=/var/lib/dbbackup

-# Execute backup
+# Execute backup (reads .dbbackup.conf from WorkingDirectory)
 ExecStart=/usr/local/bin/dbbackup backup cluster \
-    --config /etc/dbbackup/dbbackup.conf \
     --backup-dir /var/lib/dbbackup/backups \
+    --host localhost \
+    --port 5432 \
+    --user postgres \
     --allow-root

 # Security hardening
@@ -443,12 +448,12 @@ sudo systemctl status dbbackup-cluster.service
 # View detailed error
 sudo journalctl -u dbbackup-cluster.service -n 50 --no-pager

-# Test manually as dbbackup user
-sudo -u dbbackup /usr/local/bin/dbbackup backup cluster --config /etc/dbbackup/dbbackup.conf
+# Test manually as dbbackup user (run from working directory with .dbbackup.conf)
+cd /var/lib/dbbackup && sudo -u dbbackup /usr/local/bin/dbbackup backup cluster

 # Check permissions
 ls -la /var/lib/dbbackup/
-ls -la /etc/dbbackup/
+ls -la /var/lib/dbbackup/.dbbackup.conf
 ```

 ### Permission Denied
@@ -4,8 +4,8 @@ This directory contains pre-compiled binaries for the DB Backup Tool across mult

 ## Build Information
 - **Version**: 3.42.10
-- **Build Time**: 2026-01-08_09:40:57_UTC
-- **Git Commit**: 55d34be
+- **Build Time**: 2026-01-12_08:50:35_UTC
+- **Git Commit**: b1f8c6d

 ## Recent Updates (v1.1.0)
 - ✅ Fixed TUI progress display with line-by-line output
cmd/dedup.go (721 changes)
@@ -1,11 +1,13 @@
 package cmd

 import (
+	"compress/gzip"
 	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
 	"io"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"strings"
 	"time"
@@ -34,7 +36,24 @@ Storage Structure:
   chunks/      # Content-addressed chunk files
     ab/cdef... # Sharded by first 2 chars of hash
   manifests/   # JSON manifest per backup
-  chunks.db    # SQLite index`,
+  chunks.db    # SQLite index
+
+NFS/CIFS NOTICE:
+  SQLite may have locking issues on network storage.
+  Use --index-db to put the SQLite index on local storage while keeping
+  chunks on network storage:
+
+    dbbackup dedup backup mydb.sql \
+      --dedup-dir /mnt/nfs/backups/dedup \
+      --index-db /var/lib/dbbackup/dedup-index.db
+
+  This avoids "database is locked" errors while still storing chunks remotely.
+
+COMPRESSED INPUT NOTICE:
+  Pre-compressed files (.gz) have poor deduplication ratios (<10%).
+  Use --decompress-input to decompress before chunking for better results:
+
+    dbbackup dedup backup mydb.sql.gz --decompress-input`,
 }

 var dedupBackupCmd = &cobra.Command{
@@ -89,9 +108,85 @@ var dedupDeleteCmd = &cobra.Command{
 	RunE: runDedupDelete,
 }

+var dedupVerifyCmd = &cobra.Command{
+	Use:   "verify [manifest-id]",
+	Short: "Verify chunk integrity against manifests",
+	Long: `Verify that all chunks referenced by manifests exist and have correct hashes.
+
+Without arguments, verifies all backups. With a manifest ID, verifies only that backup.
+
+Examples:
+  dbbackup dedup verify                  # Verify all backups
+  dbbackup dedup verify 2026-01-07_mydb  # Verify specific backup`,
+	RunE: runDedupVerify,
+}
+
+var dedupPruneCmd = &cobra.Command{
+	Use:   "prune",
+	Short: "Apply retention policy to manifests",
+	Long: `Delete old manifests based on retention policy (like borg prune).
+
+Keeps a specified number of recent backups per database and deletes the rest.
+
+Examples:
+  dbbackup dedup prune --keep-last 7                   # Keep 7 most recent
+  dbbackup dedup prune --keep-daily 7 --keep-weekly 4  # Keep 7 daily + 4 weekly`,
+	RunE: runDedupPrune,
+}
+
+var dedupBackupDBCmd = &cobra.Command{
+	Use:   "backup-db",
+	Short: "Direct database dump with deduplication",
+	Long: `Dump a database directly into deduplicated chunks without temp files.
+
+Streams the database dump through the chunker for efficient deduplication.
+
+Examples:
+  dbbackup dedup backup-db --db-type postgres --db-name mydb
+  dbbackup dedup backup-db -d mariadb --database production_db --host db.local`,
+	RunE: runDedupBackupDB,
+}
+
+// Prune flags
+var (
+	pruneKeepLast   int
+	pruneKeepDaily  int
+	pruneKeepWeekly int
+	pruneDryRun     bool
+)
+
+// backup-db flags
+var (
+	backupDBDatabase string
+	backupDBUser     string
+	backupDBPassword string
+)
+
+// metrics flags
+var (
+	dedupMetricsOutput   string
+	dedupMetricsInstance string
+)
+
+var dedupMetricsCmd = &cobra.Command{
+	Use:   "metrics",
+	Short: "Export dedup statistics as Prometheus metrics",
+	Long: `Export deduplication statistics in Prometheus format.
+
+Can write to a textfile for node_exporter's textfile collector,
+or print to stdout for custom integrations.
+
+Examples:
+  dbbackup dedup metrics                 # Print to stdout
+  dbbackup dedup metrics --output /var/lib/node_exporter/textfile_collector/dedup.prom
+  dbbackup dedup metrics --instance prod-db-1`,
+	RunE: runDedupMetrics,
+}
+
 // Flags
 var (
 	dedupDir      string
+	dedupIndexDB  string // Separate path for SQLite index (for NFS/CIFS support)
 	dedupCompress bool
 	dedupEncrypt  bool
 	dedupKey      string
@@ -99,6 +194,7 @@ var (
 	dedupDBType string
 	dedupDBName string
 	dedupDBHost string
+	dedupDecompress bool // Auto-decompress gzip input
 )

 func init() {
@@ -109,9 +205,14 @@ func init() {
 	dedupCmd.AddCommand(dedupStatsCmd)
 	dedupCmd.AddCommand(dedupGCCmd)
 	dedupCmd.AddCommand(dedupDeleteCmd)
+	dedupCmd.AddCommand(dedupVerifyCmd)
+	dedupCmd.AddCommand(dedupPruneCmd)
+	dedupCmd.AddCommand(dedupBackupDBCmd)
+	dedupCmd.AddCommand(dedupMetricsCmd)

 	// Global dedup flags
 	dedupCmd.PersistentFlags().StringVar(&dedupDir, "dedup-dir", "", "Dedup storage directory (default: $BACKUP_DIR/dedup)")
+	dedupCmd.PersistentFlags().StringVar(&dedupIndexDB, "index-db", "", "SQLite index path (local recommended for NFS/CIFS chunk dirs)")
 	dedupCmd.PersistentFlags().BoolVar(&dedupCompress, "compress", true, "Compress chunks with gzip")
 	dedupCmd.PersistentFlags().BoolVar(&dedupEncrypt, "encrypt", false, "Encrypt chunks with AES-256-GCM")
 	dedupCmd.PersistentFlags().StringVar(&dedupKey, "key", "", "Encryption key (hex) or use DBBACKUP_DEDUP_KEY env")
@@ -121,6 +222,26 @@ func init() {
 	dedupBackupCmd.Flags().StringVar(&dedupDBType, "db-type", "", "Database type (postgres/mysql)")
 	dedupBackupCmd.Flags().StringVar(&dedupDBName, "db-name", "", "Database name")
 	dedupBackupCmd.Flags().StringVar(&dedupDBHost, "db-host", "", "Database host")
+	dedupBackupCmd.Flags().BoolVar(&dedupDecompress, "decompress-input", false, "Auto-decompress gzip input before chunking (improves dedup ratio)")
+
+	// Prune flags
+	dedupPruneCmd.Flags().IntVar(&pruneKeepLast, "keep-last", 0, "Keep the last N backups")
+	dedupPruneCmd.Flags().IntVar(&pruneKeepDaily, "keep-daily", 0, "Keep N daily backups")
+	dedupPruneCmd.Flags().IntVar(&pruneKeepWeekly, "keep-weekly", 0, "Keep N weekly backups")
+	dedupPruneCmd.Flags().BoolVar(&pruneDryRun, "dry-run", false, "Show what would be deleted without actually deleting")
+
+	// backup-db flags
+	dedupBackupDBCmd.Flags().StringVarP(&dedupDBType, "db-type", "d", "", "Database type (postgres/mariadb/mysql)")
+	dedupBackupDBCmd.Flags().StringVar(&backupDBDatabase, "database", "", "Database name to backup")
+	dedupBackupDBCmd.Flags().StringVar(&dedupDBHost, "host", "localhost", "Database host")
+	dedupBackupDBCmd.Flags().StringVarP(&backupDBUser, "user", "u", "", "Database user")
+	dedupBackupDBCmd.Flags().StringVarP(&backupDBPassword, "password", "p", "", "Database password (or use env)")
+	dedupBackupDBCmd.MarkFlagRequired("db-type")
+	dedupBackupDBCmd.MarkFlagRequired("database")
+
+	// Metrics flags
+	dedupMetricsCmd.Flags().StringVarP(&dedupMetricsOutput, "output", "o", "", "Output file path (default: stdout)")
+	dedupMetricsCmd.Flags().StringVar(&dedupMetricsInstance, "instance", "", "Instance label for metrics (default: hostname)")
 }

 func getDedupDir() string {
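The `MarkFlagRequired` calls above make cobra reject the invocation before `RunE` ever runs. A minimal self-contained sketch of that pattern (a toy command, not this PR's actual wiring):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var dbType, database string
	cmd := &cobra.Command{
		Use: "backup-db",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("would dump", database, "as", dbType)
			return nil
		},
	}
	cmd.Flags().StringVarP(&dbType, "db-type", "d", "", "Database type")
	cmd.Flags().StringVar(&database, "database", "", "Database name")
	cmd.MarkFlagRequired("db-type")
	cmd.MarkFlagRequired("database")

	// Omitting --database here would fail with a "required flag(s)" error
	// before RunE executes.
	cmd.SetArgs([]string{"--db-type", "postgres", "--database", "mydb"})
	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err)
	}
}
```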
@@ -133,6 +254,14 @@ func getDedupDir() string {
 	return filepath.Join(os.Getenv("HOME"), "db_backups", "dedup")
 }

+func getIndexDBPath() string {
+	if dedupIndexDB != "" {
+		return dedupIndexDB
+	}
+	// Default: same directory as chunks (may have issues on NFS/CIFS)
+	return filepath.Join(getDedupDir(), "chunks.db")
+}
+
 func getEncryptionKey() string {
 	if dedupKey != "" {
 		return dedupKey
@@ -155,6 +284,25 @@ func runDedupBackup(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("failed to stat input file: %w", err)
 	}

+	// Check for compressed input and warn/handle
+	var reader io.Reader = file
+	isGzipped := strings.HasSuffix(strings.ToLower(inputPath), ".gz")
+	if isGzipped && !dedupDecompress {
+		fmt.Printf("Warning: Input appears to be gzip compressed (.gz)\n")
+		fmt.Printf("  Compressed data typically has poor dedup ratios (<10%%).\n")
+		fmt.Printf("  Consider using --decompress-input for better deduplication.\n\n")
+	}
+
+	if isGzipped && dedupDecompress {
+		fmt.Printf("Auto-decompressing gzip input for better dedup ratio...\n")
+		gzReader, err := gzip.NewReader(file)
+		if err != nil {
+			return fmt.Errorf("failed to decompress gzip input: %w", err)
+		}
+		defer gzReader.Close()
+		reader = gzReader
+	}
+
 	// Setup dedup storage
 	basePath := getDedupDir()
 	encKey := ""
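The `.gz` detection above goes by file extension. A standalone sketch of sniffing the two gzip magic bytes instead (hypothetical `isGzipStream` helper, not part of this diff):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
)

// isGzipStream peeks at the first two bytes (0x1f, 0x8b) instead of
// trusting the file extension. It returns a reader with the peeked
// bytes still available for subsequent reads.
func isGzipStream(r io.Reader) (bool, io.Reader, error) {
	br := bufio.NewReader(r)
	magic, err := br.Peek(2)
	if err != nil && err != io.EOF {
		return false, br, err
	}
	ok := len(magic) == 2 && magic[0] == 0x1f && magic[1] == 0x8b
	return ok, br, nil
}

func main() {
	f, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	gz, _, _ := isGzipStream(f)
	fmt.Println("gzip:", gz)
}
```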
@@ -179,7 +327,7 @@ func runDedupBackup(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("failed to open manifest store: %w", err)
 	}

-	index, err := dedup.NewChunkIndex(basePath)
+	index, err := dedup.NewChunkIndexAt(getIndexDBPath())
 	if err != nil {
 		return fmt.Errorf("failed to open chunk index: %w", err)
 	}
@@ -193,22 +341,43 @@ func runDedupBackup(cmd *cobra.Command, args []string) error {
 	} else {
 		base := filepath.Base(inputPath)
 		ext := filepath.Ext(base)
+		// Remove .gz extension if decompressing
+		if isGzipped && dedupDecompress {
+			base = strings.TrimSuffix(base, ext)
+			ext = filepath.Ext(base)
+		}
 		manifestID += "_" + strings.TrimSuffix(base, ext)
 	}

 	fmt.Printf("Creating deduplicated backup: %s\n", manifestID)
 	fmt.Printf("Input: %s (%s)\n", inputPath, formatBytes(info.Size()))
+	if isGzipped && dedupDecompress {
+		fmt.Printf("Mode: Decompressing before chunking\n")
+	}
 	fmt.Printf("Store: %s\n", basePath)
+	if dedupIndexDB != "" {
+		fmt.Printf("Index: %s\n", getIndexDBPath())
+	}

-	// Hash the entire file for verification
-	file.Seek(0, 0)
+	// For decompressed input, we can't seek - use TeeReader to hash while chunking
 	h := sha256.New()
-	io.Copy(h, file)
-	fileHash := hex.EncodeToString(h.Sum(nil))
+	var chunkReader io.Reader
+	if isGzipped && dedupDecompress {
+		// Can't seek on gzip stream - hash will be computed inline
+		chunkReader = io.TeeReader(reader, h)
+	} else {
+		// Regular file - hash first, then reset and chunk
 		file.Seek(0, 0)
+		io.Copy(h, file)
+		file.Seek(0, 0)
+		chunkReader = file
+		h = sha256.New() // Reset for inline hashing
+		chunkReader = io.TeeReader(file, h)
+	}

 	// Chunk the file
-	chunker := dedup.NewChunker(file, dedup.DefaultChunkerConfig())
+	chunker := dedup.NewChunker(chunkReader, dedup.DefaultChunkerConfig())
 	var chunks []dedup.ChunkRef
 	var totalSize, storedSize int64
 	var chunkCount, newChunks int
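A minimal standalone sketch of the hash-while-chunking pattern introduced here: `io.TeeReader` mirrors every byte the chunking loop consumes into the SHA-256 state, so a non-seekable stream needs only one pass. Fixed-size toy chunks stand in for the real content-defined chunker:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	// A non-seekable stream (stand-in for a gzip reader or a pipe).
	src := strings.NewReader("example backup payload")

	// Every Read through tee also feeds the hasher, so one pass yields
	// both the chunks and the whole-stream SHA-256.
	h := sha256.New()
	tee := io.TeeReader(src, h)

	buf := make([]byte, 8) // tiny fixed-size "chunks" for illustration
	for {
		n, err := tee.Read(buf)
		if n > 0 {
			fmt.Printf("chunk: %q\n", buf[:n])
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	fmt.Println("sha256:", hex.EncodeToString(h.Sum(nil)))
}
```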
@@ -254,6 +423,9 @@ func runDedupBackup(cmd *cobra.Command, args []string) error {

 	duration := time.Since(startTime)

+	// Get final hash (computed inline via TeeReader)
+	fileHash := hex.EncodeToString(h.Sum(nil))
+
 	// Calculate dedup ratio
 	dedupRatio := 0.0
 	if totalSize > 0 {
@@ -277,6 +449,7 @@ func runDedupBackup(cmd *cobra.Command, args []string) error {
 		Encrypted:  dedupEncrypt,
 		Compressed: dedupCompress,
 		SHA256:     fileHash,
+		Decompressed: isGzipped && dedupDecompress, // Track if we decompressed
 	}

 	if err := manifestStore.Save(manifest); err != nil {
@@ -451,8 +624,12 @@ func runDedupStats(cmd *cobra.Command, args []string) error {
 	fmt.Printf("Unique chunks:  %d\n", stats.TotalChunks)
 	fmt.Printf("Total raw size: %s\n", formatBytes(stats.TotalSizeRaw))
 	fmt.Printf("Stored size:    %s\n", formatBytes(stats.TotalSizeStored))
+	fmt.Printf("\n")
+	fmt.Printf("Backup Statistics (accurate dedup calculation):\n")
+	fmt.Printf("  Total backed up: %s (across all backups)\n", formatBytes(stats.TotalBackupSize))
+	fmt.Printf("  New data stored: %s\n", formatBytes(stats.TotalNewData))
+	fmt.Printf("  Space saved:     %s\n", formatBytes(stats.SpaceSaved))
 	fmt.Printf("  Dedup ratio:     %.1f%%\n", stats.DedupRatio*100)
-	fmt.Printf("Space saved:    %s\n", formatBytes(stats.TotalSizeRaw-stats.TotalSizeStored))

 	if storeStats != nil {
 		fmt.Printf("Disk usage: %s\n", formatBytes(storeStats.TotalSize))
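A toy illustration of why the new per-backup statistics differ from the raw unique-chunk totals: two backups that share one chunk back up twice the data they store, a saving the raw chunk sizes alone cannot show. The numbers below are made up for the example:

```go
package main

import "fmt"

func main() {
	// Two backups, each referencing the same single 100-byte chunk.
	backupSizes := []int64{100, 100}
	var totalBackedUp, newData int64
	stored := map[string]bool{}
	for _, size := range backupSizes {
		totalBackedUp += size
		if !stored["chunk-A"] { // only the first backup stores the chunk
			stored["chunk-A"] = true
			newData += size
		}
	}
	saved := totalBackedUp - newData
	// Prints: backed up 200, stored 100, saved 100 (ratio 50%)
	fmt.Printf("backed up %d, stored %d, saved %d (ratio %.0f%%)\n",
		totalBackedUp, newData, saved, 100*float64(saved)/float64(totalBackedUp))
}
```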
@@ -577,3 +754,531 @@ func truncateStr(s string, max int) string {
 	}
 	return s[:max-3] + "..."
 }
+
+func runDedupVerify(cmd *cobra.Command, args []string) error {
+	basePath := getDedupDir()
+
+	store, err := dedup.NewChunkStore(dedup.StoreConfig{
+		BasePath: basePath,
+		Compress: dedupCompress,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to open chunk store: %w", err)
+	}
+
+	manifestStore, err := dedup.NewManifestStore(basePath)
+	if err != nil {
+		return fmt.Errorf("failed to open manifest store: %w", err)
+	}
+
+	index, err := dedup.NewChunkIndexAt(getIndexDBPath())
+	if err != nil {
+		return fmt.Errorf("failed to open chunk index: %w", err)
+	}
+	defer index.Close()
+
+	var manifests []*dedup.Manifest
+
+	if len(args) > 0 {
+		// Verify specific manifest
+		m, err := manifestStore.Load(args[0])
+		if err != nil {
+			return fmt.Errorf("failed to load manifest: %w", err)
+		}
+		manifests = []*dedup.Manifest{m}
+	} else {
+		// Verify all manifests
+		manifests, err = manifestStore.ListAll()
+		if err != nil {
+			return fmt.Errorf("failed to list manifests: %w", err)
+		}
+	}
+
+	if len(manifests) == 0 {
+		fmt.Println("No manifests to verify.")
+		return nil
+	}
+
+	fmt.Printf("Verifying %d backup(s)...\n\n", len(manifests))
+
+	var totalChunks, missingChunks, corruptChunks int
+	var allOK = true
+
+	for _, m := range manifests {
+		fmt.Printf("Verifying: %s (%d chunks)\n", m.ID, m.ChunkCount)
+
+		var missing, corrupt int
+		seenHashes := make(map[string]bool)
+
+		for i, ref := range m.Chunks {
+			if seenHashes[ref.Hash] {
+				continue // Already verified this chunk
+			}
+			seenHashes[ref.Hash] = true
+			totalChunks++
+
+			// Check if chunk exists
+			if !store.Has(ref.Hash) {
+				missing++
+				missingChunks++
+				if missing <= 5 {
+					fmt.Printf("  [MISSING] chunk %d: %s\n", i, ref.Hash[:16])
+				}
+				continue
+			}
+
+			// Verify chunk hash by reading it
+			chunk, err := store.Get(ref.Hash)
+			if err != nil {
+				corrupt++
+				corruptChunks++
+				if corrupt <= 5 {
+					fmt.Printf("  [CORRUPT] chunk %d: %s - %v\n", i, ref.Hash[:16], err)
+				}
+				continue
+			}
+
+			// Verify size
+			if chunk.Length != ref.Length {
+				corrupt++
+				corruptChunks++
+				if corrupt <= 5 {
+					fmt.Printf("  [SIZE MISMATCH] chunk %d: expected %d, got %d\n", i, ref.Length, chunk.Length)
+				}
+			}
+		}
+
+		if missing > 0 || corrupt > 0 {
+			allOK = false
+			fmt.Printf("  Result: FAILED (%d missing, %d corrupt)\n", missing, corrupt)
+			if missing > 5 || corrupt > 5 {
+				fmt.Printf("  ... and %d more errors\n", (missing+corrupt)-10)
+			}
+		} else {
+			fmt.Printf("  Result: OK (%d unique chunks verified)\n", len(seenHashes))
+			// Update verified timestamp
+			m.VerifiedAt = time.Now()
+			manifestStore.Save(m)
+			index.UpdateManifestVerified(m.ID, m.VerifiedAt)
+		}
+		fmt.Println()
+	}
+
+	fmt.Println("========================================")
+	if allOK {
+		fmt.Printf("All %d backup(s) verified successfully!\n", len(manifests))
+		fmt.Printf("Total unique chunks checked: %d\n", totalChunks)
+	} else {
+		fmt.Printf("Verification FAILED!\n")
+		fmt.Printf("Missing chunks: %d\n", missingChunks)
+		fmt.Printf("Corrupt chunks: %d\n", corruptChunks)
+		return fmt.Errorf("verification failed: %d missing, %d corrupt chunks", missingChunks, corruptChunks)
+	}
+
+	return nil
+}
+
+func runDedupPrune(cmd *cobra.Command, args []string) error {
+	if pruneKeepLast == 0 && pruneKeepDaily == 0 && pruneKeepWeekly == 0 {
+		return fmt.Errorf("at least one of --keep-last, --keep-daily, or --keep-weekly must be specified")
+	}
+
+	basePath := getDedupDir()
+
+	manifestStore, err := dedup.NewManifestStore(basePath)
+	if err != nil {
+		return fmt.Errorf("failed to open manifest store: %w", err)
+	}
+
+	index, err := dedup.NewChunkIndexAt(getIndexDBPath())
+	if err != nil {
+		return fmt.Errorf("failed to open chunk index: %w", err)
+	}
+	defer index.Close()
+
+	manifests, err := manifestStore.ListAll()
+	if err != nil {
+		return fmt.Errorf("failed to list manifests: %w", err)
+	}
+
+	if len(manifests) == 0 {
+		fmt.Println("No backups to prune.")
+		return nil
+	}
+
+	// Group by database name
+	byDatabase := make(map[string][]*dedup.Manifest)
+	for _, m := range manifests {
+		key := m.DatabaseName
+		if key == "" {
+			key = "_default"
+		}
+		byDatabase[key] = append(byDatabase[key], m)
+	}
+
+	var toDelete []*dedup.Manifest
+
+	for dbName, dbManifests := range byDatabase {
+		// Already sorted by time (newest first from ListAll)
+		kept := make(map[string]bool)
+		var keepReasons = make(map[string]string)
+
+		// Keep last N
+		if pruneKeepLast > 0 {
+			for i := 0; i < pruneKeepLast && i < len(dbManifests); i++ {
+				kept[dbManifests[i].ID] = true
+				keepReasons[dbManifests[i].ID] = "keep-last"
+			}
+		}
+
+		// Keep daily (one per day)
+		if pruneKeepDaily > 0 {
+			seenDays := make(map[string]bool)
+			count := 0
+			for _, m := range dbManifests {
+				day := m.CreatedAt.Format("2006-01-02")
+				if !seenDays[day] {
+					seenDays[day] = true
+					if count < pruneKeepDaily {
+						kept[m.ID] = true
+						if keepReasons[m.ID] == "" {
+							keepReasons[m.ID] = "keep-daily"
+						}
+						count++
+					}
+				}
+			}
+		}
+
+		// Keep weekly (one per week)
+		if pruneKeepWeekly > 0 {
+			seenWeeks := make(map[string]bool)
+			count := 0
+			for _, m := range dbManifests {
+				year, week := m.CreatedAt.ISOWeek()
+				weekKey := fmt.Sprintf("%d-W%02d", year, week)
+				if !seenWeeks[weekKey] {
+					seenWeeks[weekKey] = true
+					if count < pruneKeepWeekly {
+						kept[m.ID] = true
+						if keepReasons[m.ID] == "" {
+							keepReasons[m.ID] = "keep-weekly"
+						}
+						count++
+					}
+				}
+			}
+		}
+
+		if dbName != "_default" {
+			fmt.Printf("\nDatabase: %s\n", dbName)
+		} else {
+			fmt.Printf("\nUnnamed backups:\n")
+		}
+
+		for _, m := range dbManifests {
+			if kept[m.ID] {
+				fmt.Printf("  [KEEP]   %s (%s) - %s\n", m.ID, m.CreatedAt.Format("2006-01-02"), keepReasons[m.ID])
+			} else {
+				fmt.Printf("  [DELETE] %s (%s)\n", m.ID, m.CreatedAt.Format("2006-01-02"))
+				toDelete = append(toDelete, m)
+			}
+		}
+	}
+
+	if len(toDelete) == 0 {
+		fmt.Printf("\nNo backups to prune (all match retention policy).\n")
+		return nil
+	}
+
+	fmt.Printf("\n%d backup(s) will be deleted.\n", len(toDelete))
+
+	if pruneDryRun {
+		fmt.Println("\n[DRY RUN] No changes made. Remove --dry-run to actually delete.")
+		return nil
+	}
+
+	// Actually delete
+	for _, m := range toDelete {
+		// Decrement chunk references
+		for _, ref := range m.Chunks {
+			index.DecrementRef(ref.Hash)
+		}
+
+		if err := manifestStore.Delete(m.ID); err != nil {
+			log.Warn("Failed to delete manifest", "id", m.ID, "error", err)
+		}
+		index.RemoveManifest(m.ID)
+	}
+
+	fmt.Printf("\nDeleted %d backup(s).\n", len(toDelete))
+	fmt.Println("Run 'dbbackup dedup gc' to reclaim space from unreferenced chunks.")
+
+	return nil
+}
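The keep-last / keep-daily / keep-weekly selection above reduces to a small bucketing pass over newest-first timestamps. A standalone sketch (hypothetical `selectKept` helper, not part of this diff), covering the first two rules; keep-weekly works the same way with `ISOWeek` keys:

```go
package main

import (
	"fmt"
	"time"
)

// selectKept takes newest-first creation times and returns the set of
// indices retained by keep-last and keep-daily.
func selectKept(created []time.Time, keepLast, keepDaily int) map[int]bool {
	kept := make(map[int]bool)
	for i := 0; i < keepLast && i < len(created); i++ {
		kept[i] = true // most recent N unconditionally
	}
	seenDays := make(map[string]bool)
	count := 0
	for i, t := range created {
		day := t.Format("2006-01-02")
		if !seenDays[day] {
			seenDays[day] = true
			if count < keepDaily {
				kept[i] = true // first (newest) backup of each day
				count++
			}
		}
	}
	return kept
}

func main() {
	now := time.Now()
	var ts []time.Time
	for i := 0; i < 10; i++ {
		ts = append(ts, now.Add(-time.Duration(i*12)*time.Hour)) // two per day
	}
	fmt.Println(selectKept(ts, 2, 3)) // keeps 0,1 (last) plus 0,2,4 (daily)
}
```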
+
+func runDedupBackupDB(cmd *cobra.Command, args []string) error {
+	dbType := strings.ToLower(dedupDBType)
+	dbName := backupDBDatabase
+
+	// Validate db type
+	var dumpCmd string
+	var dumpArgs []string
+
+	switch dbType {
+	case "postgres", "postgresql", "pg":
+		dbType = "postgres"
+		dumpCmd = "pg_dump"
+		dumpArgs = []string{"-Fc"} // Custom format for better compression
+		if dedupDBHost != "" && dedupDBHost != "localhost" {
+			dumpArgs = append(dumpArgs, "-h", dedupDBHost)
+		}
+		if backupDBUser != "" {
+			dumpArgs = append(dumpArgs, "-U", backupDBUser)
+		}
+		dumpArgs = append(dumpArgs, dbName)
+
+	case "mysql":
+		dumpCmd = "mysqldump"
+		dumpArgs = []string{
+			"--single-transaction",
+			"--routines",
+			"--triggers",
+			"--events",
+		}
+		if dedupDBHost != "" {
+			dumpArgs = append(dumpArgs, "-h", dedupDBHost)
+		}
+		if backupDBUser != "" {
+			dumpArgs = append(dumpArgs, "-u", backupDBUser)
+		}
+		if backupDBPassword != "" {
+			dumpArgs = append(dumpArgs, "-p"+backupDBPassword)
+		}
+		dumpArgs = append(dumpArgs, dbName)
+
+	case "mariadb":
+		dumpCmd = "mariadb-dump"
+		// Fall back to mysqldump if mariadb-dump not available
+		if _, err := exec.LookPath(dumpCmd); err != nil {
+			dumpCmd = "mysqldump"
+		}
+		dumpArgs = []string{
+			"--single-transaction",
+			"--routines",
+			"--triggers",
+			"--events",
+		}
+		if dedupDBHost != "" {
+			dumpArgs = append(dumpArgs, "-h", dedupDBHost)
+		}
+		if backupDBUser != "" {
+			dumpArgs = append(dumpArgs, "-u", backupDBUser)
+		}
+		if backupDBPassword != "" {
+			dumpArgs = append(dumpArgs, "-p"+backupDBPassword)
+		}
+		dumpArgs = append(dumpArgs, dbName)
+
+	default:
+		return fmt.Errorf("unsupported database type: %s (use postgres, mysql, or mariadb)", dbType)
+	}
+
+	// Verify dump command exists
+	if _, err := exec.LookPath(dumpCmd); err != nil {
+		return fmt.Errorf("%s not found in PATH: %w", dumpCmd, err)
+	}
+
+	// Setup dedup storage
+	basePath := getDedupDir()
+	encKey := ""
+	if dedupEncrypt {
+		encKey = getEncryptionKey()
+		if encKey == "" {
+			return fmt.Errorf("encryption enabled but no key provided (use --key or DBBACKUP_DEDUP_KEY)")
+		}
+	}
+
+	store, err := dedup.NewChunkStore(dedup.StoreConfig{
+		BasePath:      basePath,
+		Compress:      dedupCompress,
+		EncryptionKey: encKey,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to open chunk store: %w", err)
+	}
+
+	manifestStore, err := dedup.NewManifestStore(basePath)
+	if err != nil {
+		return fmt.Errorf("failed to open manifest store: %w", err)
+	}
+
+	index, err := dedup.NewChunkIndexAt(getIndexDBPath())
+	if err != nil {
+		return fmt.Errorf("failed to open chunk index: %w", err)
+	}
+	defer index.Close()
+
+	// Generate manifest ID
+	now := time.Now()
+	manifestID := now.Format("2006-01-02_150405") + "_" + dbName
+
+	fmt.Printf("Creating deduplicated database backup: %s\n", manifestID)
+	fmt.Printf("Database: %s (%s)\n", dbName, dbType)
+	fmt.Printf("Command: %s %s\n", dumpCmd, strings.Join(dumpArgs, " "))
+	fmt.Printf("Store: %s\n", basePath)
+
+	// Start the dump command
+	dumpExec := exec.Command(dumpCmd, dumpArgs...)
+
+	// Set password via environment for postgres
+	if dbType == "postgres" && backupDBPassword != "" {
+		dumpExec.Env = append(os.Environ(), "PGPASSWORD="+backupDBPassword)
+	}
+
+	stdout, err := dumpExec.StdoutPipe()
+	if err != nil {
+		return fmt.Errorf("failed to get stdout pipe: %w", err)
+	}
+
+	stderr, err := dumpExec.StderrPipe()
+	if err != nil {
+		return fmt.Errorf("failed to get stderr pipe: %w", err)
+	}
+
+	if err := dumpExec.Start(); err != nil {
+		return fmt.Errorf("failed to start %s: %w", dumpCmd, err)
+	}
+
+	// Hash while chunking using TeeReader
+	h := sha256.New()
+	reader := io.TeeReader(stdout, h)
+
+	// Chunk the stream directly
+	chunker := dedup.NewChunker(reader, dedup.DefaultChunkerConfig())
+	var chunks []dedup.ChunkRef
+	var totalSize, storedSize int64
+	var chunkCount, newChunks int
+
+	startTime := time.Now()
+
+	for {
+		chunk, err := chunker.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("chunking failed: %w", err)
+		}
+
+		chunkCount++
+		totalSize += int64(chunk.Length)
+
+		// Store chunk (deduplication happens here)
+		isNew, err := store.Put(chunk)
+		if err != nil {
+			return fmt.Errorf("failed to store chunk: %w", err)
+		}
+
+		if isNew {
+			newChunks++
+			storedSize += int64(chunk.Length)
+			index.AddChunk(chunk.Hash, chunk.Length, chunk.Length)
+		}
+
+		chunks = append(chunks, dedup.ChunkRef{
+			Hash:   chunk.Hash,
+			Offset: chunk.Offset,
+			Length: chunk.Length,
+		})
+
+		if chunkCount%1000 == 0 {
+			fmt.Printf("\r  Processed %d chunks, %d new, %s...", chunkCount, newChunks, formatBytes(totalSize))
+		}
+	}
+
+	// Read any stderr
+	stderrBytes, _ := io.ReadAll(stderr)
+
+	// Wait for command to complete
+	if err := dumpExec.Wait(); err != nil {
+		return fmt.Errorf("%s failed: %w\nstderr: %s", dumpCmd, err, string(stderrBytes))
+	}
+
+	duration := time.Since(startTime)
+	fileHash := hex.EncodeToString(h.Sum(nil))
+
+	// Calculate dedup ratio
+	dedupRatio := 0.0
+	if totalSize > 0 {
+		dedupRatio = 1.0 - float64(storedSize)/float64(totalSize)
+	}
+
+	// Create manifest
+	manifest := &dedup.Manifest{
+		ID:           manifestID,
+		Name:         dedupName,
+		CreatedAt:    now,
+		DatabaseType: dbType,
+		DatabaseName: dbName,
+		DatabaseHost: dedupDBHost,
+		Chunks:       chunks,
+		OriginalSize: totalSize,
+		StoredSize:   storedSize,
+		ChunkCount:   chunkCount,
+		NewChunks:    newChunks,
+		DedupRatio:   dedupRatio,
+		Encrypted:    dedupEncrypt,
+		Compressed:   dedupCompress,
+		SHA256:       fileHash,
+	}
+
+	if err := manifestStore.Save(manifest); err != nil {
+		return fmt.Errorf("failed to save manifest: %w", err)
+	}
+
+	if err := index.AddManifest(manifest); err != nil {
+		log.Warn("Failed to index manifest", "error", err)
+	}
+
+	fmt.Printf("\r                                                            \r")
+	fmt.Printf("\nBackup complete!\n")
+	fmt.Printf("  Manifest:    %s\n", manifestID)
+	fmt.Printf("  Chunks:      %d total, %d new\n", chunkCount, newChunks)
+	fmt.Printf("  Dump size:   %s\n", formatBytes(totalSize))
+	fmt.Printf("  Stored:      %s (new data)\n", formatBytes(storedSize))
+	fmt.Printf("  Dedup ratio: %.1f%%\n", dedupRatio*100)
+	fmt.Printf("  Duration:    %s\n", duration.Round(time.Millisecond))
+	fmt.Printf("  Throughput:  %s/s\n", formatBytes(int64(float64(totalSize)/duration.Seconds())))
+
+	return nil
+}
+
+func runDedupMetrics(cmd *cobra.Command, args []string) error {
+	basePath := getDedupDir()
+	indexPath := getIndexDBPath()
+
+	instance := dedupMetricsInstance
+	if instance == "" {
+		hostname, _ := os.Hostname()
+		instance = hostname
+	}
+
+	metrics, err := dedup.CollectMetrics(basePath, indexPath)
+	if err != nil {
+		return fmt.Errorf("failed to collect metrics: %w", err)
+	}
+
+	output := dedup.FormatPrometheusMetrics(metrics, instance)
+
+	if dedupMetricsOutput != "" {
+		if err := dedup.WritePrometheusTextfile(dedupMetricsOutput, instance, basePath, indexPath); err != nil {
+			return fmt.Errorf("failed to write metrics: %w", err)
+		}
+		fmt.Printf("Wrote metrics to %s\n", dedupMetricsOutput)
+	} else {
+		fmt.Print(output)
+	}
+
+	return nil
+}
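The `--output` path targets node_exporter's textfile collector, and the usual way to keep the collector from scraping a half-written `.prom` file is write-then-rename. A sketch under that assumption (the PR's `dedup.WritePrometheusTextfile` may do this differently):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeTextfileAtomic writes contents to a temp file in the same
// directory, then renames it over the target, so readers only ever
// see a complete file. Rename is atomic on POSIX filesystems.
func writeTextfileAtomic(path, contents string) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".dedup-*.prom.tmp")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op after a successful rename
	if _, err := tmp.WriteString(contents); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	metrics := "# TYPE dbbackup_dedup_chunks gauge\ndbbackup_dedup_chunks 42\n"
	if err := writeTextfileAtomic("/tmp/dedup.prom", metrics); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```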
grafana/dbbackup-dashboard.json (new file, 1303 lines)
@@ -0,0 +1,1303 @@
New 1303-line Grafana dashboard JSON; the capture ends mid-file. Recoverable structure:

Dashboard: editable, Prometheus datasource uid ${DS_PROMETHEUS}, pluginVersion 10.2.0, built-in "Annotations & Alerts" annotation list; all queries filter on instance=~"$instance".

Panels:
  id 1   "Last Backup Status"        stat (6x4 @ 0,0)           expr: dbbackup_rpo_seconds{instance=~"$instance"} < 86400; value mappings 0 = FAILED (red), 1 = SUCCESS (green); background color mode
  id 2   "Time Since Last Backup"    stat (6x4 @ 6,0)           expr: dbbackup_rpo_seconds{instance=~"$instance"}; unit s; thresholds green, yellow at 43200, red at 86400
  id 3   "Total Successful Backups"  stat (6x4 @ 12,0)          expr: dbbackup_backup_total{instance=~"$instance", status="success"}
  id 4   "Total Failed Backups"      stat (6x4 @ 18,0)          expr: dbbackup_backup_total{instance=~"$instance", status="failure"}; red at >= 1
  id 5   "RPO Over Time"             timeseries (12x8 @ 0,4)    expr: dbbackup_rpo_seconds{instance=~"$instance"}; unit s; red threshold line at 86400
  id 6   "Backup Size"               timeseries, bars (12x8 @ 12,4)   expr: dbbackup_last_backup_size_bytes{instance=~"$instance"}; unit bytes
  id 7   "Backup Duration"           timeseries (12x8 @ 0,12)   expr: dbbackup_last_backup_duration_seconds{instance=~"$instance"}; unit s
  id 8   "Backup Status Overview"    table (12x8 @ 12,12)       three instant queries (Status, RPO, Size) joined by database; columns renamed to Database/Instance/Status/RPO/Size; Status and RPO cells use color-background with the mappings and thresholds above
  id 100 "Deduplication Statistics"  row (y=30), followed by a percentunit stat panel (blue threshold) at which the capture cuts off …
|
"h": 5,
|
||||||
|
"w": 6,
|
||||||
|
"x": 0,
|
||||||
|
"y": 31
|
||||||
|
},
|
||||||
|
"id": 101,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "background",
|
||||||
|
"graphMode": "none",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["lastNotNull"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_ratio{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Dedup Ratio",
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "thresholds"
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "bytes"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 5,
|
||||||
|
"w": 6,
|
||||||
|
"x": 6,
|
||||||
|
"y": 31
|
||||||
|
},
|
||||||
|
"id": 102,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "none",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["lastNotNull"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_space_saved_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Space Saved",
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "thresholds"
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "yellow",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "bytes"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 5,
|
||||||
|
"w": 6,
|
||||||
|
"x": 12,
|
||||||
|
"y": 31
|
||||||
|
},
|
||||||
|
"id": 103,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "none",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["lastNotNull"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_disk_usage_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Disk Usage",
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "thresholds"
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "purple",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "short"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 5,
|
||||||
|
"w": 6,
|
||||||
|
"x": 18,
|
||||||
|
"y": 31
|
||||||
|
},
|
||||||
|
"id": 104,
|
||||||
|
"options": {
|
||||||
|
"colorMode": "value",
|
||||||
|
"graphMode": "none",
|
||||||
|
"justifyMode": "auto",
|
||||||
|
"orientation": "auto",
|
||||||
|
"reduceOptions": {
|
||||||
|
"calcs": ["lastNotNull"],
|
||||||
|
"fields": "",
|
||||||
|
"values": false
|
||||||
|
},
|
||||||
|
"textMode": "auto"
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_chunks_total{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "__auto",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Total Chunks",
|
||||||
|
"type": "stat"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisBorderShow": false,
|
||||||
|
"axisCenteredZero": false,
|
||||||
|
"axisColorMode": "text",
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 10,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"insertNulls": false,
|
||||||
|
"lineInterpolation": "linear",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {
|
||||||
|
"group": "A",
|
||||||
|
"mode": "none"
|
||||||
|
},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "percentunit"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 0,
|
||||||
|
"y": 36
|
||||||
|
},
|
||||||
|
"id": 105,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom",
|
||||||
|
"showLegend": true
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single",
|
||||||
|
"sort": "none"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_database_ratio{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "{{database}}",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Dedup Ratio by Database",
|
||||||
|
"type": "timeseries"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"fieldConfig": {
|
||||||
|
"defaults": {
|
||||||
|
"color": {
|
||||||
|
"mode": "palette-classic"
|
||||||
|
},
|
||||||
|
"custom": {
|
||||||
|
"axisBorderShow": false,
|
||||||
|
"axisCenteredZero": false,
|
||||||
|
"axisColorMode": "text",
|
||||||
|
"axisLabel": "",
|
||||||
|
"axisPlacement": "auto",
|
||||||
|
"barAlignment": 0,
|
||||||
|
"drawStyle": "line",
|
||||||
|
"fillOpacity": 10,
|
||||||
|
"gradientMode": "none",
|
||||||
|
"hideFrom": {
|
||||||
|
"legend": false,
|
||||||
|
"tooltip": false,
|
||||||
|
"viz": false
|
||||||
|
},
|
||||||
|
"insertNulls": false,
|
||||||
|
"lineInterpolation": "linear",
|
||||||
|
"lineWidth": 1,
|
||||||
|
"pointSize": 5,
|
||||||
|
"scaleDistribution": {
|
||||||
|
"type": "linear"
|
||||||
|
},
|
||||||
|
"showPoints": "auto",
|
||||||
|
"spanNulls": false,
|
||||||
|
"stacking": {
|
||||||
|
"group": "A",
|
||||||
|
"mode": "none"
|
||||||
|
},
|
||||||
|
"thresholdsStyle": {
|
||||||
|
"mode": "off"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mappings": [],
|
||||||
|
"thresholds": {
|
||||||
|
"mode": "absolute",
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"color": "green",
|
||||||
|
"value": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"unit": "bytes"
|
||||||
|
},
|
||||||
|
"overrides": []
|
||||||
|
},
|
||||||
|
"gridPos": {
|
||||||
|
"h": 8,
|
||||||
|
"w": 12,
|
||||||
|
"x": 12,
|
||||||
|
"y": 36
|
||||||
|
},
|
||||||
|
"id": 106,
|
||||||
|
"options": {
|
||||||
|
"legend": {
|
||||||
|
"calcs": [],
|
||||||
|
"displayMode": "list",
|
||||||
|
"placement": "bottom",
|
||||||
|
"showLegend": true
|
||||||
|
},
|
||||||
|
"tooltip": {
|
||||||
|
"mode": "single",
|
||||||
|
"sort": "none"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pluginVersion": "10.2.0",
|
||||||
|
"targets": [
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_space_saved_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "Space Saved",
|
||||||
|
"range": true,
|
||||||
|
"refId": "A"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"editorMode": "code",
|
||||||
|
"expr": "dbbackup_dedup_disk_usage_bytes{instance=~\"$instance\"}",
|
||||||
|
"legendFormat": "Disk Usage",
|
||||||
|
"range": true,
|
||||||
|
"refId": "B"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"title": "Dedup Storage Over Time",
|
||||||
|
"type": "timeseries"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"refresh": "30s",
|
||||||
|
"schemaVersion": 38,
|
||||||
|
"tags": [
|
||||||
|
"dbbackup",
|
||||||
|
"backup",
|
||||||
|
"database",
|
||||||
|
"dedup"
|
||||||
|
],
|
||||||
|
"templating": {
|
||||||
|
"list": [
|
||||||
|
{
|
||||||
|
"current": {
|
||||||
|
"selected": false,
|
||||||
|
"text": "All",
|
||||||
|
"value": "$__all"
|
||||||
|
},
|
||||||
|
"datasource": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"uid": "${DS_PROMETHEUS}"
|
||||||
|
},
|
||||||
|
"definition": "label_values(dbbackup_rpo_seconds, instance)",
|
||||||
|
"hide": 0,
|
||||||
|
"includeAll": true,
|
||||||
|
"label": "Instance",
|
||||||
|
"multi": true,
|
||||||
|
"name": "instance",
|
||||||
|
"options": [],
|
||||||
|
"query": {
|
||||||
|
"query": "label_values(dbbackup_rpo_seconds, instance)",
|
||||||
|
"refId": "StandardVariableQuery"
|
||||||
|
},
|
||||||
|
"refresh": 1,
|
||||||
|
"regex": "",
|
||||||
|
"skipUrlSync": false,
|
||||||
|
"sort": 1,
|
||||||
|
"type": "query"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hide": 2,
|
||||||
|
"name": "DS_PROMETHEUS",
|
||||||
|
"query": "prometheus",
|
||||||
|
"skipUrlSync": false,
|
||||||
|
"type": "datasource"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"time": {
|
||||||
|
"from": "now-24h",
|
||||||
|
"to": "now"
|
||||||
|
},
|
||||||
|
"timepicker": {},
|
||||||
|
"timezone": "",
|
||||||
|
"title": "DBBackup Overview",
|
||||||
|
"uid": "dbbackup-overview",
|
||||||
|
"version": 1,
|
||||||
|
"weekStart": ""
|
||||||
|
}
|
||||||
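Note: the "Backup Status Overview" table's thresholds carry the alerting intent directly: the RPO column turns yellow at 43200 s (12 h) and red at 86400 s (24 h), and the Status target's expression `dbbackup_rpo_seconds{instance=~"$instance"} < 86400` only returns series whose last backup is under a day old. Since the three table targets are instant queries with no shared timestamp, they are joined on the `database` label by the `joinByField` transformation before the `organize` step renames the value columns to Status, RPO, and Size.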
@@ -3,7 +3,9 @@ package dedup
 import (
 	"database/sql"
 	"fmt"
+	"os"
 	"path/filepath"
+	"strings"
 	"time"
 
 	_ "github.com/mattn/go-sqlite3" // SQLite driver
@@ -12,26 +14,66 @@ import (
 // ChunkIndex provides fast chunk lookups using SQLite
 type ChunkIndex struct {
 	db     *sql.DB
+	dbPath string
 }
 
-// NewChunkIndex opens or creates a chunk index database
+// NewChunkIndex opens or creates a chunk index database at the default location
 func NewChunkIndex(basePath string) (*ChunkIndex, error) {
 	dbPath := filepath.Join(basePath, "chunks.db")
+	return NewChunkIndexAt(dbPath)
+}
 
-	db, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_synchronous=NORMAL")
+// NewChunkIndexAt opens or creates a chunk index database at a specific path.
+// Use this to put the SQLite index on local storage when chunks are on NFS/CIFS.
+func NewChunkIndexAt(dbPath string) (*ChunkIndex, error) {
+	// Ensure parent directory exists
+	if err := os.MkdirAll(filepath.Dir(dbPath), 0700); err != nil {
+		return nil, fmt.Errorf("failed to create index directory: %w", err)
+	}
+
+	// Add busy_timeout to handle lock contention gracefully
+	db, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000")
 	if err != nil {
 		return nil, fmt.Errorf("failed to open chunk index: %w", err)
 	}
 
-	idx := &ChunkIndex{db: db}
+	// Test the connection and check for locking issues
+	if err := db.Ping(); err != nil {
+		db.Close()
+		if isNFSLockingError(err) {
+			return nil, fmt.Errorf("database locked (common on NFS/CIFS): %w\n\n"+
+				"HINT: Use --index-db to put the SQLite index on local storage:\n"+
+				"  dbbackup dedup ... --index-db /var/lib/dbbackup/dedup-index.db", err)
+		}
+		return nil, fmt.Errorf("failed to connect to chunk index: %w", err)
+	}
+
+	idx := &ChunkIndex{db: db, dbPath: dbPath}
 	if err := idx.migrate(); err != nil {
 		db.Close()
+		if isNFSLockingError(err) {
+			return nil, fmt.Errorf("database locked during migration (common on NFS/CIFS): %w\n\n"+
+				"HINT: Use --index-db to put the SQLite index on local storage:\n"+
+				"  dbbackup dedup ... --index-db /var/lib/dbbackup/dedup-index.db", err)
+		}
 		return nil, err
 	}
 
 	return idx, nil
 }
 
+// isNFSLockingError checks if an error is likely due to NFS/CIFS locking issues
+func isNFSLockingError(err error) bool {
+	if err == nil {
+		return false
+	}
+	errStr := err.Error()
+	return strings.Contains(errStr, "database is locked") ||
+		strings.Contains(errStr, "SQLITE_BUSY") ||
+		strings.Contains(errStr, "cannot lock") ||
+		strings.Contains(errStr, "lock protocol")
+}
+
 // migrate creates the schema if needed
 func (idx *ChunkIndex) migrate() error {
 	schema := `
@@ -166,15 +208,26 @@ func (idx *ChunkIndex) RemoveManifest(id string) error {
 	return err
 }
 
+// UpdateManifestVerified updates the verified timestamp for a manifest
+func (idx *ChunkIndex) UpdateManifestVerified(id string, verifiedAt time.Time) error {
+	_, err := idx.db.Exec("UPDATE manifests SET verified_at = ? WHERE id = ?", verifiedAt, id)
+	return err
+}
+
 // IndexStats holds statistics about the dedup index
 type IndexStats struct {
 	TotalChunks    int64
 	TotalManifests int64
-	TotalSizeRaw    int64 // Uncompressed, undeduplicated
-	TotalSizeStored int64 // On-disk after dedup+compression
-	DedupRatio      float64
+	TotalSizeRaw    int64   // Uncompressed, undeduplicated (per-chunk)
+	TotalSizeStored int64   // On-disk after dedup+compression (per-chunk)
+	DedupRatio      float64 // Based on manifests (real dedup ratio)
 	OldestChunk    time.Time
 	NewestChunk    time.Time
+
+	// Manifest-based stats (accurate dedup calculation)
+	TotalBackupSize int64 // Sum of all backup original sizes
+	TotalNewData    int64 // Sum of all new chunks stored
+	SpaceSaved      int64 // Difference = what dedup saved
 }
 
 // Stats returns statistics about the index
@@ -206,8 +259,22 @@ func (idx *ChunkIndex) Stats() (*IndexStats, error) {
 
 	idx.db.QueryRow("SELECT COUNT(*) FROM manifests").Scan(&stats.TotalManifests)
 
-	if stats.TotalSizeRaw > 0 {
-		stats.DedupRatio = 1.0 - float64(stats.TotalSizeStored)/float64(stats.TotalSizeRaw)
+	// Calculate accurate dedup ratio from manifests:
+	// sum all backup original sizes and all new data stored
+	err = idx.db.QueryRow(`
+		SELECT
+			COALESCE(SUM(original_size), 0),
+			COALESCE(SUM(stored_size), 0)
+		FROM manifests
+	`).Scan(&stats.TotalBackupSize, &stats.TotalNewData)
+	if err != nil {
+		return nil, err
+	}
+
+	// Calculate real dedup ratio: how much data was deduplicated across all backups
+	if stats.TotalBackupSize > 0 {
+		stats.DedupRatio = 1.0 - float64(stats.TotalNewData)/float64(stats.TotalBackupSize)
+		stats.SpaceSaved = stats.TotalBackupSize - stats.TotalNewData
 	}
 
 	return stats, nil
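A quick worked example of the manifest-based ratio computed in `Stats()` above; the figures are purely illustrative, not taken from the repo:

```go
package main

import "fmt"

func main() {
	const giB = int64(1) << 30

	// Illustrative figures: ten 10 GiB backups whose chunks mostly repeat.
	totalBackupSize := 100 * giB // COALESCE(SUM(original_size), 0) over manifests
	totalNewData := 20 * giB     // COALESCE(SUM(stored_size), 0) over manifests

	dedupRatio := 1.0 - float64(totalNewData)/float64(totalBackupSize)
	spaceSaved := totalBackupSize - totalNewData

	fmt.Printf("ratio=%.2f spaceSaved=%dGiB\n", dedupRatio, spaceSaved/giB)
	// Output: ratio=0.80 spaceSaved=80GiB
}
```

Summing per-manifest sizes rather than per-chunk sizes is what makes the ratio "real": a second identical backup doubles `original_size` but adds almost no `stored_size`, so the ratio climbs as expected.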
@@ -38,6 +38,7 @@ type Manifest struct {
 	// Encryption and compression settings used
 	Encrypted  bool `json:"encrypted"`
 	Compressed bool `json:"compressed"`
+	Decompressed bool `json:"decompressed,omitempty"` // Input was auto-decompressed before chunking
 
 	// Verification
 	SHA256 string `json:"sha256"` // Hash of reconstructed file
internal/dedup/metrics.go (new file, 235 lines)
@@ -0,0 +1,235 @@
package dedup

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// DedupMetrics holds deduplication statistics for Prometheus
type DedupMetrics struct {
	// Global stats
	TotalChunks     int64
	TotalManifests  int64
	TotalBackupSize int64   // Sum of all backup original sizes
	TotalNewData    int64   // Sum of all new chunks stored
	SpaceSaved      int64   // Bytes saved by deduplication
	DedupRatio      float64 // Overall dedup ratio (0-1)
	DiskUsage       int64   // Actual bytes on disk

	// Per-database stats
	ByDatabase map[string]*DatabaseDedupMetrics
}

// DatabaseDedupMetrics holds per-database dedup stats
type DatabaseDedupMetrics struct {
	Database       string
	BackupCount    int
	TotalSize      int64
	StoredSize     int64
	DedupRatio     float64
	LastBackupTime time.Time
	LastVerified   time.Time
}

// CollectMetrics gathers dedup statistics from the index and store
func CollectMetrics(basePath string, indexPath string) (*DedupMetrics, error) {
	var idx *ChunkIndex
	var err error

	if indexPath != "" {
		idx, err = NewChunkIndexAt(indexPath)
	} else {
		idx, err = NewChunkIndex(basePath)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to open chunk index: %w", err)
	}
	defer idx.Close()

	store, err := NewChunkStore(StoreConfig{BasePath: basePath})
	if err != nil {
		return nil, fmt.Errorf("failed to open chunk store: %w", err)
	}

	// Get index stats
	stats, err := idx.Stats()
	if err != nil {
		return nil, fmt.Errorf("failed to get index stats: %w", err)
	}

	// Get store stats
	storeStats, err := store.Stats()
	if err != nil {
		return nil, fmt.Errorf("failed to get store stats: %w", err)
	}

	metrics := &DedupMetrics{
		TotalChunks:     stats.TotalChunks,
		TotalManifests:  stats.TotalManifests,
		TotalBackupSize: stats.TotalBackupSize,
		TotalNewData:    stats.TotalNewData,
		SpaceSaved:      stats.SpaceSaved,
		DedupRatio:      stats.DedupRatio,
		DiskUsage:       storeStats.TotalSize,
		ByDatabase:      make(map[string]*DatabaseDedupMetrics),
	}

	// Collect per-database metrics from manifest store
	manifestStore, err := NewManifestStore(basePath)
	if err != nil {
		return metrics, nil // Return partial metrics
	}

	manifests, err := manifestStore.ListAll()
	if err != nil {
		return metrics, nil // Return partial metrics
	}

	for _, m := range manifests {
		dbKey := m.DatabaseName
		if dbKey == "" {
			dbKey = "_default"
		}

		dbMetrics, ok := metrics.ByDatabase[dbKey]
		if !ok {
			dbMetrics = &DatabaseDedupMetrics{
				Database: dbKey,
			}
			metrics.ByDatabase[dbKey] = dbMetrics
		}

		dbMetrics.BackupCount++
		dbMetrics.TotalSize += m.OriginalSize
		dbMetrics.StoredSize += m.StoredSize

		if m.CreatedAt.After(dbMetrics.LastBackupTime) {
			dbMetrics.LastBackupTime = m.CreatedAt
		}
		if !m.VerifiedAt.IsZero() && m.VerifiedAt.After(dbMetrics.LastVerified) {
			dbMetrics.LastVerified = m.VerifiedAt
		}
	}

	// Calculate per-database dedup ratios
	for _, dbMetrics := range metrics.ByDatabase {
		if dbMetrics.TotalSize > 0 {
			dbMetrics.DedupRatio = 1.0 - float64(dbMetrics.StoredSize)/float64(dbMetrics.TotalSize)
		}
	}

	return metrics, nil
}

// WritePrometheusTextfile writes dedup metrics in Prometheus format
func WritePrometheusTextfile(path string, instance string, basePath string, indexPath string) error {
	metrics, err := CollectMetrics(basePath, indexPath)
	if err != nil {
		return err
	}

	output := FormatPrometheusMetrics(metrics, instance)

	// Atomic write
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}

	tmpPath := path + ".tmp"
	if err := os.WriteFile(tmpPath, []byte(output), 0644); err != nil {
		return fmt.Errorf("failed to write temp file: %w", err)
	}

	if err := os.Rename(tmpPath, path); err != nil {
		os.Remove(tmpPath)
		return fmt.Errorf("failed to rename temp file: %w", err)
	}

	return nil
}

// FormatPrometheusMetrics formats dedup metrics in Prometheus exposition format
func FormatPrometheusMetrics(m *DedupMetrics, instance string) string {
	var b strings.Builder
	now := time.Now().Unix()

	b.WriteString("# DBBackup Deduplication Prometheus Metrics\n")
	b.WriteString(fmt.Sprintf("# Generated at: %s\n", time.Now().Format(time.RFC3339)))
	b.WriteString(fmt.Sprintf("# Instance: %s\n", instance))
	b.WriteString("\n")

	// Global dedup metrics
	b.WriteString("# HELP dbbackup_dedup_chunks_total Total number of unique chunks stored\n")
	b.WriteString("# TYPE dbbackup_dedup_chunks_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_chunks_total{instance=%q} %d\n", instance, m.TotalChunks))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_manifests_total Total number of deduplicated backups\n")
	b.WriteString("# TYPE dbbackup_dedup_manifests_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_manifests_total{instance=%q} %d\n", instance, m.TotalManifests))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_backup_bytes_total Total logical size of all backups in bytes\n")
	b.WriteString("# TYPE dbbackup_dedup_backup_bytes_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_backup_bytes_total{instance=%q} %d\n", instance, m.TotalBackupSize))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_stored_bytes_total Total unique data stored in bytes (after dedup)\n")
	b.WriteString("# TYPE dbbackup_dedup_stored_bytes_total gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_stored_bytes_total{instance=%q} %d\n", instance, m.TotalNewData))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_space_saved_bytes Bytes saved by deduplication\n")
	b.WriteString("# TYPE dbbackup_dedup_space_saved_bytes gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_space_saved_bytes{instance=%q} %d\n", instance, m.SpaceSaved))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_ratio Deduplication ratio (0-1, higher is better)\n")
	b.WriteString("# TYPE dbbackup_dedup_ratio gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_ratio{instance=%q} %.4f\n", instance, m.DedupRatio))
	b.WriteString("\n")

	b.WriteString("# HELP dbbackup_dedup_disk_usage_bytes Actual disk usage of chunk store\n")
	b.WriteString("# TYPE dbbackup_dedup_disk_usage_bytes gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_disk_usage_bytes{instance=%q} %d\n", instance, m.DiskUsage))
	b.WriteString("\n")

	// Per-database metrics
	if len(m.ByDatabase) > 0 {
		b.WriteString("# HELP dbbackup_dedup_database_backup_count Number of deduplicated backups per database\n")
		b.WriteString("# TYPE dbbackup_dedup_database_backup_count gauge\n")
		for _, db := range m.ByDatabase {
			b.WriteString(fmt.Sprintf("dbbackup_dedup_database_backup_count{instance=%q,database=%q} %d\n",
				instance, db.Database, db.BackupCount))
		}
		b.WriteString("\n")

		b.WriteString("# HELP dbbackup_dedup_database_ratio Deduplication ratio per database (0-1)\n")
		b.WriteString("# TYPE dbbackup_dedup_database_ratio gauge\n")
		for _, db := range m.ByDatabase {
			b.WriteString(fmt.Sprintf("dbbackup_dedup_database_ratio{instance=%q,database=%q} %.4f\n",
				instance, db.Database, db.DedupRatio))
		}
		b.WriteString("\n")

		b.WriteString("# HELP dbbackup_dedup_database_last_backup_timestamp Last backup timestamp per database\n")
		b.WriteString("# TYPE dbbackup_dedup_database_last_backup_timestamp gauge\n")
		for _, db := range m.ByDatabase {
			if !db.LastBackupTime.IsZero() {
				b.WriteString(fmt.Sprintf("dbbackup_dedup_database_last_backup_timestamp{instance=%q,database=%q} %d\n",
					instance, db.Database, db.LastBackupTime.Unix()))
			}
		}
		b.WriteString("\n")
	}

	b.WriteString("# HELP dbbackup_dedup_scrape_timestamp Unix timestamp when dedup metrics were collected\n")
	b.WriteString("# TYPE dbbackup_dedup_scrape_timestamp gauge\n")
	b.WriteString(fmt.Sprintf("dbbackup_dedup_scrape_timestamp{instance=%q} %d\n", instance, now))

	return b.String()
}
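A minimal usage sketch for the new exporter. Every path and the instance label below are assumptions for illustration; the output directory must match whatever textfile collector scrapes it, and since `internal/dedup` is an internal package the call has to live inside this repo:

```go
package main

import (
	"log"

	"dbbackup/internal/dedup"
)

func main() {
	// All paths below are illustrative assumptions, not defaults from the repo.
	err := dedup.WritePrometheusTextfile(
		"/var/lib/node_exporter/textfile_collector/dbbackup_dedup.prom", // .prom output, atomically replaced
		"db01",                             // instance label stamped on every series
		"/var/lib/dbbackup/dedup",          // chunk store base path
		"/var/lib/dbbackup/dedup-index.db", // local SQLite index (matches the --index-db hint above)
	)
	if err != nil {
		log.Fatal(err)
	}
}
```

The write-to-temp-then-rename pattern in `WritePrometheusTextfile` matters here: a scraper never sees a half-written `.prom` file, because `os.Rename` replaces the old file in a single step on the same filesystem.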
@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
 # Environment
 EnvironmentFile=-/etc/dbbackup/env.d/cluster.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution - cluster backup (all databases)
-ExecStart={{.BinaryPath}} backup cluster --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup cluster --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export
@@ -33,8 +33,11 @@ RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
 # Environment
 EnvironmentFile=-/etc/dbbackup/env.d/%i.conf
 
+# Working directory (config is loaded from .dbbackup.conf here)
+WorkingDirectory=/var/lib/dbbackup
+
 # Execution
-ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --config {{.ConfigPath}}
+ExecStart={{.BinaryPath}} backup {{.BackupType}} %i --backup-dir {{.BackupDir}}
 TimeoutStartSec={{.TimeoutSeconds}}
 
 # Post-backup metrics export
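Note: in the second, templated unit, systemd substitutes the instance name for `%i`, so `EnvironmentFile=-/etc/dbbackup/env.d/%i.conf` gives each database its own credentials file. A unit instantiated for an instance named `mydb` (a hypothetical name, for illustration) would read /etc/dbbackup/env.d/mydb.conf and run `backup {{.BackupType}} mydb`; both templates drop the explicit `--config {{.ConfigPath}}` flag in favor of the configuration picked up from the WorkingDirectory.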
|||||||
@@ -4,15 +4,14 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
tea "github.com/charmbracelet/bubbletea"
|
tea "github.com/charmbracelet/bubbletea"
|
||||||
"github.com/mattn/go-runewidth"
|
|
||||||
|
|
||||||
"dbbackup/internal/config"
|
"dbbackup/internal/config"
|
||||||
"dbbackup/internal/logger"
|
"dbbackup/internal/logger"
|
||||||
|
"dbbackup/internal/restore"
|
||||||
)
|
)
|
||||||
|
|
||||||
// OperationState represents the current operation state
|
// OperationState represents the current operation state
|
||||||
@@ -229,72 +228,66 @@ func (m BackupManagerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
|||||||
|
|
||||||
func (m BackupManagerModel) View() string {
|
func (m BackupManagerModel) View() string {
|
||||||
var s strings.Builder
|
var s strings.Builder
|
||||||
const boxWidth = 60
|
|
||||||
|
|
||||||
// Helper to pad string to box width (handles UTF-8)
|
|
||||||
padToWidth := func(text string, width int) string {
|
|
||||||
textWidth := runewidth.StringWidth(text)
|
|
||||||
if textWidth >= width {
|
|
||||||
return runewidth.Truncate(text, width-3, "...")
|
|
||||||
}
|
|
||||||
return text + strings.Repeat(" ", width-textWidth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Title
|
// Title
|
||||||
s.WriteString(titleStyle.Render("[DB] Backup Archive Manager"))
|
s.WriteString(TitleStyle.Render("[DB] Backup Archive Manager"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
// Operation Status Box (always visible)
|
// Status line (no box, bold+color accents)
|
||||||
s.WriteString("+--[ STATUS ]" + strings.Repeat("-", boxWidth-13) + "+\n")
|
|
||||||
switch m.opState {
|
switch m.opState {
|
||||||
case OpVerifying:
|
case OpVerifying:
|
||||||
spinner := spinnerFrames[m.spinnerFrame]
|
spinner := spinnerFrames[m.spinnerFrame]
|
||||||
statusText := fmt.Sprintf(" %s Verifying: %s", spinner, m.opTarget)
|
s.WriteString(StatusActiveStyle.Render(fmt.Sprintf("%s Verifying: %s", spinner, m.opTarget)))
|
||||||
s.WriteString("|" + padToWidth(statusText, boxWidth) + "|\n")
|
s.WriteString("\n\n")
|
||||||
case OpDeleting:
|
case OpDeleting:
|
||||||
spinner := spinnerFrames[m.spinnerFrame]
|
spinner := spinnerFrames[m.spinnerFrame]
|
||||||
statusText := fmt.Sprintf(" %s Deleting: %s", spinner, m.opTarget)
|
s.WriteString(StatusActiveStyle.Render(fmt.Sprintf("%s Deleting: %s", spinner, m.opTarget)))
|
||||||
s.WriteString("|" + padToWidth(statusText, boxWidth) + "|\n")
|
s.WriteString("\n\n")
|
||||||
default:
|
default:
|
||||||
if m.loading {
|
if m.loading {
|
||||||
spinner := spinnerFrames[m.spinnerFrame]
|
spinner := spinnerFrames[m.spinnerFrame]
|
||||||
statusText := fmt.Sprintf(" %s Loading archives...", spinner)
|
s.WriteString(StatusActiveStyle.Render(fmt.Sprintf("%s Loading archives...", spinner)))
|
||||||
s.WriteString("|" + padToWidth(statusText, boxWidth) + "|\n")
|
s.WriteString("\n\n")
|
||||||
} else if m.message != "" {
|
} else if m.message != "" {
|
||||||
msgText := " " + m.message
|
// Color based on message content
|
||||||
s.WriteString("|" + padToWidth(msgText, boxWidth) + "|\n")
|
if strings.HasPrefix(m.message, "[+]") || strings.HasPrefix(m.message, "Valid") {
|
||||||
|
s.WriteString(StatusSuccessStyle.Render(m.message))
|
||||||
|
} else if strings.HasPrefix(m.message, "[-]") || strings.HasPrefix(m.message, "Error") {
|
||||||
|
s.WriteString(StatusErrorStyle.Render(m.message))
|
||||||
} else {
|
} else {
|
||||||
s.WriteString("|" + padToWidth(" Ready", boxWidth) + "|\n")
|
s.WriteString(StatusActiveStyle.Render(m.message))
|
||||||
}
|
}
|
||||||
|
s.WriteString("\n\n")
|
||||||
|
}
|
||||||
|
// No "Ready" message when idle - cleaner UI
|
||||||
}
|
}
|
||||||
s.WriteString("+" + strings.Repeat("-", boxWidth) + "+\n\n")
|
|
||||||
|
|
||||||
if m.loading {
|
if m.loading {
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.err != nil {
|
if m.err != nil {
|
||||||
s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v", m.err)))
|
s.WriteString(StatusErrorStyle.Render(fmt.Sprintf("[FAIL] Error: %v", m.err)))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
s.WriteString(ShortcutStyle.Render("Press Esc to go back"))
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Summary
|
// Summary
|
||||||
s.WriteString(infoStyle.Render(fmt.Sprintf("Total Archives: %d | Total Size: %s",
|
s.WriteString(LabelStyle.Render(fmt.Sprintf("Total Archives: %d | Total Size: %s",
|
||||||
len(m.archives), formatSize(m.totalSize))))
|
len(m.archives), formatSize(m.totalSize))))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
// Archives list
|
// Archives list
|
||||||
if len(m.archives) == 0 {
|
if len(m.archives) == 0 {
|
||||||
s.WriteString(infoStyle.Render("No backup archives found"))
|
s.WriteString(StatusReadyStyle.Render("No backup archives found"))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
s.WriteString(infoStyle.Render("Press Esc to go back"))
|
s.WriteString(ShortcutStyle.Render("Press Esc to go back"))
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Column headers with better alignment
|
// Column headers with better alignment
|
||||||
s.WriteString(archiveHeaderStyle.Render(fmt.Sprintf(" %-32s %-22s %10s %-16s",
|
s.WriteString(ListHeaderStyle.Render(fmt.Sprintf(" %-32s %-22s %10s %-16s",
|
||||||
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
"FILENAME", "FORMAT", "SIZE", "MODIFIED")))
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
s.WriteString(strings.Repeat("-", 90))
|
s.WriteString(strings.Repeat("-", 90))
|
||||||
@@ -313,18 +306,18 @@ func (m BackupManagerModel) View() string {
|
|||||||
for i := start; i < end; i++ {
|
for i := start; i < end; i++ {
|
||||||
archive := m.archives[i]
|
archive := m.archives[i]
|
||||||
cursor := " "
|
cursor := " "
|
||||||
style := archiveNormalStyle
|
style := ListNormalStyle
|
||||||
|
|
||||||
if i == m.cursor {
|
if i == m.cursor {
|
||||||
cursor = "> "
|
cursor = "> "
|
||||||
style = archiveSelectedStyle
|
style = ListSelectedStyle
|
||||||
}
|
}
|
||||||
|
|
||||||
// Status icon - consistent 4-char width
|
// Status icon - consistent 4-char width
|
||||||
statusIcon := " [+]"
|
statusIcon := " [+]"
|
||||||
if !archive.Valid {
|
if !archive.Valid {
|
||||||
statusIcon = " [-]"
|
statusIcon = " [-]"
|
||||||
style = archiveInvalidStyle
|
style = ItemInvalidStyle
|
||||||
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
} else if time.Since(archive.Modified) > 30*24*time.Hour {
|
||||||
statusIcon = " [!]"
|
statusIcon = " [!]"
|
||||||
}
|
}
|
||||||
@@ -347,94 +340,79 @@ func (m BackupManagerModel) View() string {
|
|||||||
// Footer
|
// Footer
|
||||||
s.WriteString("\n")
|
s.WriteString("\n")
|
||||||
|
|
||||||
s.WriteString(infoStyle.Render(fmt.Sprintf("Selected: %d/%d", m.cursor+1, len(m.archives))))
|
s.WriteString(StatusReadyStyle.Render(fmt.Sprintf("Selected: %d/%d", m.cursor+1, len(m.archives))))
|
||||||
s.WriteString("\n\n")
|
s.WriteString("\n\n")
|
||||||
|
|
||||||
// Grouped keyboard shortcuts - simple aligned format
|
// Grouped keyboard shortcuts
|
||||||
s.WriteString("SHORTCUTS: Up/Down=Move | r=Restore | v=Verify | d=Delete | i=Info | R=Refresh | Esc=Back | q=Quit")
|
s.WriteString(ShortcutStyle.Render("SHORTCUTS: Up/Down=Move | r=Restore | v=Verify | d=Delete | i=Info | R=Refresh | Esc=Back | q=Quit"))
|
||||||
|
|
||||||
return s.String()
|
return s.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
// verifyArchiveCmd runs actual archive verification
|
// verifyArchiveCmd runs the SAME verification as restore safety checks
|
||||||
|
// This ensures consistency between backup manager verify and restore preview
|
||||||
func verifyArchiveCmd(archive ArchiveInfo) tea.Cmd {
|
func verifyArchiveCmd(archive ArchiveInfo) tea.Cmd {
|
||||||
return func() tea.Msg {
|
return func() tea.Msg {
|
||||||
// Determine verification method based on format
|
var issues []string
|
||||||
var valid bool
|
|
||||||
var details string
|
|
||||||
var err error
|
|
||||||
|
|
||||||
switch {
|
// 1. Run the same archive integrity check as restore
|
||||||
case strings.HasSuffix(archive.Path, ".tar.gz") || strings.HasSuffix(archive.Path, ".tgz"):
|
safety := restore.NewSafety(nil, nil) // Doesn't need config/log for validation
|
||||||
// Verify tar.gz archive
|
if err := safety.ValidateArchive(archive.Path); err != nil {
|
||||||
cmd := exec.Command("tar", "-tzf", archive.Path)
|
return verifyResultMsg{
|
||||||
output, cmdErr := cmd.CombinedOutput()
|
archive: archive.Name,
|
||||||
if cmdErr != nil {
|
valid: false,
|
||||||
return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "Archive corrupt or incomplete"}
|
err: nil,
|
||||||
|
details: fmt.Sprintf("Archive integrity: %v", err),
|
||||||
}
|
}
|
||||||
lines := strings.Split(string(output), "\n")
|
|
||||||
fileCount := 0
|
|
||||||
for _, l := range lines {
|
|
||||||
if l != "" {
|
|
||||||
fileCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
valid = true
|
|
||||||
details = fmt.Sprintf("%d files in archive", fileCount)
|
|
||||||
|
|
||||||
case strings.HasSuffix(archive.Path, ".dump") || strings.HasSuffix(archive.Path, ".sql"):
|
|
||||||
// Verify PostgreSQL dump with pg_restore --list
|
|
||||||
cmd := exec.Command("pg_restore", "--list", archive.Path)
|
|
||||||
output, cmdErr := cmd.CombinedOutput()
|
|
||||||
if cmdErr != nil {
|
|
||||||
// Try as plain SQL
|
|
||||||
if strings.HasSuffix(archive.Path, ".sql") {
|
|
||||||
// Just check file is readable and has content
|
|
||||||
fi, statErr := os.Stat(archive.Path)
|
|
||||||
if statErr == nil && fi.Size() > 0 {
|
|
||||||
valid = true
|
|
||||||
details = "Plain SQL file readable"
|
|
||||||
} else {
|
|
||||||
return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "File empty or unreadable"}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "pg_restore cannot read dump"}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
lines := strings.Split(string(output), "\n")
|
|
||||||
objectCount := 0
|
|
||||||
for _, l := range lines {
|
|
||||||
if l != "" && !strings.HasPrefix(l, ";") {
|
|
||||||
objectCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
valid = true
|
|
||||||
details = fmt.Sprintf("%d objects in dump", objectCount)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
case strings.HasSuffix(archive.Path, ".sql.gz"):
|
// 2. Run the same deep diagnosis as restore
|
||||||
// Verify gzipped SQL
|
diagnoser := restore.NewDiagnoser(nil, false)
|
||||||
cmd := exec.Command("gzip", "-t", archive.Path)
|
diagResult, diagErr := diagnoser.DiagnoseFile(archive.Path)
|
||||||
if cmdErr := cmd.Run(); cmdErr != nil {
|
if diagErr != nil {
|
||||||
return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "Gzip archive corrupt"}
|
return verifyResultMsg{
|
||||||
|
archive: archive.Name,
|
||||||
|
valid: false,
|
||||||
|
err: diagErr,
|
||||||
|
details: "Cannot diagnose archive",
|
||||||
}
|
}
|
||||||
valid = true
|
|
||||||
details = "Gzip integrity OK"
|
|
||||||
|
|
||||||
default:
|
|
||||||
// Unknown format - just check file exists and has size
|
|
||||||
fi, statErr := os.Stat(archive.Path)
|
|
||||||
if statErr != nil {
|
|
||||||
return verifyResultMsg{archive: archive.Name, valid: false, err: statErr, details: "Cannot access file"}
|
|
||||||
}
|
|
||||||
if fi.Size() == 0 {
|
|
||||||
return verifyResultMsg{archive: archive.Name, valid: false, err: nil, details: "File is empty"}
|
|
||||||
}
|
|
||||||
valid = true
|
|
||||||
details = "File exists and has content"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return verifyResultMsg{archive: archive.Name, valid: valid, err: err, details: details}
|
if !diagResult.IsValid {
|
||||||
|
// Collect error details
|
||||||
|
if diagResult.IsTruncated {
|
||||||
|
issues = append(issues, "TRUNCATED")
|
||||||
|
}
|
||||||
|
if diagResult.IsCorrupted {
|
||||||
|
issues = append(issues, "CORRUPTED")
|
||||||
|
}
|
||||||
|
if len(diagResult.Errors) > 0 {
|
||||||
|
issues = append(issues, diagResult.Errors[0])
|
||||||
|
}
|
||||||
|
return verifyResultMsg{
|
||||||
|
archive: archive.Name,
|
||||||
|
valid: false,
|
||||||
|
err: nil,
|
||||||
|
details: strings.Join(issues, "; "),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build success details
|
||||||
|
details := "Verified"
|
||||||
|
if diagResult.Details != nil {
|
||||||
|
if diagResult.Details.TableCount > 0 {
|
||||||
|
details = fmt.Sprintf("%d databases in archive", diagResult.Details.TableCount)
|
||||||
|
} else if diagResult.Details.PgRestoreListable {
|
||||||
|
details = "pg_restore verified"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add any warnings
|
||||||
|
if len(diagResult.Warnings) > 0 {
|
||||||
|
details += fmt.Sprintf(" [%d warnings]", len(diagResult.Warnings))
|
||||||
|
}
|
||||||
|
|
||||||
|
return verifyResultMsg{archive: archive.Name, valid: true, err: nil, details: details}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
internal/tui/styles.go (new file, 133 lines)
@@ -0,0 +1,133 @@
package tui

import "github.com/charmbracelet/lipgloss"

// =============================================================================
// GLOBAL TUI STYLE DEFINITIONS
// =============================================================================
// Design Language:
//   - Bold text for labels and headers
//   - Colors for semantic meaning (green=success, red=error, yellow=warning)
//   - No emoticons - use simple text prefixes like [OK], [FAIL], [!]
//   - No boxes for inline status - use bold+color accents
//   - Consistent color palette across all views
// =============================================================================

// Color Palette (ANSI 256 colors for terminal compatibility)
const (
	ColorWhite   = lipgloss.Color("15")  // Bright white
	ColorGray    = lipgloss.Color("250") // Light gray
	ColorDim     = lipgloss.Color("244") // Dim gray
	ColorDimmer  = lipgloss.Color("240") // Darker gray
	ColorSuccess = lipgloss.Color("2")   // Green
	ColorError   = lipgloss.Color("1")   // Red
	ColorWarning = lipgloss.Color("3")   // Yellow
	ColorInfo    = lipgloss.Color("6")   // Cyan
	ColorAccent  = lipgloss.Color("4")   // Blue
)

// =============================================================================
// TITLE & HEADER STYLES
// =============================================================================

// TitleStyle - main view title (bold white on gray background)
var TitleStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorWhite).
	Background(ColorDimmer).
	Padding(0, 1)

// HeaderStyle - section headers (bold gray)
var HeaderStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorDim)

// LabelStyle - field labels (bold cyan)
var LabelStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorInfo)

// =============================================================================
// STATUS STYLES
// =============================================================================

// StatusReadyStyle - idle/ready state (dim)
var StatusReadyStyle = lipgloss.NewStyle().
	Foreground(ColorDim)

// StatusActiveStyle - operation in progress (bold cyan)
var StatusActiveStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorInfo)

// StatusSuccessStyle - success messages (bold green)
var StatusSuccessStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorSuccess)

// StatusErrorStyle - error messages (bold red)
var StatusErrorStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorError)

// StatusWarningStyle - warning messages (bold yellow)
var StatusWarningStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorWarning)

// =============================================================================
// LIST & TABLE STYLES
// =============================================================================

// ListNormalStyle - unselected list items
var ListNormalStyle = lipgloss.NewStyle().
	Foreground(ColorGray)

// ListSelectedStyle - selected/cursor item (bold white)
var ListSelectedStyle = lipgloss.NewStyle().
	Foreground(ColorWhite).
	Bold(true)

// ListHeaderStyle - column headers (bold dim)
var ListHeaderStyle = lipgloss.NewStyle().
	Bold(true).
	Foreground(ColorDim)

// =============================================================================
// ITEM STATUS STYLES
// =============================================================================

// ItemValidStyle - valid/OK items (green)
var ItemValidStyle = lipgloss.NewStyle().
	Foreground(ColorSuccess)

// ItemInvalidStyle - invalid/failed items (red)
var ItemInvalidStyle = lipgloss.NewStyle().
	Foreground(ColorError)

// ItemOldStyle - old/stale items (yellow)
var ItemOldStyle = lipgloss.NewStyle().
	Foreground(ColorWarning)

// =============================================================================
// SHORTCUT STYLE
// =============================================================================

// ShortcutStyle - keyboard shortcuts footer (dim)
var ShortcutStyle = lipgloss.NewStyle().
	Foreground(ColorDim)

// =============================================================================
// HELPER PREFIXES (no emoticons)
// =============================================================================

const (
	PrefixOK      = "[OK]"
	PrefixFail    = "[FAIL]"
	PrefixWarn    = "[!]"
	PrefixInfo    = "[i]"
	PrefixPlus    = "[+]"
	PrefixMinus   = "[-]"
	PrefixArrow   = ">"
	PrefixSpinner = "" // Spinner character added dynamically
)
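As a hypothetical usage sketch (not part of this change set), the shared styles and prefixes are meant to compose like this inside any view's `View()` method; `renderVerifyStatus` is an invented helper name:

```go
package tui

import "fmt"

// renderVerifyStatus is a hypothetical helper showing the intended pattern:
// semantic style + plain-text prefix, no boxes around inline status.
func renderVerifyStatus(name string, ok bool) string {
	if ok {
		return StatusSuccessStyle.Render(PrefixOK + " Verified: " + name)
	}
	return StatusErrorStyle.Render(fmt.Sprintf("%s Verification failed: %s", PrefixFail, name))
}
```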