Files
dbbackup/internal/security/retention.go
Renz 86eee44d14 security: Implement MEDIUM priority security improvements
MEDIUM Priority Security Features:
- Backup retention policy with automatic cleanup
- Connection rate limiting with exponential backoff
- Privilege level checks (warn if running as root)
- System resource limit awareness (ulimit checks)

New Security Modules (internal/security/):
- retention.go: Automated backup cleanup based on age and count
- ratelimit.go: Connection attempt tracking with exponential backoff
- privileges.go: Root/Administrator detection and warnings
- resources.go: System resource limit checking (file descriptors, memory)

Retention Policy Features:
- Configurable retention period in days (--retention-days)
- Minimum backup count protection (--min-backups)
- Automatic cleanup after successful backups
- Removes old archives with .sha256 and .meta files
- Reports freed disk space

Rate Limiting Features:
- Per-host connection tracking
- Exponential backoff: 1s, 2s, 4s, 8s, 16s, 32s, max 60s
- Automatic reset after successful connections
- Configurable max retry attempts (--max-retries)
- Prevents brute force connection attempts

Privilege Checks:
- Detects root/Administrator execution
- Warns with security recommendations
- Requires --allow-root flag to proceed
- Suggests dedicated backup user creation
- Platform-specific recommendations (Unix/Windows)

Resource Awareness:
- Checks file descriptor limits (ulimit -n)
- Monitors available memory
- Validates resources before backup operations
- Provides recommendations for limit increases
- Cross-platform support (Linux, BSD, macOS, Windows)

Configuration Integration:
- All features configurable via flags and .dbbackup.conf
- Security section in config file
- Environment variable support
- Persistent settings across sessions

Integration Points:
- All backup operations (cluster, single, sample)
- Automatic cleanup after successful backups
- Rate limiting on all database connections
- Privilege checks before operations
- Resource validation for large backups

Default Values:
- Retention: 30 days, minimum 5 backups
- Max retries: 3 attempts
- Allow root: disabled
- Resource checks: enabled

Security Benefits:
- Prevents disk space exhaustion from old backups
- Protects against connection brute force attacks
- Encourages proper privilege separation
- Avoids resource exhaustion failures
- Compliance-ready audit trail

Testing:
- All code compiles successfully
- Cross-platform compatibility maintained
- Ready for production deployment
2025-11-25 14:15:27 +00:00

198 lines
4.6 KiB
Go

package security
import (
"fmt"
"os"
"path/filepath"
"sort"
"time"
"dbbackup/internal/logger"
)
// RetentionPolicy defines backup retention rules
type RetentionPolicy struct {
RetentionDays int
MinBackups int // Minimum backups to keep regardless of age
log logger.Logger
}
// NewRetentionPolicy creates a new retention policy
func NewRetentionPolicy(retentionDays, minBackups int, log logger.Logger) *RetentionPolicy {
return &RetentionPolicy{
RetentionDays: retentionDays,
MinBackups: minBackups,
log: log,
}
}
// ArchiveInfo holds information about a backup archive
type ArchiveInfo struct {
Path string
ModTime time.Time
Size int64
Database string
}
// CleanupOldBackups removes backups older than retention period
func (rp *RetentionPolicy) CleanupOldBackups(backupDir string) (int, int64, error) {
if rp.RetentionDays <= 0 {
return 0, 0, nil // Retention disabled
}
archives, err := rp.scanBackupArchives(backupDir)
if err != nil {
return 0, 0, fmt.Errorf("failed to scan backup directory: %w", err)
}
if len(archives) <= rp.MinBackups {
rp.log.Debug("Keeping all backups (below minimum threshold)",
"count", len(archives), "min_backups", rp.MinBackups)
return 0, 0, nil
}
cutoffTime := time.Now().AddDate(0, 0, -rp.RetentionDays)
// Sort by modification time (oldest first)
sort.Slice(archives, func(i, j int) bool {
return archives[i].ModTime.Before(archives[j].ModTime)
})
var deletedCount int
var freedSpace int64
for i, archive := range archives {
// Keep minimum number of backups
remaining := len(archives) - i
if remaining <= rp.MinBackups {
rp.log.Debug("Stopped cleanup to maintain minimum backups",
"remaining", remaining, "min_backups", rp.MinBackups)
break
}
// Delete if older than retention period
if archive.ModTime.Before(cutoffTime) {
rp.log.Info("Removing old backup",
"file", filepath.Base(archive.Path),
"age_days", int(time.Since(archive.ModTime).Hours()/24),
"size_mb", archive.Size/1024/1024)
if err := os.Remove(archive.Path); err != nil {
rp.log.Warn("Failed to remove old backup", "file", archive.Path, "error", err)
continue
}
// Also remove checksum file if exists
checksumPath := archive.Path + ".sha256"
if _, err := os.Stat(checksumPath); err == nil {
os.Remove(checksumPath)
}
// Also remove metadata file if exists
metadataPath := archive.Path + ".meta"
if _, err := os.Stat(metadataPath); err == nil {
os.Remove(metadataPath)
}
deletedCount++
freedSpace += archive.Size
}
}
if deletedCount > 0 {
rp.log.Info("Cleanup completed",
"deleted_backups", deletedCount,
"freed_space_mb", freedSpace/1024/1024,
"retention_days", rp.RetentionDays)
}
return deletedCount, freedSpace, nil
}
// scanBackupArchives scans directory for backup archives
func (rp *RetentionPolicy) scanBackupArchives(backupDir string) ([]ArchiveInfo, error) {
var archives []ArchiveInfo
entries, err := os.ReadDir(backupDir)
if err != nil {
return nil, err
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
// Skip non-backup files
if !isBackupArchive(name) {
continue
}
path := filepath.Join(backupDir, name)
info, err := entry.Info()
if err != nil {
rp.log.Warn("Failed to get file info", "file", name, "error", err)
continue
}
archives = append(archives, ArchiveInfo{
Path: path,
ModTime: info.ModTime(),
Size: info.Size(),
Database: extractDatabaseName(name),
})
}
return archives, nil
}
// isBackupArchive checks if filename is a backup archive
func isBackupArchive(name string) bool {
return (filepath.Ext(name) == ".dump" ||
filepath.Ext(name) == ".sql" ||
filepath.Ext(name) == ".gz" ||
filepath.Ext(name) == ".tar") &&
name != ".sha256" &&
name != ".meta"
}
// extractDatabaseName extracts database name from archive filename
func extractDatabaseName(filename string) string {
base := filepath.Base(filename)
// Remove extensions
for {
oldBase := base
base = removeExtension(base)
if base == oldBase {
break
}
}
// Remove timestamp patterns
if len(base) > 20 {
// Typically: db_name_20240101_120000
underscoreCount := 0
for i := len(base) - 1; i >= 0; i-- {
if base[i] == '_' {
underscoreCount++
if underscoreCount >= 2 {
return base[:i]
}
}
}
}
return base
}
// removeExtension removes one extension from filename
func removeExtension(name string) string {
if ext := filepath.Ext(name); ext != "" {
return name[:len(name)-len(ext)]
}
return name
}