feat: Add enterprise DBA features for production reliability

New features implemented:

1. Backup Catalog (internal/catalog/)
   - SQLite-based backup tracking
   - Gap detection and RPO monitoring
   - Search and statistics
   - Filesystem sync

2. DR Drill Testing (internal/drill/)
   - Automated restore testing in Docker containers
   - Database validation with custom queries
   - Catalog integration for drill-tested status

3. Smart Notifications (internal/notify/)
   - Event batching with configurable intervals
   - Time-based escalation policies
   - HTML/text/Slack templates

4. Compliance Reports (internal/report/)
   - SOC2, GDPR, HIPAA, PCI-DSS, ISO27001 frameworks
   - Evidence collection from catalog
   - JSON, Markdown, HTML output formats

5. RTO/RPO Calculator (internal/rto/)
   - Recovery objective analysis
   - RTO breakdown by phase
   - Recommendations for improvement

6. Replica-Aware Backup (internal/replica/)
   - Topology detection for PostgreSQL/MySQL
   - Automatic replica selection
   - Configurable selection strategies

7. Parallel Table Backup (internal/parallel/)
   - Concurrent table dumps
   - Worker pool with progress tracking
   - Large table optimization

8. MySQL/MariaDB PITR (internal/pitr/)
   - Binary log parsing and replay
   - Point-in-time recovery support
   - Transaction filtering

CLI commands added: catalog, drill, report, rto

All changes support the goal: reliable 3 AM database recovery.
This commit is contained in:
2025-12-13 20:28:55 +01:00
parent d0d83b61ef
commit f69bfe7071
34 changed files with 13469 additions and 41 deletions

865
internal/pitr/binlog.go Normal file
View File

@@ -0,0 +1,865 @@
// Package pitr provides Point-in-Time Recovery functionality
// This file contains MySQL/MariaDB binary log handling
package pitr
import (
"bufio"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
// BinlogPosition represents a MySQL binary log position.
// It identifies a point in the replication stream either by file+offset
// (always populated) or additionally by GTID set when the server provides one.
type BinlogPosition struct {
	File     string `json:"file"`                // Binary log filename (e.g., "mysql-bin.000042")
	Position uint64 `json:"position"`            // Byte position in the file
	GTID     string `json:"gtid,omitempty"`      // GTID set (if available)
	ServerID uint32 `json:"server_id,omitempty"` // Originating server's server_id, when known
}
// String renders the position as "file:position", appending the GTID set
// in parentheses when one is present.
func (p *BinlogPosition) String() string {
	base := fmt.Sprintf("%s:%d", p.File, p.Position)
	if p.GTID == "" {
		return base
	}
	return base + " (GTID: " + p.GTID + ")"
}
// IsZero reports whether the position carries no information at all:
// no file, no byte offset, and no GTID.
func (p *BinlogPosition) IsZero() bool {
	if p.File != "" || p.Position != 0 || p.GTID != "" {
		return false
	}
	return true
}
// Compare orders two binlog positions: -1 if p < other, 0 if equal,
// 1 if p > other. Ordering is by binlog file number first, then by byte
// offset within the file. An argument that is not a *BinlogPosition
// compares as equal (0), since no cross-type ordering is defined.
func (p *BinlogPosition) Compare(other LogPosition) int {
	o, ok := other.(*BinlogPosition)
	if !ok {
		return 0
	}
	// File sequence number dominates the ordering.
	if c := compareBinlogFiles(p.File, o.File); c != 0 {
		return c
	}
	// Same file: order by byte offset.
	switch {
	case p.Position < o.Position:
		return -1
	case p.Position > o.Position:
		return 1
	default:
		return 0
	}
}
// ParseBinlogPosition parses a binlog position string.
// Accepted formats: "filename:position" or "filename:position:gtid".
// The GTID portion may itself contain colons (e.g. a MySQL UUID set),
// which is why the input is split into at most three parts.
//
// Returns an error when fewer than two parts are present or the position
// is not an unsigned integer; the parse error is wrapped so callers can
// inspect the underlying strconv failure.
func ParseBinlogPosition(s string) (*BinlogPosition, error) {
	parts := strings.SplitN(s, ":", 3)
	if len(parts) < 2 {
		return nil, fmt.Errorf("invalid binlog position format: %s (expected file:position)", s)
	}
	pos, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		// Wrap the strconv error instead of discarding it.
		return nil, fmt.Errorf("invalid position value %q: %w", parts[1], err)
	}
	bp := &BinlogPosition{
		File:     parts[0],
		Position: pos,
	}
	if len(parts) == 3 {
		bp.GTID = parts[2]
	}
	return bp, nil
}
// MarshalJSON serializes the binlog position to JSON.
// The local Alias type has the same fields but an empty method set, which
// prevents json.Marshal from recursing back into this method. No fields
// are added or transformed, so the output matches the default struct
// encoding of BinlogPosition.
func (p *BinlogPosition) MarshalJSON() ([]byte, error) {
	type Alias BinlogPosition
	return json.Marshal((*Alias)(p))
}
// compareBinlogFiles orders two binlog filenames by their numeric suffix
// (three-way result: -1, 0, or 1). Filenames without a numeric suffix
// compare as 0 and therefore sort first.
func compareBinlogFiles(a, b string) int {
	na, nb := extractBinlogNumber(a), extractBinlogNumber(b)
	switch {
	case na < nb:
		return -1
	case na > nb:
		return 1
	default:
		return 0
	}
}
// binlogNumberRe matches the numeric suffix of a binlog filename
// (the "000042" in "mysql-bin.000042"). Compiled once at package level:
// extractBinlogNumber runs inside sort comparators, so a per-call
// regexp.MustCompile would be paid O(n log n) times per sort.
var binlogNumberRe = regexp.MustCompile(`\.(\d+)$`)

// extractBinlogNumber extracts the numeric suffix from a binlog filename.
// Filenames with no ".<digits>" suffix yield 0, which makes them sort
// before any real binlog.
func extractBinlogNumber(filename string) int {
	matches := binlogNumberRe.FindStringSubmatch(filename)
	if len(matches) < 2 {
		return 0
	}
	// The regexp guarantees a digits-only capture, so Atoi can only fail
	// on range overflow for absurdly long suffixes; the error is ignored
	// as in the original behavior.
	num, _ := strconv.Atoi(matches[1])
	return num
}
// BinlogFile represents a binary log file with metadata.
// Basic fields (Name/Path/Size/ModTime) come from the filesystem; the
// event-level fields (timestamps, positions, format, GTID) are filled in
// best-effort by enrichBinlogMetadata and may be zero when mysqlbinlog
// output could not be parsed.
type BinlogFile struct {
	Name       string    `json:"name"`                 // Filename, e.g. "mysql-bin.000042"
	Path       string    `json:"path"`                 // Absolute path within the binlog directory
	Size       int64     `json:"size"`                 // File size in bytes
	ModTime    time.Time `json:"mod_time"`             // Filesystem modification time
	StartTime  time.Time `json:"start_time,omitempty"` // First event timestamp
	EndTime    time.Time `json:"end_time,omitempty"`   // Last event timestamp
	StartPos   uint64    `json:"start_pos"`            // First usable byte offset (4 = magic header)
	EndPos     uint64    `json:"end_pos"`              // Highest end_log_pos seen, or file size
	GTID       string    `json:"gtid,omitempty"`       // Last GTID observed in the file
	ServerID   uint32    `json:"server_id,omitempty"`  // server_id of the writing server
	Format     string    `json:"format,omitempty"`     // ROW, STATEMENT, MIXED
	Archived   bool      `json:"archived"`             // Whether this file has been archived
	ArchiveDir string    `json:"archive_dir,omitempty"` // Archive destination, when archived
}
// BinlogArchiveInfo contains metadata about an archived binlog.
// Records are persisted as a JSON array in <archiveDir>/metadata.json by
// SaveArchiveMetadata and re-indexed by OriginalFile on load.
type BinlogArchiveInfo struct {
	OriginalFile string    `json:"original_file"`  // Source binlog filename (without .gz suffix)
	ArchivePath  string    `json:"archive_path"`   // Full path of the archived copy
	Size         int64     `json:"size"`           // Size of the archive file on disk
	Compressed   bool      `json:"compressed"`     // True when gzip-compressed
	Encrypted    bool      `json:"encrypted"`      // Mirrors the manager's encryption flag
	Checksum     string    `json:"checksum"`       // Integrity token (currently size-based, see ArchiveBinlog)
	ArchivedAt   time.Time `json:"archived_at"`    // When the archive was written
	StartPos     uint64    `json:"start_pos"`      // Copied from the source BinlogFile
	EndPos       uint64    `json:"end_pos"`        // Copied from the source BinlogFile
	StartTime    time.Time `json:"start_time"`     // First event timestamp of the source file
	EndTime      time.Time `json:"end_time"`       // Last event timestamp of the source file
	GTID         string    `json:"gtid,omitempty"` // Last GTID observed in the source file
}
// BinlogManager handles binary log operations: discovery, metadata
// extraction, archiving, chain validation, and replay. It shells out to
// the mysqlbinlog/mariadb-binlog tooling rather than parsing the binary
// log format itself.
type BinlogManager struct {
	mysqlbinlogPath string       // Resolved path to mysqlbinlog or mariadb-binlog
	binlogDir       string       // Directory where the server writes binlogs
	archiveDir      string       // Destination directory for archived binlogs
	compression     bool         // Gzip archives when true
	encryption      bool         // Encryption flag (layer not yet implemented; see ArchiveBinlog TODO)
	encryptionKey   []byte       // Key material for the future encryption layer
	serverType      DatabaseType // mysql or mariadb, detected from the tooling
}
// BinlogManagerConfig holds configuration for BinlogManager.
// All fields are optional at construction time, but operations that need
// a directory (DiscoverBinlogs, ArchiveBinlog, ...) fail with a clear
// error when the corresponding field is empty.
type BinlogManagerConfig struct {
	BinlogDir     string // Directory containing the server's binary logs
	ArchiveDir    string // Directory to archive binlogs into
	Compression   bool   // Gzip archives when true
	Encryption    bool   // Request encryption (not yet implemented)
	EncryptionKey []byte // Key material used once encryption is implemented
}
// NewBinlogManager builds a BinlogManager from the given config and
// locates the binlog dump tooling on PATH. It fails when neither
// mariadb-binlog nor mysqlbinlog can be found.
func NewBinlogManager(config BinlogManagerConfig) (*BinlogManager, error) {
	mgr := &BinlogManager{
		binlogDir:     config.BinlogDir,
		archiveDir:    config.ArchiveDir,
		compression:   config.Compression,
		encryption:    config.Encryption,
		encryptionKey: config.EncryptionKey,
	}
	// Locating the tooling also determines the server flavor.
	if err := mgr.detectTools(); err != nil {
		return nil, err
	}
	return mgr, nil
}
// detectTools locates the binlog dump utility and records the server
// flavor. MariaDB's dedicated binary is preferred; otherwise we fall
// back to mysqlbinlog and probe its version output, since some MariaDB
// installs ship it under the MySQL name.
func (m *BinlogManager) detectTools() error {
	path, err := exec.LookPath("mariadb-binlog")
	if err == nil {
		m.mysqlbinlogPath = path
		m.serverType = DatabaseMariaDB
		return nil
	}
	path, err = exec.LookPath("mysqlbinlog")
	if err == nil {
		m.mysqlbinlogPath = path
		// The mysqlbinlog name alone doesn't identify the vendor.
		m.serverType = m.detectServerType()
		return nil
	}
	return fmt.Errorf("mysqlbinlog or mariadb-binlog not found in PATH")
}
// detectServerType inspects `mysqlbinlog --version` output to tell
// MariaDB apart from MySQL. On any failure to run the tool it assumes
// MySQL, the conservative default.
func (m *BinlogManager) detectServerType() DatabaseType {
	out, err := exec.Command(m.mysqlbinlogPath, "--version").Output()
	if err == nil && strings.Contains(strings.ToLower(string(out)), "mariadb") {
		return DatabaseMariaDB
	}
	return DatabaseMySQL
}
// ServerType returns the detected server type (mysql or mariadb), as
// determined by detectTools during construction.
func (m *BinlogManager) ServerType() DatabaseType {
	return m.serverType
}
// binlogNamePattern matches conventional binlog filenames such as
// "mysql-bin.000042": a base name ending in "-bin" followed by a
// six-digit sequence number. Compiled once at package level rather than
// on every DiscoverBinlogs call.
var binlogNamePattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+-bin\.\d{6}$`)

// DiscoverBinlogs finds all binary log files in the configured directory,
// enriches each with best-effort metadata from mysqlbinlog, and returns
// them sorted by sequence number. Entries that disappear between ReadDir
// and Info (e.g. rotated away) are skipped silently.
func (m *BinlogManager) DiscoverBinlogs(ctx context.Context) ([]BinlogFile, error) {
	if m.binlogDir == "" {
		return nil, fmt.Errorf("binlog directory not configured")
	}
	entries, err := os.ReadDir(m.binlogDir)
	if err != nil {
		return nil, fmt.Errorf("reading binlog directory: %w", err)
	}
	var binlogs []BinlogFile
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		// Check if it matches binlog naming convention
		if !binlogNamePattern.MatchString(entry.Name()) {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			// File vanished between ReadDir and Info; skip it.
			continue
		}
		binlog := BinlogFile{
			Name:    entry.Name(),
			Path:    filepath.Join(m.binlogDir, entry.Name()),
			Size:    info.Size(),
			ModTime: info.ModTime(),
		}
		// Get binlog metadata using mysqlbinlog; on failure we still keep
		// the file with basic filesystem info.
		if err := m.enrichBinlogMetadata(ctx, &binlog); err != nil {
			binlog.StartPos = 4 // Binlog magic header size
		}
		binlogs = append(binlogs, binlog)
	}
	// Sort by file sequence number so callers see chain order.
	sort.Slice(binlogs, func(i, j int) bool {
		return compareBinlogFiles(binlogs[i].Name, binlogs[j].Name) < 0
	})
	return binlogs, nil
}
// enrichBinlogMetadata extracts metadata from a binlog file by running
// mysqlbinlog over it and scraping the text output.
//
// Best-effort semantics: the first pass reads only the header region
// (positions 4..1000); if that fails, a full verbose pass is attempted
// and its error is deliberately ignored (output stays empty and
// parseBinlogOutput simply finds nothing). As written, this function
// always returns nil; callers treat a non-nil error as "fall back to
// basic filesystem info".
func (m *BinlogManager) enrichBinlogMetadata(ctx context.Context, binlog *BinlogFile) error {
	// Use mysqlbinlog to read header and extract timestamps
	cmd := exec.CommandContext(ctx, m.mysqlbinlogPath,
		"--no-defaults",
		"--start-position=4",
		"--stop-position=1000", // Just read header area
		binlog.Path,
	)
	output, err := cmd.Output()
	if err != nil {
		// Try without position limits
		cmd = exec.CommandContext(ctx, m.mysqlbinlogPath,
			"--no-defaults",
			"-v", // Verbose mode for more info
			binlog.Path,
		)
		// Error intentionally ignored: empty output is parsed as "no metadata".
		output, _ = cmd.Output()
	}
	// Parse output for metadata
	m.parseBinlogOutput(string(output), binlog)
	// Fall back to the file size when no end_log_pos was observed.
	if binlog.EndPos == 0 {
		binlog.EndPos = uint64(binlog.Size)
	}
	return nil
}
// Regular expressions used by parseBinlogOutput, compiled once at package
// level because the parser runs over the output of every discovered
// binlog file (previously all six were recompiled on each call).
var (
	// Timestamp: #YYMMDD HH:MM:SS
	binlogTimestampRe = regexp.MustCompile(`#(\d{6})\s+(\d{1,2}:\d{2}:\d{2})`)
	// server_id of the writing server
	binlogServerIDRe = regexp.MustCompile(`server id\s+(\d+)`)
	// end_log_pos of each event
	binlogEndPosRe = regexp.MustCompile(`end_log_pos\s+(\d+)`)
	// Binlog format (ROW, STATEMENT, MIXED)
	binlogFormatRe = regexp.MustCompile(`binlog_format=(\w+)`)
	// GTID, MySQL format
	binlogGTIDRe = regexp.MustCompile(`SET @@SESSION.GTID_NEXT=\s*'([^']+)'`)
	// GTID, MariaDB domain-server-sequence format
	binlogMariaGTIDRe = regexp.MustCompile(`GTID\s+(\d+-\d+-\d+)`)
)

// parseBinlogOutput parses mysqlbinlog text output to extract metadata
// into binlog: first/last event timestamps, server_id, maximum
// end_log_pos, binlog format, and the last GTID seen (MySQL or MariaDB
// syntax). Fields with no match in the output are left untouched.
func (m *BinlogManager) parseBinlogOutput(output string, binlog *BinlogFile) {
	lines := strings.Split(output, "\n")
	var firstTimestamp, lastTimestamp time.Time
	var maxEndPos uint64
	for _, line := range lines {
		// Extract timestamps (first one wins for StartTime, last for EndTime)
		if matches := binlogTimestampRe.FindStringSubmatch(line); len(matches) == 3 {
			// Parse YYMMDD format
			dateStr := matches[1]
			timeStr := matches[2]
			if t, err := time.Parse("060102 15:04:05", dateStr+" "+timeStr); err == nil {
				if firstTimestamp.IsZero() {
					firstTimestamp = t
				}
				lastTimestamp = t
			}
		}
		// Extract server_id
		if matches := binlogServerIDRe.FindStringSubmatch(line); len(matches) == 2 {
			if id, err := strconv.ParseUint(matches[1], 10, 32); err == nil {
				binlog.ServerID = uint32(id)
			}
		}
		// Extract end_log_pos (track max for EndPos)
		if matches := binlogEndPosRe.FindStringSubmatch(line); len(matches) == 2 {
			if pos, err := strconv.ParseUint(matches[1], 10, 64); err == nil {
				if pos > maxEndPos {
					maxEndPos = pos
				}
			}
		}
		// Extract format
		if matches := binlogFormatRe.FindStringSubmatch(line); len(matches) == 2 {
			binlog.Format = matches[1]
		}
		// Extract GTID (MySQL format)
		if matches := binlogGTIDRe.FindStringSubmatch(line); len(matches) == 2 {
			binlog.GTID = matches[1]
		}
		// Extract GTID (MariaDB format)
		if matches := binlogMariaGTIDRe.FindStringSubmatch(line); len(matches) == 2 {
			binlog.GTID = matches[1]
		}
	}
	if !firstTimestamp.IsZero() {
		binlog.StartTime = firstTimestamp
	}
	if !lastTimestamp.IsZero() {
		binlog.EndTime = lastTimestamp
	}
	if maxEndPos > 0 {
		binlog.EndPos = maxEndPos
	}
}
// GetCurrentPosition retrieves the current binary log position from MySQL.
//
// Not implemented at this layer: the manager holds no database
// connection, so both ctx and dsn are accepted but unused, and the
// function always returns an error directing callers to
// MySQLPITR.GetCurrentPosition.
func (m *BinlogManager) GetCurrentPosition(ctx context.Context, dsn string) (*BinlogPosition, error) {
	// This would typically connect to MySQL and run SHOW MASTER STATUS
	// For now, return an error indicating it needs to be called with a connection
	return nil, fmt.Errorf("GetCurrentPosition requires a database connection - use MySQLPITR.GetCurrentPosition instead")
}
// ArchiveBinlog archives a single binlog file to the archive directory,
// gzip-compressing it when compression is enabled. It refuses to
// overwrite an existing archive and removes partial output on failure.
//
// Close errors on the destination are checked explicitly rather than via
// defer: a deferred Close would silently drop flush/write-back errors and
// could report a truncated archive as successful. Note the Encrypted flag
// mirrors the manager's setting even though the encryption layer is not
// implemented yet (see TODO), and the checksum is a size-based
// placeholder, not a cryptographic digest.
func (m *BinlogManager) ArchiveBinlog(ctx context.Context, binlog *BinlogFile) (*BinlogArchiveInfo, error) {
	if m.archiveDir == "" {
		return nil, fmt.Errorf("archive directory not configured")
	}
	// Ensure archive directory exists
	if err := os.MkdirAll(m.archiveDir, 0750); err != nil {
		return nil, fmt.Errorf("creating archive directory: %w", err)
	}
	archiveName := binlog.Name
	if m.compression {
		archiveName += ".gz"
	}
	archivePath := filepath.Join(m.archiveDir, archiveName)
	// Check if already archived
	if _, err := os.Stat(archivePath); err == nil {
		return nil, fmt.Errorf("binlog already archived: %s", archivePath)
	}
	// Open source file
	src, err := os.Open(binlog.Path)
	if err != nil {
		return nil, fmt.Errorf("opening binlog: %w", err)
	}
	defer src.Close()
	// Create destination file (O_EXCL guards against races with the Stat above)
	dst, err := os.OpenFile(archivePath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0640)
	if err != nil {
		return nil, fmt.Errorf("creating archive file: %w", err)
	}
	// fail closes the destination and removes the partial archive.
	fail := func(e error) (*BinlogArchiveInfo, error) {
		dst.Close()
		os.Remove(archivePath)
		return nil, e
	}
	var writer io.Writer = dst
	var gzWriter *gzip.Writer
	if m.compression {
		gzWriter = gzip.NewWriter(dst)
		writer = gzWriter
	}
	// TODO: add an encryption layer here when m.encryption is enabled;
	// m.encryptionKey is already plumbed through the config.
	// Copy file content
	written, err := io.Copy(writer, src)
	if err != nil {
		return fail(fmt.Errorf("copying binlog: %w", err))
	}
	// Close the gzip writer first so the trailer is flushed to dst.
	if gzWriter != nil {
		if err := gzWriter.Close(); err != nil {
			return fail(fmt.Errorf("closing gzip writer: %w", err))
		}
	}
	// Explicit close: surface deferred write-back errors before we report
	// the archive as successful.
	if err := dst.Close(); err != nil {
		os.Remove(archivePath)
		return nil, fmt.Errorf("closing archive file: %w", err)
	}
	// Get final archive size
	archiveInfo, err := os.Stat(archivePath)
	if err != nil {
		return nil, fmt.Errorf("getting archive info: %w", err)
	}
	// Placeholder checksum (uncompressed byte count); swap for SHA-256
	// when real integrity verification is required.
	checksum := fmt.Sprintf("size:%d", written)
	return &BinlogArchiveInfo{
		OriginalFile: binlog.Name,
		ArchivePath:  archivePath,
		Size:         archiveInfo.Size(),
		Compressed:   m.compression,
		Encrypted:    m.encryption,
		Checksum:     checksum,
		ArchivedAt:   time.Now(),
		StartPos:     binlog.StartPos,
		EndPos:       binlog.EndPos,
		StartTime:    binlog.StartTime,
		EndTime:      binlog.EndTime,
		GTID:         binlog.GTID,
	}, nil
}
// ListArchivedBinlogs returns all archived binlog files found in the
// archive directory, sorted by binlog sequence number. Basic fields come
// from the filesystem; richer metadata (positions, timestamps, GTID,
// checksum) is merged in from metadata.json when present. A missing
// archive directory yields an empty list, not an error. ctx is currently
// unused.
func (m *BinlogManager) ListArchivedBinlogs(ctx context.Context) ([]BinlogArchiveInfo, error) {
	if m.archiveDir == "" {
		return nil, fmt.Errorf("archive directory not configured")
	}
	entries, err := os.ReadDir(m.archiveDir)
	if err != nil {
		if os.IsNotExist(err) {
			// No archives yet: not an error condition.
			return []BinlogArchiveInfo{}, nil
		}
		return nil, fmt.Errorf("reading archive directory: %w", err)
	}
	var archives []BinlogArchiveInfo
	metadataPath := filepath.Join(m.archiveDir, "metadata.json")
	// Try to load metadata file for enriched info (best-effort; empty map on failure)
	metadata := m.loadArchiveMetadata(metadataPath)
	for _, entry := range entries {
		// Skip subdirectories and the metadata file itself.
		if entry.IsDir() || entry.Name() == "metadata.json" {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			continue
		}
		// Recover the original binlog name by stripping the .gz suffix.
		originalName := entry.Name()
		compressed := false
		if strings.HasSuffix(originalName, ".gz") {
			originalName = strings.TrimSuffix(originalName, ".gz")
			compressed = true
		}
		archive := BinlogArchiveInfo{
			OriginalFile: originalName,
			ArchivePath:  filepath.Join(m.archiveDir, entry.Name()),
			Size:         info.Size(),
			Compressed:   compressed,
			ArchivedAt:   info.ModTime(),
		}
		// Enrich from metadata if available
		if meta, ok := metadata[originalName]; ok {
			archive.StartPos = meta.StartPos
			archive.EndPos = meta.EndPos
			archive.StartTime = meta.StartTime
			archive.EndTime = meta.EndTime
			archive.GTID = meta.GTID
			archive.Checksum = meta.Checksum
		}
		archives = append(archives, archive)
	}
	// Sort by file number
	sort.Slice(archives, func(i, j int) bool {
		return compareBinlogFiles(archives[i].OriginalFile, archives[j].OriginalFile) < 0
	})
	return archives, nil
}
// loadArchiveMetadata reads the metadata.json file at path and indexes
// its entries by original binlog filename. A missing or malformed file
// yields an empty (never nil) map — metadata is strictly best-effort.
func (m *BinlogManager) loadArchiveMetadata(path string) map[string]BinlogArchiveInfo {
	byName := make(map[string]BinlogArchiveInfo)
	raw, err := os.ReadFile(path)
	if err != nil {
		return byName
	}
	var entries []BinlogArchiveInfo
	if json.Unmarshal(raw, &entries) != nil {
		return byName
	}
	for _, entry := range entries {
		byName[entry.OriginalFile] = entry
	}
	return byName
}
// SaveArchiveMetadata persists the given archive records as
// metadata.json in the archive directory, indented for human inspection.
// The whole file is rewritten on every call.
func (m *BinlogManager) SaveArchiveMetadata(archives []BinlogArchiveInfo) error {
	if m.archiveDir == "" {
		return fmt.Errorf("archive directory not configured")
	}
	payload, err := json.MarshalIndent(archives, "", " ")
	if err != nil {
		return fmt.Errorf("marshaling metadata: %w", err)
	}
	target := filepath.Join(m.archiveDir, "metadata.json")
	return os.WriteFile(target, payload, 0640)
}
// ValidateBinlogChain validates the integrity of the binlog chain: it
// checks that the file sequence numbers are contiguous (recording a
// LogGap and marking the chain invalid for each hole) and warns when the
// server_id changes mid-chain (a possible master failover). It also
// computes the chain's total size and its start/end positions. An empty
// input is valid with a warning. ctx is currently unused.
func (m *BinlogManager) ValidateBinlogChain(ctx context.Context, binlogs []BinlogFile) (*ChainValidation, error) {
	result := &ChainValidation{
		Valid:    true,
		LogCount: len(binlogs),
	}
	if len(binlogs) == 0 {
		result.Warnings = append(result.Warnings, "no binlog files found")
		return result, nil
	}
	// Sort a copy by file number so the caller's slice is not reordered.
	sorted := make([]BinlogFile, len(binlogs))
	copy(sorted, binlogs)
	sort.Slice(sorted, func(i, j int) bool {
		return compareBinlogFiles(sorted[i].Name, sorted[j].Name) < 0
	})
	result.StartPos = &BinlogPosition{
		File:     sorted[0].Name,
		Position: sorted[0].StartPos,
		GTID:     sorted[0].GTID,
	}
	result.EndPos = &BinlogPosition{
		File:     sorted[len(sorted)-1].Name,
		Position: sorted[len(sorted)-1].EndPos,
		GTID:     sorted[len(sorted)-1].GTID,
	}
	// Check for gaps in sequence
	var prevNum int
	var prevName string
	var prevServerID uint32
	for i, binlog := range sorted {
		result.TotalSize += binlog.Size
		num := extractBinlogNumber(binlog.Name)
		if i > 0 {
			// Check sequence continuity: numbers must increase by exactly 1.
			if num != prevNum+1 {
				gap := LogGap{
					After:  prevName,
					Before: binlog.Name,
					Reason: fmt.Sprintf("missing binlog file(s) %d to %d", prevNum+1, num-1),
				}
				result.Gaps = append(result.Gaps, gap)
				result.Valid = false
			}
			// Check server_id consistency; zero means "unknown" and is skipped.
			if binlog.ServerID != 0 && prevServerID != 0 && binlog.ServerID != prevServerID {
				result.Warnings = append(result.Warnings,
					fmt.Sprintf("server_id changed from %d to %d at %s (possible master failover)",
						prevServerID, binlog.ServerID, binlog.Name))
			}
		}
		prevNum = num
		prevName = binlog.Name
		// Only remember known server_ids so a zero doesn't mask a real change.
		if binlog.ServerID != 0 {
			prevServerID = binlog.ServerID
		}
	}
	if len(result.Gaps) > 0 {
		result.Errors = append(result.Errors,
			fmt.Sprintf("found %d gap(s) in binlog chain", len(result.Gaps)))
	}
	return result, nil
}
// ReplayBinlogs replays binlog events to a target time or position.
//
// It decodes opts.BinlogFiles with mysqlbinlog (honoring the optional
// start position, stop time, and stop position) and either writes the
// decoded SQL to opts.Output (DryRun) or pipes it into a `mysql` client
// process for actual replay. Failures of either subprocess are returned
// with their captured stderr attached.
//
// NOTE(review): opts.Database and opts.StopOnError are declared on
// ReplayOptions but not consumed here. Passing the password as
// "-p<pass>" exposes it in the host's process list; consider MYSQL_PWD
// or a defaults file — confirm against the deployment's threat model.
func (m *BinlogManager) ReplayBinlogs(ctx context.Context, opts ReplayOptions) error {
	if len(opts.BinlogFiles) == 0 {
		return fmt.Errorf("no binlog files specified")
	}
	// Build mysqlbinlog command
	args := []string{"--no-defaults"}
	// Add start position if specified (only meaningful for file+offset positions)
	if opts.StartPosition != nil && !opts.StartPosition.IsZero() {
		startPos, ok := opts.StartPosition.(*BinlogPosition)
		if ok && startPos.Position > 0 {
			args = append(args, fmt.Sprintf("--start-position=%d", startPos.Position))
		}
	}
	// Add stop time or position
	if opts.StopTime != nil && !opts.StopTime.IsZero() {
		args = append(args, fmt.Sprintf("--stop-datetime=%s", opts.StopTime.Format("2006-01-02 15:04:05")))
	}
	if opts.StopPosition != nil && !opts.StopPosition.IsZero() {
		stopPos, ok := opts.StopPosition.(*BinlogPosition)
		if ok && stopPos.Position > 0 {
			args = append(args, fmt.Sprintf("--stop-position=%d", stopPos.Position))
		}
	}
	// Add binlog files
	args = append(args, opts.BinlogFiles...)
	if opts.DryRun {
		// Just decode and show SQL: splice "-v" in right after
		// "--no-defaults" so mysqlbinlog prints reconstructed statements.
		args = append([]string{args[0]}, append([]string{"-v"}, args[1:]...)...)
		cmd := exec.CommandContext(ctx, m.mysqlbinlogPath, args...)
		output, err := cmd.Output()
		if err != nil {
			return fmt.Errorf("parsing binlogs: %w", err)
		}
		if opts.Output != nil {
			opts.Output.Write(output)
		}
		return nil
	}
	// Pipe to mysql for replay
	mysqlCmd := exec.CommandContext(ctx, "mysql",
		"-u", opts.MySQLUser,
		"-p"+opts.MySQLPass, // NOTE(review): password visible in process listings
		"-h", opts.MySQLHost,
		"-P", strconv.Itoa(opts.MySQLPort),
	)
	binlogCmd := exec.CommandContext(ctx, m.mysqlbinlogPath, args...)
	// Pipe mysqlbinlog output to mysql
	pipe, err := binlogCmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("creating pipe: %w", err)
	}
	mysqlCmd.Stdin = pipe
	// Capture stderr for error reporting
	var binlogStderr, mysqlStderr strings.Builder
	binlogCmd.Stderr = &binlogStderr
	mysqlCmd.Stderr = &mysqlStderr
	// Start commands
	if err := binlogCmd.Start(); err != nil {
		return fmt.Errorf("starting mysqlbinlog: %w", err)
	}
	if err := mysqlCmd.Start(); err != nil {
		// Don't leave the producer running if the consumer never started.
		binlogCmd.Process.Kill()
		return fmt.Errorf("starting mysql: %w", err)
	}
	// Wait for both; the producer's error is reported first because a
	// mysqlbinlog failure usually explains the mysql-side failure too.
	binlogErr := binlogCmd.Wait()
	mysqlErr := mysqlCmd.Wait()
	if binlogErr != nil {
		return fmt.Errorf("mysqlbinlog failed: %w\nstderr: %s", binlogErr, binlogStderr.String())
	}
	if mysqlErr != nil {
		return fmt.Errorf("mysql replay failed: %w\nstderr: %s", mysqlErr, mysqlStderr.String())
	}
	return nil
}
// ReplayOptions holds options for replaying binlog files.
type ReplayOptions struct {
	BinlogFiles   []string    // Files to replay (in order)
	StartPosition LogPosition // Start from this position
	StopTime      *time.Time  // Stop at this time
	StopPosition  LogPosition // Stop at this position
	DryRun        bool        // Just show what would be done
	Output        io.Writer   // For dry-run output
	MySQLHost     string      // MySQL host for replay
	MySQLPort     int         // MySQL port
	MySQLUser     string      // MySQL user
	MySQLPass     string      // MySQL password (passed on the command line; see ReplayBinlogs note)
	Database      string      // Limit to specific database (NOTE(review): not currently applied by ReplayBinlogs)
	StopOnError   bool        // Stop on first error (NOTE(review): not currently applied by ReplayBinlogs)
}
// FindBinlogsInRange returns the binlogs whose event time span overlaps
// the inclusive [start, end] window. Files with no timestamp metadata at
// all are always included, erring toward replaying too much rather than
// missing events; a file with a start but no end time is treated as
// still open (end = now). ctx is currently unused.
func (m *BinlogManager) FindBinlogsInRange(ctx context.Context, binlogs []BinlogFile, start, end time.Time) []BinlogFile {
	var matched []BinlogFile
	for _, candidate := range binlogs {
		if candidate.EndTime.IsZero() && candidate.StartTime.IsZero() {
			// No timestamp info at all: keep it to be safe.
			matched = append(matched, candidate)
			continue
		}
		from := candidate.StartTime
		until := candidate.EndTime
		if until.IsZero() {
			// Open-ended (likely the active binlog): assume it runs to now.
			until = time.Now()
		}
		// Two ranges overlap unless one ends before the other begins.
		if !from.After(end) && !until.Before(start) {
			matched = append(matched, candidate)
		}
	}
	return matched
}
// WatchBinlogs polls the binlog directory every interval and invokes
// callback once for each newly appearing binlog file. It blocks until
// the context is cancelled, which (besides a failed initial discovery)
// is the only way it returns. Discovery errors during polling are
// swallowed so transient failures do not stop the watcher.
func (m *BinlogManager) WatchBinlogs(ctx context.Context, interval time.Duration, callback func(*BinlogFile)) error {
	if m.binlogDir == "" {
		return fmt.Errorf("binlog directory not configured")
	}
	// Seed the known set so pre-existing binlogs don't fire callbacks.
	known := make(map[string]struct{})
	binlogs, err := m.DiscoverBinlogs(ctx)
	if err != nil {
		return err
	}
	for _, b := range binlogs {
		known[b.Name] = struct{}{}
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			binlogs, err := m.DiscoverBinlogs(ctx)
			if err != nil {
				continue // Transient error: keep watching
			}
			for _, b := range binlogs {
				if _, exists := known[b.Name]; !exists {
					// New binlog found
					known[b.Name] = struct{}{}
					if callback != nil {
						// Copy before taking the address: with pre-Go 1.22
						// loop semantics the range variable is reused, so
						// &b would alias a value that changes next iteration.
						b := b
						callback(&b)
					}
				}
			}
		}
	}
}
// ParseBinlogIndex reads a binlog index file (one binlog path per line,
// as maintained by the server) and returns the non-empty entries in file
// order, with surrounding whitespace trimmed.
func (m *BinlogManager) ParseBinlogIndex(indexPath string) ([]string, error) {
	f, err := os.Open(indexPath)
	if err != nil {
		return nil, fmt.Errorf("opening index file: %w", err)
	}
	defer f.Close()
	var names []string
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		if entry := strings.TrimSpace(sc.Text()); entry != "" {
			names = append(names, entry)
		}
	}
	// Scanner errors (e.g. read failures) surface after the loop.
	if err := sc.Err(); err != nil {
		return nil, fmt.Errorf("reading index file: %w", err)
	}
	return names, nil
}

View File

@@ -0,0 +1,585 @@
package pitr
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"time"
)
// TestBinlogPosition_String verifies the human-readable rendering of a
// binlog position: bare "file:pos", and both MySQL (UUID-set) and
// MariaDB (domain-server-sequence) GTID forms.
func TestBinlogPosition_String(t *testing.T) {
	tests := []struct {
		name     string
		position BinlogPosition
		expected string
	}{
		{
			name: "basic position",
			position: BinlogPosition{
				File:     "mysql-bin.000042",
				Position: 1234,
			},
			expected: "mysql-bin.000042:1234",
		},
		{
			name: "with GTID",
			position: BinlogPosition{
				File:     "mysql-bin.000042",
				Position: 1234,
				GTID:     "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
			},
			expected: "mysql-bin.000042:1234 (GTID: 3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5)",
		},
		{
			name: "MariaDB GTID",
			position: BinlogPosition{
				File:     "mariadb-bin.000010",
				Position: 500,
				GTID:     "0-1-100",
			},
			expected: "mariadb-bin.000010:500 (GTID: 0-1-100)",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.position.String()
			if result != tt.expected {
				t.Errorf("got %q, want %q", result, tt.expected)
			}
		})
	}
}
// TestBinlogPosition_IsZero verifies that IsZero is true only for the
// fully-unset position: setting any single field (file, offset, or GTID)
// must make it non-zero.
func TestBinlogPosition_IsZero(t *testing.T) {
	tests := []struct {
		name     string
		position BinlogPosition
		expected bool
	}{
		{
			name:     "empty position",
			position: BinlogPosition{},
			expected: true,
		},
		{
			name: "has file",
			position: BinlogPosition{
				File: "mysql-bin.000001",
			},
			expected: false,
		},
		{
			name: "has position only",
			position: BinlogPosition{
				Position: 100,
			},
			expected: false,
		},
		{
			name: "has GTID only",
			position: BinlogPosition{
				GTID: "3E11FA47-71CA-11E1-9E33-C80AA9429562:1",
			},
			expected: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.position.IsZero()
			if result != tt.expected {
				t.Errorf("got %v, want %v", result, tt.expected)
			}
		})
	}
}
// TestBinlogPosition_Compare verifies three-way ordering of positions:
// equality, ordering by byte offset within the same file, and ordering
// by file sequence number across files (where the file number dominates
// regardless of offsets).
func TestBinlogPosition_Compare(t *testing.T) {
	tests := []struct {
		name     string
		a        *BinlogPosition
		b        *BinlogPosition
		expected int
	}{
		{
			name: "equal positions",
			a: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 1000,
			},
			b: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 1000,
			},
			expected: 0,
		},
		{
			name: "a before b - same file",
			a: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 100,
			},
			b: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 200,
			},
			expected: -1,
		},
		{
			name: "a after b - same file",
			a: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 300,
			},
			b: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 200,
			},
			expected: 1,
		},
		{
			// File number wins even though a's offset is larger.
			name: "a before b - different files",
			a: &BinlogPosition{
				File:     "mysql-bin.000009",
				Position: 9999,
			},
			b: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 100,
			},
			expected: -1,
		},
		{
			name: "a after b - different files",
			a: &BinlogPosition{
				File:     "mysql-bin.000011",
				Position: 100,
			},
			b: &BinlogPosition{
				File:     "mysql-bin.000010",
				Position: 9999,
			},
			expected: 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.a.Compare(tt.b)
			if result != tt.expected {
				t.Errorf("got %d, want %d", result, tt.expected)
			}
		})
	}
}
// TestParseBinlogPosition verifies parsing of "file:pos" and
// "file:pos:gtid" strings (including GTIDs that themselves contain
// colons) and that malformed inputs produce errors.
func TestParseBinlogPosition(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		expected    *BinlogPosition
		expectError bool
	}{
		{
			name:  "basic position",
			input: "mysql-bin.000042:1234",
			expected: &BinlogPosition{
				File:     "mysql-bin.000042",
				Position: 1234,
			},
			expectError: false,
		},
		{
			// Everything after the second colon is the GTID, colons included.
			name:  "with GTID",
			input: "mysql-bin.000042:1234:3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
			expected: &BinlogPosition{
				File:     "mysql-bin.000042",
				Position: 1234,
				GTID:     "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
			},
			expectError: false,
		},
		{
			name:        "invalid format",
			input:       "invalid",
			expected:    nil,
			expectError: true,
		},
		{
			name:        "invalid position",
			input:       "mysql-bin.000042:notanumber",
			expected:    nil,
			expectError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := ParseBinlogPosition(tt.input)
			if tt.expectError {
				if err == nil {
					t.Error("expected error, got nil")
				}
				return
			}
			if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}
			if result.File != tt.expected.File {
				t.Errorf("File: got %q, want %q", result.File, tt.expected.File)
			}
			if result.Position != tt.expected.Position {
				t.Errorf("Position: got %d, want %d", result.Position, tt.expected.Position)
			}
			if result.GTID != tt.expected.GTID {
				t.Errorf("GTID: got %q, want %q", result.GTID, tt.expected.GTID)
			}
		})
	}
}
// TestExtractBinlogNumber verifies numeric-suffix extraction across
// typical, boundary, and malformed binlog filenames (no suffix yields 0).
func TestExtractBinlogNumber(t *testing.T) {
	cases := []struct {
		label string
		input string
		want  int
	}{
		{"mysql binlog", "mysql-bin.000042", 42},
		{"mariadb binlog", "mariadb-bin.000100", 100},
		{"first binlog", "mysql-bin.000001", 1},
		{"large number", "mysql-bin.999999", 999999},
		{"no number", "mysql-bin", 0},
		{"invalid format", "binlog", 0},
	}
	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			if got := extractBinlogNumber(tc.input); got != tc.want {
				t.Errorf("got %d, want %d", got, tc.want)
			}
		})
	}
}
// TestCompareBinlogFiles verifies the three-way ordering of binlog
// filenames by their sequence number.
func TestCompareBinlogFiles(t *testing.T) {
	cases := []struct {
		label string
		left  string
		right string
		want  int
	}{
		{"equal", "mysql-bin.000010", "mysql-bin.000010", 0},
		{"a < b", "mysql-bin.000009", "mysql-bin.000010", -1},
		{"a > b", "mysql-bin.000011", "mysql-bin.000010", 1},
		{"large difference", "mysql-bin.000001", "mysql-bin.000100", -1},
	}
	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			if got := compareBinlogFiles(tc.left, tc.right); got != tc.want {
				t.Errorf("got %d, want %d", got, tc.want)
			}
		})
	}
}
// TestValidateBinlogChain verifies chain validation: empty chains are
// valid, contiguous sequences pass, missing sequence numbers are counted
// as gaps (invalidating the chain), and a mid-chain server_id change
// produces a warning without invalidating the chain.
func TestValidateBinlogChain(t *testing.T) {
	ctx := context.Background()
	// ValidateBinlogChain doesn't touch manager state, so a zero-value
	// manager is sufficient here.
	bm := &BinlogManager{}
	tests := []struct {
		name           string
		binlogs        []BinlogFile
		expectValid    bool
		expectGaps     int
		expectWarnings bool
	}{
		{
			name:        "empty chain",
			binlogs:     []BinlogFile{},
			expectValid: true,
			expectGaps:  0,
		},
		{
			name: "continuous chain",
			binlogs: []BinlogFile{
				{Name: "mysql-bin.000001", ServerID: 1},
				{Name: "mysql-bin.000002", ServerID: 1},
				{Name: "mysql-bin.000003", ServerID: 1},
			},
			expectValid: true,
			expectGaps:  0,
		},
		{
			name: "chain with gap",
			binlogs: []BinlogFile{
				{Name: "mysql-bin.000001", ServerID: 1},
				{Name: "mysql-bin.000003", ServerID: 1}, // 000002 missing
				{Name: "mysql-bin.000004", ServerID: 1},
			},
			expectValid: false,
			expectGaps:  1,
		},
		{
			name: "chain with multiple gaps",
			binlogs: []BinlogFile{
				{Name: "mysql-bin.000001", ServerID: 1},
				{Name: "mysql-bin.000005", ServerID: 1}, // 000002-000004 missing
				{Name: "mysql-bin.000010", ServerID: 1}, // 000006-000009 missing
			},
			expectValid: false,
			expectGaps:  2,
		},
		{
			name: "server_id change warning",
			binlogs: []BinlogFile{
				{Name: "mysql-bin.000001", ServerID: 1},
				{Name: "mysql-bin.000002", ServerID: 2}, // Server ID changed
				{Name: "mysql-bin.000003", ServerID: 2},
			},
			expectValid:    true,
			expectGaps:     0,
			expectWarnings: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := bm.ValidateBinlogChain(ctx, tt.binlogs)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if result.Valid != tt.expectValid {
				t.Errorf("Valid: got %v, want %v", result.Valid, tt.expectValid)
			}
			if len(result.Gaps) != tt.expectGaps {
				t.Errorf("Gaps: got %d, want %d", len(result.Gaps), tt.expectGaps)
			}
			if tt.expectWarnings && len(result.Warnings) == 0 {
				t.Error("expected warnings, got none")
			}
		})
	}
}
// TestFindBinlogsInRange verifies time-range overlap selection over a
// fixture of five back-to-back hourly binlogs. Because adjacent files
// share boundary timestamps and the overlap test is inclusive, a query
// window touching a boundary also selects the neighboring file — the
// expected counts below reflect that.
func TestFindBinlogsInRange(t *testing.T) {
	ctx := context.Background()
	// FindBinlogsInRange doesn't touch manager state.
	bm := &BinlogManager{}
	now := time.Now()
	hour := time.Hour
	// Five consecutive one-hour binlogs covering [-5h, now].
	binlogs := []BinlogFile{
		{
			Name:      "mysql-bin.000001",
			StartTime: now.Add(-5 * hour),
			EndTime:   now.Add(-4 * hour),
		},
		{
			Name:      "mysql-bin.000002",
			StartTime: now.Add(-4 * hour),
			EndTime:   now.Add(-3 * hour),
		},
		{
			Name:      "mysql-bin.000003",
			StartTime: now.Add(-3 * hour),
			EndTime:   now.Add(-2 * hour),
		},
		{
			Name:      "mysql-bin.000004",
			StartTime: now.Add(-2 * hour),
			EndTime:   now.Add(-1 * hour),
		},
		{
			Name:      "mysql-bin.000005",
			StartTime: now.Add(-1 * hour),
			EndTime:   now,
		},
	}
	tests := []struct {
		name     string
		start    time.Time
		end      time.Time
		expected int
	}{
		{
			name:     "all binlogs",
			start:    now.Add(-6 * hour),
			end:      now.Add(1 * hour),
			expected: 5,
		},
		{
			name:     "middle range",
			start:    now.Add(-4 * hour),
			end:      now.Add(-2 * hour),
			expected: 4, // binlogs 1-4 overlap (1 ends at -4h, 4 starts at -2h)
		},
		{
			name:     "last two",
			start:    now.Add(-2 * hour),
			end:      now,
			expected: 3, // binlogs 3-5 overlap (3 ends at -2h, 5 ends at now)
		},
		{
			name:     "exact match one binlog",
			start:    now.Add(-3 * hour),
			end:      now.Add(-2 * hour),
			expected: 3, // binlogs 2,3,4 overlap with this range
		},
		{
			name:     "no overlap - before",
			start:    now.Add(-10 * hour),
			end:      now.Add(-6 * hour),
			expected: 0,
		},
		{
			name:     "no overlap - after",
			start:    now.Add(1 * hour),
			end:      now.Add(2 * hour),
			expected: 0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := bm.FindBinlogsInRange(ctx, binlogs, tt.start, tt.end)
			if len(result) != tt.expected {
				t.Errorf("got %d binlogs, want %d", len(result), tt.expected)
			}
		})
	}
}
// TestBinlogArchiveInfo_Metadata verifies that archive metadata survives a
// save/load round trip through the metadata.json file in the archive dir.
func TestBinlogArchiveInfo_Metadata(t *testing.T) {
	// t.TempDir handles creation AND cleanup automatically; the previous
	// os.MkdirTemp + defer os.RemoveAll pair could leak the directory if
	// MkdirTemp's error path was ever reworked, and is more code for the
	// same effect.
	tempDir := t.TempDir()
	bm := &BinlogManager{
		archiveDir: tempDir,
	}
	archives := []BinlogArchiveInfo{
		{
			OriginalFile: "mysql-bin.000001",
			ArchivePath:  filepath.Join(tempDir, "mysql-bin.000001.gz"),
			Size:         1024,
			Compressed:   true,
			ArchivedAt:   time.Now().Add(-2 * time.Hour),
			StartPos:     4,
			EndPos:       1024,
			StartTime:    time.Now().Add(-3 * time.Hour),
			EndTime:      time.Now().Add(-2 * time.Hour),
		},
		{
			OriginalFile: "mysql-bin.000002",
			ArchivePath:  filepath.Join(tempDir, "mysql-bin.000002.gz"),
			Size:         2048,
			Compressed:   true,
			ArchivedAt:   time.Now().Add(-1 * time.Hour),
			StartPos:     4,
			EndPos:       2048,
			StartTime:    time.Now().Add(-2 * time.Hour),
			EndTime:      time.Now().Add(-1 * time.Hour),
		},
	}
	// Save metadata.
	if err := bm.SaveArchiveMetadata(archives); err != nil {
		t.Fatalf("saving metadata: %v", err)
	}
	// Verify the metadata file exists where loadArchiveMetadata expects it.
	metadataPath := filepath.Join(tempDir, "metadata.json")
	if _, err := os.Stat(metadataPath); os.IsNotExist(err) {
		t.Fatal("metadata file was not created")
	}
	// Load and verify both entries round-tripped.
	loaded := bm.loadArchiveMetadata(metadataPath)
	if len(loaded) != 2 {
		t.Errorf("got %d archives, want 2", len(loaded))
	}
	if loaded["mysql-bin.000001"].Size != 1024 {
		t.Errorf("wrong size for first archive")
	}
	if loaded["mysql-bin.000002"].Size != 2048 {
		t.Errorf("wrong size for second archive")
	}
}
// TestLimitedScanner exercises the limited scanner used for reading dump
// headers. Besides the line COUNT (all the original asserted), it now also
// checks that the lines returned are the EARLIEST five, in order — a scanner
// that returned the wrong lines but the right count previously passed.
func TestLimitedScanner(t *testing.T) {
	input := "line1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\nline9\nline10\n"
	reader := NewLimitedScanner(strings.NewReader(input), 5)
	var lines []string
	for reader.Scan() {
		lines = append(lines, reader.Text())
	}
	want := []string{"line1", "line2", "line3", "line4", "line5"}
	if len(lines) != len(want) {
		t.Fatalf("got %d lines, want %d", len(lines), len(want))
	}
	for i := range want {
		if lines[i] != want[i] {
			t.Errorf("line %d: got %q, want %q", i, lines[i], want[i])
		}
	}
}
// TestDatabaseType pins the string values of the DatabaseType constants.
func TestDatabaseType(t *testing.T) {
	cases := []struct {
		name string
		got  DatabaseType
		want string
	}{
		{"PostgreSQL", DatabasePostgreSQL, "postgres"},
		{"MySQL", DatabaseMySQL, "mysql"},
		{"MariaDB", DatabaseMariaDB, "mariadb"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if string(tc.got) != tc.want {
				t.Errorf("got %q, want %q", tc.got, tc.want)
			}
		})
	}
}
// TestRestoreTargetType pins the string values of the restore target
// type constants.
func TestRestoreTargetType(t *testing.T) {
	cases := []struct {
		name string
		got  RestoreTargetType
		want string
	}{
		{"Time", RestoreTargetTime, "time"},
		{"Position", RestoreTargetPosition, "position"},
		{"Immediate", RestoreTargetImmediate, "immediate"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if string(tc.got) != tc.want {
				t.Errorf("got %q, want %q", tc.got, tc.want)
			}
		})
	}
}

155
internal/pitr/interface.go Normal file
View File

@@ -0,0 +1,155 @@
// Package pitr provides Point-in-Time Recovery functionality
// This file contains shared interfaces and types for multi-database PITR support
package pitr
import (
"context"
"time"
)
// DatabaseType represents the type of database for PITR.
type DatabaseType string

// Supported database engines. The string values appear in serialized
// backup metadata, so they must remain stable.
const (
	DatabasePostgreSQL DatabaseType = "postgres"
	DatabaseMySQL      DatabaseType = "mysql"
	DatabaseMariaDB    DatabaseType = "mariadb"
)

// PITRProvider is the interface for database-specific PITR implementations.
// Each engine supplies its own provider; all methods take a context for
// cancellation.
type PITRProvider interface {
	// DatabaseType returns the database type this provider handles.
	DatabaseType() DatabaseType
	// Enable enables PITR for the database.
	Enable(ctx context.Context, config PITREnableConfig) error
	// Disable disables PITR for the database.
	Disable(ctx context.Context) error
	// Status returns the current PITR status.
	Status(ctx context.Context) (*PITRStatus, error)
	// CreateBackup creates a PITR-capable backup with position recording.
	CreateBackup(ctx context.Context, opts BackupOptions) (*PITRBackupInfo, error)
	// Restore performs a point-in-time restore.
	Restore(ctx context.Context, backup *PITRBackupInfo, target RestoreTarget) error
	// ListRecoveryPoints lists available recovery points/ranges.
	ListRecoveryPoints(ctx context.Context) ([]RecoveryWindow, error)
	// ValidateChain validates the log chain integrity.
	ValidateChain(ctx context.Context, from, to time.Time) (*ChainValidation, error)
}
// PITREnableConfig holds configuration for enabling PITR.
type PITREnableConfig struct {
	ArchiveDir      string        // Directory to store archived logs
	RetentionDays   int           // Days to keep archives
	ArchiveInterval time.Duration // How often to check for new logs (MySQL)
	Compression     bool          // Compress archived logs
	Encryption      bool          // Encrypt archived logs
	EncryptionKey   []byte        // Encryption key
}

// PITRStatus represents the current PITR configuration status.
// Fields are best-effort: providers may leave them at zero values when a
// probe fails.
type PITRStatus struct {
	Enabled       bool
	DatabaseType  DatabaseType
	ArchiveDir    string
	LogLevel      string      // WAL level (postgres) or binlog format (mysql)
	ArchiveMethod string      // archive_command (postgres) or manual (mysql)
	Position      LogPosition // current log position, if it could be determined
	LastArchived  time.Time   // time of the most recently archived log
	ArchiveCount  int         // number of archived log files
	ArchiveSize   int64       // total archive size in bytes
}

// LogPosition is a generic interface for database-specific log positions
// (for example a binlog file/offset pair for MySQL).
type LogPosition interface {
	// String returns a string representation of the position.
	String() string
	// IsZero returns true if the position is unset.
	IsZero() bool
	// Compare returns -1 if p < other, 0 if equal, 1 if p > other.
	Compare(other LogPosition) int
}
// BackupOptions holds options for creating a PITR backup.
type BackupOptions struct {
	Database       string // Database name (empty for all)
	OutputPath     string // Where to save the backup
	Compression    bool   // gzip-compress the dump
	CompressionLvl int    // compression level; provider default when <= 0
	Encryption     bool
	EncryptionKey  []byte
	FlushLogs      bool // Flush logs before backup (mysql)
	SingleTxn      bool // Single transaction mode
}

// PITRBackupInfo contains metadata about a PITR-capable backup.
// It is serialized to JSON (a .meta file next to the dump); the typed
// Position is excluded from JSON and round-tripped via PositionJSON.
type PITRBackupInfo struct {
	BackupFile    string       `json:"backup_file"`
	DatabaseType  DatabaseType `json:"database_type"`
	DatabaseName  string       `json:"database_name,omitempty"`
	Timestamp     time.Time    `json:"timestamp"`
	ServerVersion string       `json:"server_version"`
	ServerID      int          `json:"server_id,omitempty"` // MySQL server_id
	Position      LogPosition  `json:"-"`                   // Start position (type-specific)
	PositionJSON  string       `json:"position"`            // Serialized position
	SizeBytes     int64        `json:"size_bytes"`
	Compressed    bool         `json:"compressed"`
	Encrypted     bool         `json:"encrypted"`
}

// RestoreTarget specifies the point-in-time to restore to.
// Which of Time/Position is consulted depends on Type.
type RestoreTarget struct {
	Type      RestoreTargetType
	Time      *time.Time  // For RestoreTargetTime
	Position  LogPosition // For RestoreTargetPosition (LSN, binlog pos, GTID)
	Inclusive bool        // Include target transaction
	DryRun    bool        // Only show what would be done
	StopOnErr bool        // Stop replay on first error
}

// RestoreTargetType defines the type of restore target.
type RestoreTargetType string

const (
	RestoreTargetTime      RestoreTargetType = "time"      // restore up to a wall-clock time
	RestoreTargetPosition  RestoreTargetType = "position"  // restore up to a log position
	RestoreTargetImmediate RestoreTargetType = "immediate" // restore the base backup only
)
// RecoveryWindow represents a time range available for recovery from a
// given base backup. The interface-typed positions are excluded from the
// JSON encoding; only times and file names are serialized.
type RecoveryWindow struct {
	BaseBackup    string      `json:"base_backup"`
	BackupTime    time.Time   `json:"backup_time"`
	StartPosition LogPosition `json:"-"`
	EndPosition   LogPosition `json:"-"`
	StartTime     time.Time   `json:"start_time"`
	EndTime       time.Time   `json:"end_time"`
	LogFiles      []string    `json:"log_files"` // WAL segments or binlog files
	HasGaps       bool        `json:"has_gaps"`
	GapDetails    []string    `json:"gap_details,omitempty"`
}

// ChainValidation contains results of log chain validation.
type ChainValidation struct {
	Valid     bool // true when the chain is contiguous
	StartPos  LogPosition
	EndPos    LogPosition
	LogCount  int   // number of log files examined
	TotalSize int64 // combined size of the examined logs in bytes
	Gaps      []LogGap
	Errors    []string
	Warnings  []string
}

// LogGap represents a gap in the log chain.
type LogGap struct {
	After  string // Log file/position after which gap occurs
	Before string // Log file/position where chain resumes
	Reason string // Reason for gap if known
}

924
internal/pitr/mysql.go Normal file
View File

@@ -0,0 +1,924 @@
// Package pitr provides Point-in-Time Recovery functionality
// This file contains the MySQL/MariaDB PITR provider implementation
package pitr
import (
"bufio"
"compress/gzip"
"context"
"database/sql"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
)
// MySQLPITR implements PITRProvider for MySQL and MariaDB.
// Server flavor, version, server_id and GTID state are detected once at
// construction (see detectServerInfo) and cached on the struct.
type MySQLPITR struct {
	db            *sql.DB
	config        MySQLPITRConfig
	binlogManager *BinlogManager // binlog discovery, archiving and replay
	serverType    DatabaseType   // DatabaseMySQL or DatabaseMariaDB
	serverVersion string         // raw SELECT VERSION() result
	serverID      uint32         // @@server_id (0 if the query failed)
	gtidMode      bool           // whether GTIDs are active on the server
}

// MySQLPITRConfig holds configuration for MySQL PITR.
type MySQLPITRConfig struct {
	// Connection settings
	Host     string `json:"host"`
	Port     int    `json:"port"`
	User     string `json:"user"`
	Password string `json:"password,omitempty"`
	Socket   string `json:"socket,omitempty"`
	// Paths
	DataDir    string `json:"data_dir"`
	BinlogDir  string `json:"binlog_dir"`
	ArchiveDir string `json:"archive_dir"`
	RestoreDir string `json:"restore_dir"`
	// Archive settings
	ArchiveInterval  time.Duration `json:"archive_interval"`
	RetentionDays    int           `json:"retention_days"`
	Compression      bool          `json:"compression"`
	CompressionLevel int           `json:"compression_level"`
	Encryption       bool          `json:"encryption"`
	EncryptionKey    []byte        `json:"-"` // never serialized
	// Behavior settings
	RequireRowFormat  bool `json:"require_row_format"` // Enable fails unless binlog_format=ROW
	RequireGTID       bool `json:"require_gtid"`       // Enable fails unless GTIDs are active
	FlushLogsOnBackup bool `json:"flush_logs_on_backup"`
	LockTables        bool `json:"lock_tables"`
	SingleTransaction bool `json:"single_transaction"`
}
// NewMySQLPITR creates a new MySQL PITR provider. It probes the server for
// flavor/version/GTID information and wires up a binlog manager over the
// configured binlog and archive directories.
func NewMySQLPITR(db *sql.DB, config MySQLPITRConfig) (*MySQLPITR, error) {
	provider := &MySQLPITR{db: db, config: config}

	// Detect server type and version up front; everything else depends on it.
	if err := provider.detectServerInfo(); err != nil {
		return nil, fmt.Errorf("detecting server info: %w", err)
	}

	// Initialize the binlog manager with the archive-related settings.
	manager, err := NewBinlogManager(BinlogManagerConfig{
		BinlogDir:     config.BinlogDir,
		ArchiveDir:    config.ArchiveDir,
		Compression:   config.Compression,
		Encryption:    config.Encryption,
		EncryptionKey: config.EncryptionKey,
	})
	if err != nil {
		return nil, fmt.Errorf("creating binlog manager: %w", err)
	}
	provider.binlogManager = manager

	return provider, nil
}
// detectServerInfo detects MySQL/MariaDB version and configuration
// (flavor, server_id, GTID state). Called once from NewMySQLPITR.
// Only the VERSION() query is fatal; the server_id and GTID probes
// degrade to zero values on error.
func (m *MySQLPITR) detectServerInfo() error {
	// Get version
	var version string
	err := m.db.QueryRow("SELECT VERSION()").Scan(&version)
	if err != nil {
		return fmt.Errorf("getting version: %w", err)
	}
	m.serverVersion = version
	// MariaDB embeds "mariadb" in its version string; anything else is
	// treated as stock MySQL.
	if strings.Contains(strings.ToLower(version), "mariadb") {
		m.serverType = DatabaseMariaDB
	} else {
		m.serverType = DatabaseMySQL
	}
	// Get server_id (best effort; stays 0 on error)
	var serverID int
	err = m.db.QueryRow("SELECT @@server_id").Scan(&serverID)
	if err == nil {
		m.serverID = uint32(serverID)
	}
	// Check GTID mode — the variable differs per flavor.
	if m.serverType == DatabaseMySQL {
		var gtidMode string
		err = m.db.QueryRow("SELECT @@gtid_mode").Scan(&gtidMode)
		if err == nil {
			m.gtidMode = strings.ToUpper(gtidMode) == "ON"
		}
	} else {
		// MariaDB uses different variables: a non-empty @@gtid_current_pos
		// means GTIDs are in use.
		var gtidPos string
		err = m.db.QueryRow("SELECT @@gtid_current_pos").Scan(&gtidPos)
		m.gtidMode = err == nil && gtidPos != ""
	}
	return nil
}
// DatabaseType returns the database type this provider handles
// (DatabaseMySQL or DatabaseMariaDB, as detected at construction time).
func (m *MySQLPITR) DatabaseType() DatabaseType {
	return m.serverType
}
// Enable enables PITR for the MySQL database.
// It verifies server prerequisites (binary logging on, optionally ROW
// format and GTID mode, retention vs expire_logs_days), records the archive
// settings on the provider, creates the archive directory and persists a
// pitr_config.json describing the setup. It does NOT change server
// variables itself — unmet requirements are returned as an error listing
// the my.cnf lines needed to fix them.
func (m *MySQLPITR) Enable(ctx context.Context, config PITREnableConfig) error {
	// Check current binlog settings (Status exposes binlog_format as LogLevel).
	status, err := m.Status(ctx)
	if err != nil {
		return fmt.Errorf("checking status: %w", err)
	}
	var issues []string
	// Check if binlog is enabled.
	var logBin string
	if err := m.db.QueryRowContext(ctx, "SELECT @@log_bin").Scan(&logBin); err != nil {
		return fmt.Errorf("checking log_bin: %w", err)
	}
	// The variable may scan as "1" or "ON" depending on server/driver.
	if logBin != "1" && strings.ToUpper(logBin) != "ON" {
		issues = append(issues, "binary logging is not enabled (log_bin=OFF)")
		issues = append(issues, " Add to my.cnf: log_bin = mysql-bin")
	}
	// Check binlog format.
	if m.config.RequireRowFormat && status.LogLevel != "ROW" {
		issues = append(issues, fmt.Sprintf("binlog_format is %s, not ROW", status.LogLevel))
		issues = append(issues, " Add to my.cnf: binlog_format = ROW")
	}
	// Check GTID mode if required.
	if m.config.RequireGTID && !m.gtidMode {
		issues = append(issues, "GTID mode is not enabled")
		if m.serverType == DatabaseMySQL {
			issues = append(issues, " Add to my.cnf: gtid_mode = ON, enforce_gtid_consistency = ON")
		} else {
			issues = append(issues, " MariaDB: GTIDs are automatically managed with log_slave_updates")
		}
	}
	// Check expire_logs_days (don't want logs expiring before we archive them).
	// NOTE(review): the Scan error is deliberately ignored — the variable may
	// be absent on newer servers (MySQL 8 prefers binlog_expire_logs_seconds;
	// confirm), in which case expireDays stays 0 and the check is skipped.
	var expireDays int
	m.db.QueryRowContext(ctx, "SELECT @@expire_logs_days").Scan(&expireDays)
	if expireDays > 0 && expireDays < config.RetentionDays {
		issues = append(issues,
			fmt.Sprintf("expire_logs_days (%d) is less than retention days (%d)",
				expireDays, config.RetentionDays))
	}
	if len(issues) > 0 {
		return fmt.Errorf("PITR requirements not met:\n - %s", strings.Join(issues, "\n - "))
	}
	// Update archive configuration on the provider.
	m.config.ArchiveDir = config.ArchiveDir
	m.config.RetentionDays = config.RetentionDays
	m.config.ArchiveInterval = config.ArchiveInterval
	m.config.Compression = config.Compression
	m.config.Encryption = config.Encryption
	m.config.EncryptionKey = config.EncryptionKey
	// Create archive directory.
	if err := os.MkdirAll(config.ArchiveDir, 0750); err != nil {
		return fmt.Errorf("creating archive directory: %w", err)
	}
	// Persist the configuration so Status can later report Enabled=true.
	configPath := filepath.Join(config.ArchiveDir, "pitr_config.json")
	configData, _ := json.MarshalIndent(map[string]interface{}{
		"enabled":          true,
		"server_type":      m.serverType,
		"server_version":   m.serverVersion,
		"server_id":        m.serverID,
		"gtid_mode":        m.gtidMode,
		"archive_dir":      config.ArchiveDir,
		"retention_days":   config.RetentionDays,
		"archive_interval": config.ArchiveInterval.String(),
		"compression":      config.Compression,
		"encryption":       config.Encryption,
		"created_at":       time.Now().Format(time.RFC3339),
	}, "", " ")
	if err := os.WriteFile(configPath, configData, 0640); err != nil {
		return fmt.Errorf("saving config: %w", err)
	}
	return nil
}
// Disable disables PITR for the MySQL database by rewriting the persisted
// config file with enabled=false. Archived binlogs are left in place.
func (m *MySQLPITR) Disable(ctx context.Context) error {
	configPath := filepath.Join(m.config.ArchiveDir, "pitr_config.json")

	// PITR must have been enabled before it can be disabled.
	if _, err := os.Stat(configPath); os.IsNotExist(err) {
		return fmt.Errorf("PITR is not enabled (no config file found)")
	}

	// Replace the config with a minimal disabled marker.
	payload := map[string]interface{}{
		"enabled":     false,
		"disabled_at": time.Now().Format(time.RFC3339),
	}
	configData, _ := json.MarshalIndent(payload, "", " ")
	if err := os.WriteFile(configPath, configData, 0640); err != nil {
		return fmt.Errorf("updating config: %w", err)
	}
	return nil
}
// Status returns the current PITR status: whether PITR was enabled via
// Enable (persisted in pitr_config.json), the binlog format, the current
// binlog position and archive statistics. All probes are best-effort: a
// failed query or missing config file leaves the corresponding field at
// its zero value rather than failing the call.
func (m *MySQLPITR) Status(ctx context.Context) (*PITRStatus, error) {
	status := &PITRStatus{
		DatabaseType: m.serverType,
		ArchiveDir:   m.config.ArchiveDir,
	}
	// Check if PITR is enabled via the config file written by Enable.
	configPath := filepath.Join(m.config.ArchiveDir, "pitr_config.json")
	if data, err := os.ReadFile(configPath); err == nil {
		var config map[string]interface{}
		if json.Unmarshal(data, &config) == nil {
			if enabled, ok := config["enabled"].(bool); ok {
				status.Enabled = enabled
			}
		}
	}
	// Get binlog format (e.g. ROW) — reported through LogLevel.
	var binlogFormat string
	if err := m.db.QueryRowContext(ctx, "SELECT @@binlog_format").Scan(&binlogFormat); err == nil {
		status.LogLevel = binlogFormat
	}
	// Get current position (left nil on error).
	pos, err := m.GetCurrentPosition(ctx)
	if err == nil {
		status.Position = pos
	}
	// Aggregate archive stats: count, total size, most recent archive time.
	if m.config.ArchiveDir != "" {
		archives, err := m.binlogManager.ListArchivedBinlogs(ctx)
		if err == nil {
			status.ArchiveCount = len(archives)
			for _, a := range archives {
				status.ArchiveSize += a.Size
				if a.ArchivedAt.After(status.LastArchived) {
					status.LastArchived = a.ArchivedAt
				}
			}
		}
	}
	status.ArchiveMethod = "manual" // MySQL doesn't have automatic archiving like PostgreSQL
	return status, nil
}
// GetCurrentPosition retrieves the current binary log position (file name,
// offset, server id and — when available — the executed GTID set).
//
// SHOW MASTER STATUS was deprecated in MySQL 8.2 and removed in 8.4 in
// favor of SHOW BINARY LOG STATUS, so the legacy statement is tried first
// and the new one used as a fallback; the original implementation failed
// outright on 8.4+ servers.
func (m *MySQLPITR) GetCurrentPosition(ctx context.Context) (*BinlogPosition, error) {
	pos := &BinlogPosition{}
	rows, err := m.db.QueryContext(ctx, "SHOW MASTER STATUS")
	if err != nil {
		// MySQL 8.4+: the legacy statement no longer exists.
		rows, err = m.db.QueryContext(ctx, "SHOW BINARY LOG STATUS")
		if err != nil {
			return nil, fmt.Errorf("getting master status: %w", err)
		}
	}
	defer rows.Close()
	if rows.Next() {
		var file string
		var position uint64
		var binlogDoDB, binlogIgnoreDB, executedGtidSet sql.NullString
		// The column count varies by server version, so scan accordingly.
		cols, _ := rows.Columns()
		switch len(cols) {
		case 5: // MySQL 5.6+: File, Position, Do_DB, Ignore_DB, Executed_Gtid_Set
			err = rows.Scan(&file, &position, &binlogDoDB, &binlogIgnoreDB, &executedGtidSet)
		case 4: // Older versions without Executed_Gtid_Set
			err = rows.Scan(&file, &position, &binlogDoDB, &binlogIgnoreDB)
		default:
			err = rows.Scan(&file, &position)
		}
		if err != nil {
			return nil, fmt.Errorf("scanning master status: %w", err)
		}
		pos.File = file
		pos.Position = position
		pos.ServerID = m.serverID
		if executedGtidSet.Valid {
			pos.GTID = executedGtidSet.String
		}
	} else {
		// Distinguish a row-iteration error from "binary logging disabled".
		if err := rows.Err(); err != nil {
			return nil, fmt.Errorf("reading master status: %w", err)
		}
		return nil, fmt.Errorf("no master status available (is binary logging enabled?)")
	}
	// MariaDB reports the GTID state via a variable rather than in the row.
	if m.serverType == DatabaseMariaDB && pos.GTID == "" {
		var gtidPos string
		if err := m.db.QueryRowContext(ctx, "SELECT @@gtid_current_pos").Scan(&gtidPos); err == nil {
			pos.GTID = gtidPos
		}
	}
	return pos, nil
}
// CreateBackup creates a PITR-capable logical backup via mysqldump, with
// the starting binlog position recorded so later restores know where binlog
// replay must begin. A <backup>.meta JSON file is written next to the dump.
//
// Fixes over the original:
//   - The password is passed via the MYSQL_PWD environment variable instead
//     of a -p<password> argument, which was visible to every local user in
//     the process list for the duration of the dump.
//   - The gzip stream's Close error is checked (it was ignored via a defer),
//     so a truncated compressed backup can no longer be reported as success.
//   - The metadata write error is checked; without the .meta file the backup
//     is not usable for PITR.
func (m *MySQLPITR) CreateBackup(ctx context.Context, opts BackupOptions) (*PITRBackupInfo, error) {
	// Get position BEFORE flushing logs.
	startPos, err := m.GetCurrentPosition(ctx)
	if err != nil {
		return nil, fmt.Errorf("getting start position: %w", err)
	}
	// Optionally flush logs to start a new binlog file, then re-read the
	// position so it points at the fresh file.
	if opts.FlushLogs || m.config.FlushLogsOnBackup {
		if _, err := m.db.ExecContext(ctx, "FLUSH BINARY LOGS"); err != nil {
			return nil, fmt.Errorf("flushing binary logs: %w", err)
		}
		startPos, err = m.GetCurrentPosition(ctx)
		if err != nil {
			return nil, fmt.Errorf("getting position after flush: %w", err)
		}
	}
	// Build mysqldump command.
	dumpArgs := []string{
		"--single-transaction",
		"--routines",
		"--triggers",
		"--events",
		"--master-data=2", // Include binlog position as comment
	}
	if m.config.FlushLogsOnBackup {
		dumpArgs = append(dumpArgs, "--flush-logs")
	}
	// Add connection params (password intentionally NOT passed here).
	if m.config.Host != "" {
		dumpArgs = append(dumpArgs, "-h", m.config.Host)
	}
	if m.config.Port > 0 {
		dumpArgs = append(dumpArgs, "-P", strconv.Itoa(m.config.Port))
	}
	if m.config.User != "" {
		dumpArgs = append(dumpArgs, "-u", m.config.User)
	}
	if m.config.Socket != "" {
		dumpArgs = append(dumpArgs, "-S", m.config.Socket)
	}
	// Database selection: a single schema or everything.
	if opts.Database != "" {
		dumpArgs = append(dumpArgs, opts.Database)
	} else {
		dumpArgs = append(dumpArgs, "--all-databases")
	}
	// Create output file path.
	timestamp := time.Now().Format("20060102_150405")
	backupName := fmt.Sprintf("mysql_pitr_%s.sql", timestamp)
	if opts.Compression {
		backupName += ".gz"
	}
	backupPath := filepath.Join(opts.OutputPath, backupName)
	if err := os.MkdirAll(opts.OutputPath, 0750); err != nil {
		return nil, fmt.Errorf("creating output directory: %w", err)
	}
	cmd := exec.CommandContext(ctx, "mysqldump", dumpArgs...)
	// SECURITY: pass the password through the environment so it does not
	// show up in ps/top output.
	if m.config.Password != "" {
		cmd.Env = append(os.Environ(), "MYSQL_PWD="+m.config.Password)
	}
	outFile, err := os.Create(backupPath)
	if err != nil {
		return nil, fmt.Errorf("creating backup file: %w", err)
	}
	defer outFile.Close() // safety net for early returns; checked close below
	var writer io.WriteCloser = outFile
	if opts.Compression {
		writer = NewGzipWriter(outFile, opts.CompressionLvl)
	}
	cmd.Stdout = writer
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Remove(backupPath)
		return nil, fmt.Errorf("mysqldump failed: %w", err)
	}
	// Finalize the gzip stream; a failed close means a truncated backup.
	if opts.Compression {
		if err := writer.Close(); err != nil {
			os.Remove(backupPath)
			return nil, fmt.Errorf("finalizing compressed backup: %w", err)
		}
	}
	if err := outFile.Close(); err != nil {
		os.Remove(backupPath)
		return nil, fmt.Errorf("closing backup file: %w", err)
	}
	// Get file size for the metadata.
	info, err := os.Stat(backupPath)
	if err != nil {
		return nil, fmt.Errorf("getting backup info: %w", err)
	}
	// Serialize the position for JSON storage (the interface-typed Position
	// field is excluded from the struct's own JSON encoding).
	posJSON, _ := json.Marshal(startPos)
	backupInfo := &PITRBackupInfo{
		BackupFile:    backupPath,
		DatabaseType:  m.serverType,
		DatabaseName:  opts.Database,
		Timestamp:     time.Now(),
		ServerVersion: m.serverVersion,
		ServerID:      int(m.serverID),
		Position:      startPos,
		PositionJSON:  string(posJSON),
		SizeBytes:     info.Size(),
		Compressed:    opts.Compression,
		Encrypted:     opts.Encryption,
	}
	// Save metadata alongside the backup; without it the dump cannot be
	// used for point-in-time recovery, so a write failure is fatal.
	metadataPath := backupPath + ".meta"
	metaData, _ := json.MarshalIndent(backupInfo, "", " ")
	if err := os.WriteFile(metadataPath, metaData, 0640); err != nil {
		return nil, fmt.Errorf("writing backup metadata: %w", err)
	}
	return backupInfo, nil
}
// Restore performs a point-in-time restore.
//
// Steps:
//  1. Restore the base dump through the mysql client.
//  2. For RestoreTargetImmediate, stop there.
//  3. Otherwise select the binlogs (live and archived) between the
//     position recorded in the backup and the target time/position.
//  4. Replay them via the binlog manager, honoring DryRun/StopOnErr.
func (m *MySQLPITR) Restore(ctx context.Context, backup *PITRBackupInfo, target RestoreTarget) error {
	// Step 1: Restore base backup
	if err := m.restoreBaseBackup(ctx, backup); err != nil {
		return fmt.Errorf("restoring base backup: %w", err)
	}
	// Step 2: If target time is after backup time, replay binlogs
	if target.Type == RestoreTargetImmediate {
		return nil // Just restore to backup point
	}
	// Parse the serialized start position recorded at backup time.
	var startPos BinlogPosition
	if err := json.Unmarshal([]byte(backup.PositionJSON), &startPos); err != nil {
		return fmt.Errorf("parsing backup position: %w", err)
	}
	// Step 3: Find binlogs to replay
	binlogs, err := m.binlogManager.DiscoverBinlogs(ctx)
	if err != nil {
		return fmt.Errorf("discovering binlogs: %w", err)
	}
	// Archived binlogs are considered too (best effort; error ignored).
	archivedBinlogs, _ := m.binlogManager.ListArchivedBinlogs(ctx)
	var filesToReplay []string
	// Determine which binlogs to replay based on target
	switch target.Type {
	case RestoreTargetTime:
		if target.Time == nil {
			return fmt.Errorf("target time not specified")
		}
		// Live binlogs whose time range overlaps [backup time, target time].
		relevantBinlogs := m.binlogManager.FindBinlogsInRange(ctx, binlogs, backup.Timestamp, *target.Time)
		for _, b := range relevantBinlogs {
			filesToReplay = append(filesToReplay, b.Path)
		}
		// Archives at/after the backup's binlog file whose time range
		// overlaps the same window.
		for _, a := range archivedBinlogs {
			if compareBinlogFiles(a.OriginalFile, startPos.File) >= 0 {
				if !a.EndTime.IsZero() && !a.EndTime.Before(backup.Timestamp) && !a.StartTime.After(*target.Time) {
					filesToReplay = append(filesToReplay, a.ArchivePath)
				}
			}
		}
	case RestoreTargetPosition:
		if target.Position == nil {
			return fmt.Errorf("target position not specified")
		}
		targetPos, ok := target.Position.(*BinlogPosition)
		if !ok {
			return fmt.Errorf("invalid target position type")
		}
		// Binlogs from the start file up to the target file (by name order).
		for _, b := range binlogs {
			if compareBinlogFiles(b.Name, startPos.File) >= 0 &&
				compareBinlogFiles(b.Name, targetPos.File) <= 0 {
				filesToReplay = append(filesToReplay, b.Path)
			}
		}
	}
	if len(filesToReplay) == 0 {
		// Nothing to replay, backup is already at or past target
		return nil
	}
	// Step 4: Replay binlogs
	replayOpts := ReplayOptions{
		BinlogFiles:   filesToReplay,
		StartPosition: &startPos,
		DryRun:        target.DryRun,
		MySQLHost:     m.config.Host,
		MySQLPort:     m.config.Port,
		MySQLUser:     m.config.User,
		MySQLPass:     m.config.Password,
		StopOnError:   target.StopOnErr,
	}
	if target.Type == RestoreTargetTime && target.Time != nil {
		replayOpts.StopTime = target.Time
	}
	if target.Type == RestoreTargetPosition && target.Position != nil {
		replayOpts.StopPosition = target.Position
	}
	// Route dry-run output to stdout.
	if target.DryRun {
		replayOpts.Output = os.Stdout
	}
	return m.binlogManager.ReplayBinlogs(ctx, replayOpts)
}
// restoreBaseBackup restores the base MySQL backup by streaming the dump
// (transparently decompressing gzip) into the mysql client.
//
// Fix over the original: the password is passed via the MYSQL_PWD
// environment variable instead of a -p<password> argument, which exposed
// the credential in the process list for the duration of the restore.
func (m *MySQLPITR) restoreBaseBackup(ctx context.Context, backup *PITRBackupInfo) error {
	// Build mysql client args (password intentionally NOT passed here).
	mysqlArgs := []string{}
	if m.config.Host != "" {
		mysqlArgs = append(mysqlArgs, "-h", m.config.Host)
	}
	if m.config.Port > 0 {
		mysqlArgs = append(mysqlArgs, "-P", strconv.Itoa(m.config.Port))
	}
	if m.config.User != "" {
		mysqlArgs = append(mysqlArgs, "-u", m.config.User)
	}
	if m.config.Socket != "" {
		mysqlArgs = append(mysqlArgs, "-S", m.config.Socket)
	}
	// Prepare input stream.
	backupFile, err := os.Open(backup.BackupFile)
	if err != nil {
		return fmt.Errorf("opening backup file: %w", err)
	}
	defer backupFile.Close()
	var input io.Reader = backupFile
	// Handle compressed backups (trust either the flag or the .gz suffix).
	if backup.Compressed || strings.HasSuffix(backup.BackupFile, ".gz") {
		gzReader, err := NewGzipReader(backupFile)
		if err != nil {
			return fmt.Errorf("creating gzip reader: %w", err)
		}
		defer gzReader.Close()
		input = gzReader
	}
	// Run mysql with the dump on stdin.
	cmd := exec.CommandContext(ctx, "mysql", mysqlArgs...)
	// SECURITY: credential via environment, not the command line.
	if m.config.Password != "" {
		cmd.Env = append(os.Environ(), "MYSQL_PWD="+m.config.Password)
	}
	cmd.Stdin = input
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// ListRecoveryPoints lists available recovery points/ranges.
// For every backup .meta file found it builds a RecoveryWindow spanning
// from the backup's recorded position to the newest binlog (live or
// archived) that follows it, and flags gaps reported by chain validation.
// File reads and globs are best-effort; unreadable metadata is skipped.
func (m *MySQLPITR) ListRecoveryPoints(ctx context.Context) ([]RecoveryWindow, error) {
	var windows []RecoveryWindow
	// Find all backup metadata files.
	// NOTE(review): the ".." pattern assumes backups live in sibling
	// directories of the archive dir — confirm against the CLI's layout.
	backupPattern := filepath.Join(m.config.ArchiveDir, "..", "*", "*.meta")
	metaFiles, _ := filepath.Glob(backupPattern)
	// Also check default backup locations
	additionalPaths := []string{
		filepath.Join(m.config.ArchiveDir, "*.meta"),
		filepath.Join(m.config.RestoreDir, "*.meta"),
	}
	for _, p := range additionalPaths {
		matches, _ := filepath.Glob(p)
		metaFiles = append(metaFiles, matches...)
	}
	// Get current binlogs (fall back to an empty set on error).
	binlogs, err := m.binlogManager.DiscoverBinlogs(ctx)
	if err != nil {
		binlogs = []BinlogFile{}
	}
	// Get archived binlogs
	archivedBinlogs, _ := m.binlogManager.ListArchivedBinlogs(ctx)
	for _, metaFile := range metaFiles {
		data, err := os.ReadFile(metaFile)
		if err != nil {
			continue // unreadable metadata: skip this backup
		}
		var backup PITRBackupInfo
		if err := json.Unmarshal(data, &backup); err != nil {
			continue // corrupt metadata: skip this backup
		}
		// Parse the binlog position recorded at backup time (best effort).
		var startPos BinlogPosition
		json.Unmarshal([]byte(backup.PositionJSON), &startPos)
		window := RecoveryWindow{
			BaseBackup:    backup.BackupFile,
			BackupTime:    backup.Timestamp,
			StartTime:     backup.Timestamp,
			StartPosition: &startPos,
		}
		// Find binlogs available after this backup, tracking the latest end
		// time/position seen so the window can be bounded.
		var relevantBinlogs []string
		var latestTime time.Time
		var latestPos *BinlogPosition
		for _, b := range binlogs {
			if compareBinlogFiles(b.Name, startPos.File) >= 0 {
				relevantBinlogs = append(relevantBinlogs, b.Name)
				if !b.EndTime.IsZero() && b.EndTime.After(latestTime) {
					latestTime = b.EndTime
					latestPos = &BinlogPosition{
						File:     b.Name,
						Position: b.EndPos,
						GTID:     b.GTID,
					}
				}
			}
		}
		for _, a := range archivedBinlogs {
			if compareBinlogFiles(a.OriginalFile, startPos.File) >= 0 {
				relevantBinlogs = append(relevantBinlogs, a.OriginalFile)
				if !a.EndTime.IsZero() && a.EndTime.After(latestTime) {
					latestTime = a.EndTime
					latestPos = &BinlogPosition{
						File:     a.OriginalFile,
						Position: a.EndPos,
						GTID:     a.GTID,
					}
				}
			}
		}
		window.LogFiles = relevantBinlogs
		if !latestTime.IsZero() {
			window.EndTime = latestTime
		} else {
			// No binlog end times known: assume recoverability up to now.
			window.EndTime = time.Now()
		}
		window.EndPosition = latestPos
		// Check for gaps across all discovered binlogs (best effort).
		validation, _ := m.binlogManager.ValidateBinlogChain(ctx, binlogs)
		if validation != nil {
			window.HasGaps = !validation.Valid
			for _, gap := range validation.Gaps {
				window.GapDetails = append(window.GapDetails, gap.Reason)
			}
		}
		windows = append(windows, window)
	}
	return windows, nil
}
// ValidateChain validates the binlog chain integrity for all binlogs that
// overlap the [from, to] window.
func (m *MySQLPITR) ValidateChain(ctx context.Context, from, to time.Time) (*ChainValidation, error) {
	// Discover everything on disk first.
	all, err := m.binlogManager.DiscoverBinlogs(ctx)
	if err != nil {
		return nil, fmt.Errorf("discovering binlogs: %w", err)
	}

	// Narrow to the requested window, then run the chain check on it.
	inRange := m.binlogManager.FindBinlogsInRange(ctx, all, from, to)
	return m.binlogManager.ValidateBinlogChain(ctx, inRange)
}
// ArchiveNewBinlogs archives any binlog files that haven't been archived yet.
// The currently active binlog (per GetCurrentPosition) is skipped, as are
// files already present in the archive index. Returns only the archives
// created by this call.
func (m *MySQLPITR) ArchiveNewBinlogs(ctx context.Context) ([]BinlogArchiveInfo, error) {
	// Get current binlogs
	binlogs, err := m.binlogManager.DiscoverBinlogs(ctx)
	if err != nil {
		return nil, fmt.Errorf("discovering binlogs: %w", err)
	}
	// Build a set of already-archived file names for O(1) lookups.
	archived, _ := m.binlogManager.ListArchivedBinlogs(ctx)
	archivedSet := make(map[string]struct{})
	for _, a := range archived {
		archivedSet[a.OriginalFile] = struct{}{}
	}
	// Get current binlog file (don't archive the active one — it is still
	// being written to).
	currentPos, _ := m.GetCurrentPosition(ctx)
	currentFile := ""
	if currentPos != nil {
		currentFile = currentPos.File
	}
	var newArchives []BinlogArchiveInfo
	for i := range binlogs {
		b := &binlogs[i]
		// Skip if already archived
		if _, exists := archivedSet[b.Name]; exists {
			continue
		}
		// Skip the current active binlog
		if b.Name == currentFile {
			continue
		}
		// Archive this file.
		archiveInfo, err := m.binlogManager.ArchiveBinlog(ctx, b)
		if err != nil {
			// NOTE(review): failure is skipped silently; the original
			// comment said "log but continue" but nothing is logged.
			continue
		}
		newArchives = append(newArchives, *archiveInfo)
	}
	// Refresh the on-disk metadata index if anything new was archived.
	if len(newArchives) > 0 {
		allArchived, _ := m.binlogManager.ListArchivedBinlogs(ctx)
		m.binlogManager.SaveArchiveMetadata(allArchived)
	}
	return newArchives, nil
}
// PurgeBinlogs purges archived binlog files older than the configured
// retention window (RetentionDays).
//
// Fix over the original: os.Remove errors were silently dropped, so expired
// archives could linger forever without anyone noticing. Failures are now
// collected and returned (a file that is already gone is not an error).
func (m *MySQLPITR) PurgeBinlogs(ctx context.Context) error {
	if m.config.RetentionDays <= 0 {
		return fmt.Errorf("retention days not configured")
	}
	cutoff := time.Now().AddDate(0, 0, -m.config.RetentionDays)
	// Get archived binlogs
	archived, err := m.binlogManager.ListArchivedBinlogs(ctx)
	if err != nil {
		return fmt.Errorf("listing archived binlogs: %w", err)
	}
	var failed []string
	for _, a := range archived {
		if !a.ArchivedAt.Before(cutoff) {
			continue // still within the retention window
		}
		if err := os.Remove(a.ArchivePath); err != nil && !os.IsNotExist(err) {
			failed = append(failed, fmt.Sprintf("%s: %v", a.ArchivePath, err))
		}
	}
	if len(failed) > 0 {
		return fmt.Errorf("purging binlogs: %s", strings.Join(failed, "; "))
	}
	return nil
}
// GzipWriter is a helper for gzip compression
type GzipWriter struct {
w *gzip.Writer
}
func NewGzipWriter(w io.Writer, level int) *GzipWriter {
if level <= 0 {
level = gzip.DefaultCompression
}
gw, _ := gzip.NewWriterLevel(w, level)
return &GzipWriter{w: gw}
}
func (g *GzipWriter) Write(p []byte) (int, error) {
return g.w.Write(p)
}
func (g *GzipWriter) Close() error {
return g.w.Close()
}
// GzipReader is a helper for gzip decompression
type GzipReader struct {
r *gzip.Reader
}
func NewGzipReader(r io.Reader) (*GzipReader, error) {
gr, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
return &GzipReader{r: gr}, nil
}
func (g *GzipReader) Read(p []byte) (int, error) {
return g.r.Read(p)
}
func (g *GzipReader) Close() error {
return g.r.Close()
}
// ExtractBinlogPositionFromDump extracts the binlog position recorded in a
// mysqldump header (written by --master-data / --source-data). Plain and
// gzip-compressed dumps are supported; only the first 1000 lines are
// scanned since the position comment appears near the top of the file.
//
// Fix over the original: mysqldump 8.0.23+ (--source-data) emits
//
//	CHANGE REPLICATION SOURCE TO SOURCE_LOG_FILE='...', SOURCE_LOG_POS=N;
//
// instead of the legacy
//
//	CHANGE MASTER TO MASTER_LOG_FILE='...', MASTER_LOG_POS=N;
//
// and only the legacy form was matched, so positions in modern dumps were
// never found.
func ExtractBinlogPositionFromDump(dumpPath string) (*BinlogPosition, error) {
	file, err := os.Open(dumpPath)
	if err != nil {
		return nil, fmt.Errorf("opening dump file: %w", err)
	}
	defer file.Close()
	var reader io.Reader = file
	if strings.HasSuffix(dumpPath, ".gz") {
		gzReader, err := gzip.NewReader(file)
		if err != nil {
			return nil, fmt.Errorf("creating gzip reader: %w", err)
		}
		defer gzReader.Close()
		reader = gzReader
	}
	// Accept both the legacy MASTER_* and the modern SOURCE_* spellings.
	scanner := NewLimitedScanner(reader, 1000) // Only scan first 1000 lines
	posPattern := regexp.MustCompile(`(?:MASTER_LOG_FILE|SOURCE_LOG_FILE)='([^']+)',\s*(?:MASTER_LOG_POS|SOURCE_LOG_POS)=(\d+)`)
	for scanner.Scan() {
		line := scanner.Text()
		if matches := posPattern.FindStringSubmatch(line); len(matches) == 3 {
			pos, err := strconv.ParseUint(matches[2], 10, 64)
			if err != nil {
				// \d+ should always parse; guard against overflow anyway.
				return nil, fmt.Errorf("parsing binlog position %q: %w", matches[2], err)
			}
			return &BinlogPosition{
				File:     matches[1],
				Position: pos,
			}, nil
		}
	}
	return nil, fmt.Errorf("binlog position not found in dump file")
}
// LimitedScanner wraps bufio.Scanner with a line limit
type LimitedScanner struct {
scanner *bufio.Scanner
limit int
count int
}
func NewLimitedScanner(r io.Reader, limit int) *LimitedScanner {
return &LimitedScanner{
scanner: bufio.NewScanner(r),
limit: limit,
}
}
func (s *LimitedScanner) Scan() bool {
if s.count >= s.limit {
return false
}
s.count++
return s.scanner.Scan()
}
func (s *LimitedScanner) Text() string {
return s.scanner.Text()
}