Compare commits

..

3 Commits

Author SHA1 Message Date
7711a206ab Fix panic in TUI backup manager verify when logger is nil
All checks were successful
CI/CD / Test (push) Successful in 1m17s
CI/CD / Lint (push) Successful in 1m25s
CI/CD / Build & Release (push) Successful in 3m15s
- Add nil checks before logger calls in diagnose.go (8 places)
- Add nil checks before logger calls in safety.go (2 places)
- Fixes crash when pressing 'v' to verify backup in interactive menu
2026-01-14 17:18:37 +01:00
ba6e8a2b39 v3.42.37: Remove ASCII boxes from diagnose view
All checks were successful
CI/CD / Test (push) Successful in 1m17s
CI/CD / Lint (push) Successful in 1m26s
CI/CD / Build & Release (push) Successful in 3m14s
Cleaner output without box drawing characters:
- [STATUS] Validation section
- [INFO] Details section
- [FAIL] Errors section
- [WARN] Warnings section
- [HINT] Recommendations section
2026-01-14 17:05:43 +01:00
ec5e89eab7 v3.42.36: Fix remaining TUI prefix inconsistencies
All checks were successful
CI/CD / Test (push) Successful in 1m18s
CI/CD / Lint (push) Successful in 1m26s
CI/CD / Build & Release (push) Successful in 3m13s
- diagnose_view.go: Add [STATS], [LIST], [INFO] section prefixes
- status.go: Add [CONN], [INFO] section prefixes
- settings.go: [LOG] → [INFO] for configuration summary
- menu.go: [DB] → [SELECT]/[CHECK] for selectors
2026-01-14 16:59:24 +01:00
9 changed files with 99 additions and 107 deletions

View File

@@ -56,7 +56,7 @@ Download from [releases](https://git.uuxo.net/UUXO/dbbackup/releases):
```bash ```bash
# Linux x86_64 # Linux x86_64
wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.1/dbbackup-linux-amd64 wget https://git.uuxo.net/UUXO/dbbackup/releases/download/v3.42.35/dbbackup-linux-amd64
chmod +x dbbackup-linux-amd64 chmod +x dbbackup-linux-amd64
sudo mv dbbackup-linux-amd64 /usr/local/bin/dbbackup sudo mv dbbackup-linux-amd64 /usr/local/bin/dbbackup
``` ```

View File

@@ -4,8 +4,8 @@ This directory contains pre-compiled binaries for the DB Backup Tool across multiple platforms
## Build Information ## Build Information
- **Version**: 3.42.34 - **Version**: 3.42.34
- **Build Time**: 2026-01-14_15:24:20_UTC - **Build Time**: 2026-01-14_16:06:08_UTC
- **Git Commit**: 721e53f - **Git Commit**: ba6e8a2
## Recent Updates (v1.1.0) ## Recent Updates (v1.1.0)
- ✅ Fixed TUI progress display with line-by-line output - ✅ Fixed TUI progress display with line-by-line output

View File

@@ -368,7 +368,7 @@ func (d *Diagnoser) diagnoseSQLScript(filePath string, compressed bool, result *DiagnoseResult
} }
// Store last line for termination check // Store last line for termination check
if lineNumber > 0 && (lineNumber%100000 == 0) && d.verbose { if lineNumber > 0 && (lineNumber%100000 == 0) && d.verbose && d.log != nil {
d.log.Debug("Scanning SQL file", "lines_processed", lineNumber) d.log.Debug("Scanning SQL file", "lines_processed", lineNumber)
} }
} }
@@ -430,9 +430,11 @@ func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResult
} }
} }
d.log.Info("Verifying cluster archive integrity", if d.log != nil {
"size", fmt.Sprintf("%.1f GB", float64(result.FileSize)/(1024*1024*1024)), d.log.Info("Verifying cluster archive integrity",
"timeout", fmt.Sprintf("%d min", timeoutMinutes)) "size", fmt.Sprintf("%.1f GB", float64(result.FileSize)/(1024*1024*1024)),
"timeout", fmt.Sprintf("%d min", timeoutMinutes))
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute) ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute)
defer cancel() defer cancel()
@@ -561,7 +563,7 @@ func (d *Diagnoser) diagnoseClusterArchive(filePath string, result *DiagnoseResult
} }
// For verbose mode, diagnose individual dumps inside the archive // For verbose mode, diagnose individual dumps inside the archive
if d.verbose && len(dumpFiles) > 0 { if d.verbose && len(dumpFiles) > 0 && d.log != nil {
d.log.Info("Cluster archive contains databases", "count", len(dumpFiles)) d.log.Info("Cluster archive contains databases", "count", len(dumpFiles))
for _, df := range dumpFiles { for _, df := range dumpFiles {
d.log.Info(" - " + df) d.log.Info(" - " + df)
@@ -684,9 +686,11 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*DiagnoseResult, error)
} }
} }
d.log.Info("Listing cluster archive contents", if d.log != nil {
"size", fmt.Sprintf("%.1f GB", float64(archiveInfo.Size())/(1024*1024*1024)), d.log.Info("Listing cluster archive contents",
"timeout", fmt.Sprintf("%d min", timeoutMinutes)) "size", fmt.Sprintf("%.1f GB", float64(archiveInfo.Size())/(1024*1024*1024)),
"timeout", fmt.Sprintf("%d min", timeoutMinutes))
}
listCtx, listCancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute) listCtx, listCancel := context.WithTimeout(context.Background(), time.Duration(timeoutMinutes)*time.Minute)
defer listCancel() defer listCancel()
@@ -766,7 +770,9 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*DiagnoseResult, error)
return []*DiagnoseResult{errResult}, nil return []*DiagnoseResult{errResult}, nil
} }
d.log.Debug("Archive listing streamed successfully", "total_files", fileCount, "relevant_files", len(files)) if d.log != nil {
d.log.Debug("Archive listing streamed successfully", "total_files", fileCount, "relevant_files", len(files))
}
// Check if we have enough disk space (estimate 4x archive size needed) // Check if we have enough disk space (estimate 4x archive size needed)
// archiveInfo already obtained at function start // archiveInfo already obtained at function start
@@ -781,7 +787,9 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*DiagnoseResult, error)
testCancel() testCancel()
} }
d.log.Info("Archive listing successful", "files", len(files)) if d.log != nil {
d.log.Info("Archive listing successful", "files", len(files))
}
// Try full extraction - NO TIMEOUT here as large archives can take a long time // Try full extraction - NO TIMEOUT here as large archives can take a long time
// Use a generous timeout (30 minutes) for very large archives // Use a generous timeout (30 minutes) for very large archives
@@ -870,11 +878,15 @@ func (d *Diagnoser) DiagnoseClusterDumps(archivePath, tempDir string) ([]*DiagnoseResult, error)
} }
dumpPath := filepath.Join(dumpsDir, name) dumpPath := filepath.Join(dumpsDir, name)
d.log.Info("Diagnosing dump file", "file", name) if d.log != nil {
d.log.Info("Diagnosing dump file", "file", name)
}
result, err := d.DiagnoseFile(dumpPath) result, err := d.DiagnoseFile(dumpPath)
if err != nil { if err != nil {
d.log.Warn("Failed to diagnose file", "file", name, "error", err) if d.log != nil {
d.log.Warn("Failed to diagnose file", "file", name, "error", err)
}
continue continue
} }
results = append(results, result) results = append(results, result)

View File

@@ -255,7 +255,9 @@ func (s *Safety) CheckDiskSpaceAt(archivePath string, checkDir string, multiplier
// Get available disk space // Get available disk space
availableSpace, err := getDiskSpace(checkDir) availableSpace, err := getDiskSpace(checkDir)
if err != nil { if err != nil {
s.log.Warn("Cannot check disk space", "error", err) if s.log != nil {
s.log.Warn("Cannot check disk space", "error", err)
}
return nil // Don't fail if we can't check return nil // Don't fail if we can't check
} }
@@ -278,10 +280,12 @@ func (s *Safety) CheckDiskSpaceAt(archivePath string, checkDir string, multiplie
checkDir) checkDir)
} }
s.log.Info("Disk space check passed", if s.log != nil {
"location", checkDir, s.log.Info("Disk space check passed",
"required", FormatBytes(requiredSpace), "location", checkDir,
"available", FormatBytes(availableSpace)) "required", FormatBytes(requiredSpace),
"available", FormatBytes(availableSpace))
}
return nil return nil
} }

View File

@@ -204,132 +204,111 @@ func (m DiagnoseViewModel) View() string {
func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) string { func (m DiagnoseViewModel) renderSingleResult(result *restore.DiagnoseResult) string {
var s strings.Builder var s strings.Builder
// Status Box // Validation Status
s.WriteString("+--[ VALIDATION STATUS ]" + strings.Repeat("-", 37) + "+\n") s.WriteString(diagnoseHeaderStyle.Render("[STATUS] Validation"))
s.WriteString("\n")
if result.IsValid { if result.IsValid {
s.WriteString("| " + diagnosePassStyle.Render("[OK] VALID - Archive passed all checks") + strings.Repeat(" ", 18) + "|\n") s.WriteString(diagnosePassStyle.Render(" [OK] VALID - Archive passed all checks"))
s.WriteString("\n")
} else { } else {
s.WriteString("| " + diagnoseFailStyle.Render("[FAIL] INVALID - Archive has problems") + strings.Repeat(" ", 19) + "|\n") s.WriteString(diagnoseFailStyle.Render(" [FAIL] INVALID - Archive has problems"))
s.WriteString("\n")
} }
if result.IsTruncated { if result.IsTruncated {
s.WriteString("| " + diagnoseFailStyle.Render("[!] TRUNCATED - File is incomplete") + strings.Repeat(" ", 22) + "|\n") s.WriteString(diagnoseFailStyle.Render(" [!] TRUNCATED - File is incomplete"))
s.WriteString("\n")
} }
if result.IsCorrupted { if result.IsCorrupted {
s.WriteString("| " + diagnoseFailStyle.Render("[!] CORRUPTED - File structure damaged") + strings.Repeat(" ", 18) + "|\n") s.WriteString(diagnoseFailStyle.Render(" [!] CORRUPTED - File structure damaged"))
s.WriteString("\n")
} }
s.WriteString("+" + strings.Repeat("-", 60) + "+\n\n") s.WriteString("\n")
// Details Box // Details
if result.Details != nil { if result.Details != nil {
s.WriteString("+--[ DETAILS ]" + strings.Repeat("-", 46) + "+\n") s.WriteString(diagnoseHeaderStyle.Render("[INFO] Details"))
s.WriteString("\n")
if result.Details.HasPGDMPSignature { if result.Details.HasPGDMPSignature {
s.WriteString("| " + diagnosePassStyle.Render("[+]") + " PostgreSQL custom format (PGDMP)" + strings.Repeat(" ", 20) + "|\n") s.WriteString(diagnosePassStyle.Render(" [+]") + " PostgreSQL custom format (PGDMP)\n")
} }
if result.Details.HasSQLHeader { if result.Details.HasSQLHeader {
s.WriteString("| " + diagnosePassStyle.Render("[+]") + " PostgreSQL SQL header found" + strings.Repeat(" ", 25) + "|\n") s.WriteString(diagnosePassStyle.Render(" [+]") + " PostgreSQL SQL header found\n")
} }
if result.Details.GzipValid { if result.Details.GzipValid {
s.WriteString("| " + diagnosePassStyle.Render("[+]") + " Gzip compression valid" + strings.Repeat(" ", 30) + "|\n") s.WriteString(diagnosePassStyle.Render(" [+]") + " Gzip compression valid\n")
} }
if result.Details.PgRestoreListable { if result.Details.PgRestoreListable {
tableInfo := fmt.Sprintf(" (%d tables)", result.Details.TableCount) s.WriteString(diagnosePassStyle.Render(" [+]") + fmt.Sprintf(" pg_restore can list contents (%d tables)\n", result.Details.TableCount))
padding := 36 - len(tableInfo)
if padding < 0 {
padding = 0
}
s.WriteString("| " + diagnosePassStyle.Render("[+]") + " pg_restore can list contents" + tableInfo + strings.Repeat(" ", padding) + "|\n")
} }
if result.Details.CopyBlockCount > 0 { if result.Details.CopyBlockCount > 0 {
blockInfo := fmt.Sprintf("%d COPY blocks found", result.Details.CopyBlockCount) s.WriteString(fmt.Sprintf(" [-] %d COPY blocks found\n", result.Details.CopyBlockCount))
padding := 50 - len(blockInfo)
if padding < 0 {
padding = 0
}
s.WriteString("| [-] " + blockInfo + strings.Repeat(" ", padding) + "|\n")
} }
if result.Details.UnterminatedCopy { if result.Details.UnterminatedCopy {
s.WriteString("| " + diagnoseFailStyle.Render("[-]") + " Unterminated COPY: " + truncate(result.Details.LastCopyTable, 30) + strings.Repeat(" ", 5) + "|\n") s.WriteString(diagnoseFailStyle.Render(" [-]") + " Unterminated COPY: " + truncate(result.Details.LastCopyTable, 30) + "\n")
} }
if result.Details.ProperlyTerminated { if result.Details.ProperlyTerminated {
s.WriteString("| " + diagnosePassStyle.Render("[+]") + " All COPY blocks properly terminated" + strings.Repeat(" ", 17) + "|\n") s.WriteString(diagnosePassStyle.Render(" [+]") + " All COPY blocks properly terminated\n")
} }
if result.Details.ExpandedSize > 0 { if result.Details.ExpandedSize > 0 {
sizeInfo := fmt.Sprintf("Expanded: %s (%.1fx)", formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio) s.WriteString(fmt.Sprintf(" [-] Expanded: %s (%.1fx)\n", formatSize(result.Details.ExpandedSize), result.Details.CompressionRatio))
padding := 50 - len(sizeInfo)
if padding < 0 {
padding = 0
}
s.WriteString("| [-] " + sizeInfo + strings.Repeat(" ", padding) + "|\n")
} }
s.WriteString("+" + strings.Repeat("-", 60) + "+\n") s.WriteString("\n")
} }
// Errors Box // Errors
if len(result.Errors) > 0 { if len(result.Errors) > 0 {
s.WriteString("\n+--[ ERRORS ]" + strings.Repeat("-", 47) + "+\n") s.WriteString(diagnoseFailStyle.Render("[FAIL] Errors"))
s.WriteString("\n")
for i, e := range result.Errors { for i, e := range result.Errors {
if i >= 5 { if i >= 5 {
remaining := fmt.Sprintf("... and %d more errors", len(result.Errors)-5) s.WriteString(fmt.Sprintf(" ... and %d more errors\n", len(result.Errors)-5))
padding := 56 - len(remaining)
s.WriteString("| " + remaining + strings.Repeat(" ", padding) + "|\n")
break break
} }
errText := truncate(e, 54) s.WriteString(" " + truncate(e, 60) + "\n")
padding := 56 - len(errText)
if padding < 0 {
padding = 0
}
s.WriteString("| " + errText + strings.Repeat(" ", padding) + "|\n")
} }
s.WriteString("+" + strings.Repeat("-", 60) + "+\n") s.WriteString("\n")
} }
// Warnings Box // Warnings
if len(result.Warnings) > 0 { if len(result.Warnings) > 0 {
s.WriteString("\n+--[ WARNINGS ]" + strings.Repeat("-", 45) + "+\n") s.WriteString(diagnoseWarnStyle.Render("[WARN] Warnings"))
s.WriteString("\n")
for i, w := range result.Warnings { for i, w := range result.Warnings {
if i >= 3 { if i >= 3 {
remaining := fmt.Sprintf("... and %d more warnings", len(result.Warnings)-3) s.WriteString(fmt.Sprintf(" ... and %d more warnings\n", len(result.Warnings)-3))
padding := 56 - len(remaining)
s.WriteString("| " + remaining + strings.Repeat(" ", padding) + "|\n")
break break
} }
warnText := truncate(w, 54) s.WriteString(" " + truncate(w, 60) + "\n")
padding := 56 - len(warnText)
if padding < 0 {
padding = 0
}
s.WriteString("| " + warnText + strings.Repeat(" ", padding) + "|\n")
} }
s.WriteString("+" + strings.Repeat("-", 60) + "+\n") s.WriteString("\n")
} }
// Recommendations Box // Recommendations
if !result.IsValid { if !result.IsValid {
s.WriteString("\n+--[ RECOMMENDATIONS ]" + strings.Repeat("-", 38) + "+\n") s.WriteString(diagnoseInfoStyle.Render("[HINT] Recommendations"))
s.WriteString("\n")
if result.IsTruncated { if result.IsTruncated {
s.WriteString("| 1. Re-run backup with current version (v3.42.12+) |\n") s.WriteString(" 1. Re-run backup with current version (v3.42+)\n")
s.WriteString("| 2. Check disk space on backup server |\n") s.WriteString(" 2. Check disk space on backup server\n")
s.WriteString("| 3. Verify network stability for remote backups |\n") s.WriteString(" 3. Verify network stability for remote backups\n")
} }
if result.IsCorrupted { if result.IsCorrupted {
s.WriteString("| 1. Verify backup was transferred completely |\n") s.WriteString(" 1. Verify backup was transferred completely\n")
s.WriteString("| 2. Try restoring from a previous backup |\n") s.WriteString(" 2. Try restoring from a previous backup\n")
} }
s.WriteString("+" + strings.Repeat("-", 60) + "+\n")
} }
return s.String() return s.String()
@@ -349,10 +328,8 @@ func (m DiagnoseViewModel) renderClusterResults() string {
} }
} }
s.WriteString(strings.Repeat("-", 60))
s.WriteString("\n") s.WriteString("\n")
s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("CLUSTER SUMMARY: %d databases\n", len(m.results)))) s.WriteString(diagnoseHeaderStyle.Render(fmt.Sprintf("[STATS] Cluster Summary: %d databases", len(m.results))))
s.WriteString(strings.Repeat("-", 60))
s.WriteString("\n\n") s.WriteString("\n\n")
if invalidCount == 0 { if invalidCount == 0 {
@@ -364,7 +341,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
} }
// List all dumps with status // List all dumps with status
s.WriteString(diagnoseHeaderStyle.Render("Database Dumps:")) s.WriteString(diagnoseHeaderStyle.Render("[LIST] Database Dumps"))
s.WriteString("\n") s.WriteString("\n")
// Show visible range based on cursor // Show visible range based on cursor
@@ -413,9 +390,7 @@ func (m DiagnoseViewModel) renderClusterResults() string {
if m.cursor < len(m.results) { if m.cursor < len(m.results) {
selected := m.results[m.cursor] selected := m.results[m.cursor]
s.WriteString("\n") s.WriteString("\n")
s.WriteString(strings.Repeat("-", 60)) s.WriteString(diagnoseHeaderStyle.Render("[INFO] Selected: " + selected.FileName))
s.WriteString("\n")
s.WriteString(diagnoseHeaderStyle.Render("Selected: " + selected.FileName))
s.WriteString("\n\n") s.WriteString("\n\n")
// Show condensed details for selected // Show condensed details for selected

View File

@@ -334,13 +334,13 @@ func (m *MenuModel) View() string {
// handleSingleBackup opens database selector for single backup // handleSingleBackup opens database selector for single backup
func (m *MenuModel) handleSingleBackup() (tea.Model, tea.Cmd) { func (m *MenuModel) handleSingleBackup() (tea.Model, tea.Cmd) {
selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[DB] Single Database Backup", "single") selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[SELECT] Single Database Backup", "single")
return selector, selector.Init() return selector, selector.Init()
} }
// handleSampleBackup opens database selector for sample backup // handleSampleBackup opens database selector for sample backup
func (m *MenuModel) handleSampleBackup() (tea.Model, tea.Cmd) { func (m *MenuModel) handleSampleBackup() (tea.Model, tea.Cmd) {
selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[STATS] Sample Database Backup", "sample") selector := NewDatabaseSelector(m.config, m.logger, m, m.ctx, "[SELECT] Sample Database Backup", "sample")
return selector, selector.Init() return selector, selector.Init()
} }
@@ -356,7 +356,7 @@ func (m *MenuModel) handleClusterBackup() (tea.Model, tea.Cmd) {
return executor, executor.Init() return executor, executor.Init()
} }
confirm := NewConfirmationModelWithAction(m.config, m.logger, m, confirm := NewConfirmationModelWithAction(m.config, m.logger, m,
"[DB] Cluster Backup", "[CHECK] Cluster Backup",
"This will backup ALL databases in the cluster. Continue?", "This will backup ALL databases in the cluster. Continue?",
func() (tea.Model, tea.Cmd) { func() (tea.Model, tea.Cmd) {
executor := NewBackupExecution(m.config, m.logger, m, m.ctx, "cluster", "", 0) executor := NewBackupExecution(m.config, m.logger, m, m.ctx, "cluster", "", 0)

View File

@@ -747,7 +747,7 @@ func (m SettingsModel) View() string {
// Current configuration summary // Current configuration summary
if !m.editing { if !m.editing {
b.WriteString("\n") b.WriteString("\n")
b.WriteString(infoStyle.Render("[LOG] Current Configuration:")) b.WriteString(infoStyle.Render("[INFO] Current Configuration"))
b.WriteString("\n") b.WriteString("\n")
summary := []string{ summary := []string{

View File

@@ -173,7 +173,7 @@ func (m StatusViewModel) View() string {
s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v\n", m.err))) s.WriteString(errorStyle.Render(fmt.Sprintf("[FAIL] Error: %v\n", m.err)))
s.WriteString("\n") s.WriteString("\n")
} else { } else {
s.WriteString("Connection Status:\n") s.WriteString("[CONN] Connection Status\n")
if m.connected { if m.connected {
s.WriteString(successStyle.Render(" [+] Connected\n")) s.WriteString(successStyle.Render(" [+] Connected\n"))
} else { } else {
@@ -181,11 +181,12 @@ func (m StatusViewModel) View() string {
} }
s.WriteString("\n") s.WriteString("\n")
s.WriteString(fmt.Sprintf("Database Type: %s (%s)\n", m.config.DisplayDatabaseType(), m.config.DatabaseType)) s.WriteString("[INFO] Server Details\n")
s.WriteString(fmt.Sprintf("Host: %s:%d\n", m.config.Host, m.config.Port)) s.WriteString(fmt.Sprintf(" Database Type: %s (%s)\n", m.config.DisplayDatabaseType(), m.config.DatabaseType))
s.WriteString(fmt.Sprintf("User: %s\n", m.config.User)) s.WriteString(fmt.Sprintf(" Host: %s:%d\n", m.config.Host, m.config.Port))
s.WriteString(fmt.Sprintf("Backup Directory: %s\n", m.config.BackupDir)) s.WriteString(fmt.Sprintf(" User: %s\n", m.config.User))
s.WriteString(fmt.Sprintf("Version: %s\n\n", m.dbVersion)) s.WriteString(fmt.Sprintf(" Backup Directory: %s\n", m.config.BackupDir))
s.WriteString(fmt.Sprintf(" Version: %s\n\n", m.dbVersion))
if m.dbCount > 0 { if m.dbCount > 0 {
s.WriteString(fmt.Sprintf("Databases Found: %s\n", successStyle.Render(fmt.Sprintf("%d", m.dbCount)))) s.WriteString(fmt.Sprintf("Databases Found: %s\n", successStyle.Render(fmt.Sprintf("%d", m.dbCount))))

View File

@@ -143,11 +143,11 @@ const (
PrefixConfig = "[CONFIG]" PrefixConfig = "[CONFIG]"
// Status prefixes // Status prefixes
PrefixOK = "[OK]" PrefixOK = "[OK]"
PrefixFail = "[FAIL]" PrefixFail = "[FAIL]"
PrefixWait = "[WAIT]" PrefixWait = "[WAIT]"
PrefixWarn = "[WARN]" PrefixWarn = "[WARN]"
PrefixInfo = "[INFO]" PrefixInfo = "[INFO]"
// List item prefixes // List item prefixes
PrefixPlus = "[+]" PrefixPlus = "[+]"