Switch to incremental vacuum for non-blocking space reclamation

- Use PRAGMA incremental_vacuum instead of full VACUUM
- Frees ~1000 pages (~4MB) per run without blocking writes
- Run every 10 minutes instead of 6 hours since it's lightweight
- Set auto_vacuum=INCREMENTAL pragma (takes effect only for newly created databases; an existing database must be fully VACUUMed once before incremental_vacuum can free any pages)
- Remove blocking VACUUM on startup
This commit is contained in:
Jeffrey Paul 2025-12-29 16:00:33 +07:00
parent da6d605e4d
commit d7e6f46320
2 changed files with 23 additions and 19 deletions

View File

@ -113,6 +113,7 @@ func (d *Database) Initialize() error {
"PRAGMA wal_checkpoint(TRUNCATE)", // Checkpoint and truncate WAL now
"PRAGMA busy_timeout=5000", // 5 second busy timeout
"PRAGMA analysis_limit=0", // Disable automatic ANALYZE
"PRAGMA auto_vacuum=INCREMENTAL", // Enable incremental vacuum (new DBs only)
}
for _, pragma := range pragmas {
@ -126,12 +127,6 @@ func (d *Database) Initialize() error {
return err
}
// Run VACUUM on startup to optimize database
d.logger.Info("Running VACUUM to optimize database (this may take a moment)")
if err := d.exec("VACUUM"); err != nil {
d.logger.Warn("Failed to VACUUM database", "error", err)
}
return nil
}
@ -1951,11 +1946,15 @@ func (d *Database) getIPv6Info(ctx context.Context, ip string, parsedIP net.IP)
return info, nil
}
// Vacuum runs the SQLite VACUUM command to reclaim unused space and defragment the database.
// Vacuum runs incremental vacuum to reclaim unused pages without blocking writes.
// It frees up to a fixed number of pages (pagesToFree) per call, keeping each run quick and non-blocking.
func (d *Database) Vacuum(ctx context.Context) error {
_, err := d.db.ExecContext(ctx, "VACUUM")
// Free up to 1000 pages per call (~4MB with default 4KB page size)
// This keeps each vacuum operation quick and non-blocking
const pagesToFree = 1000
_, err := d.db.ExecContext(ctx, fmt.Sprintf("PRAGMA incremental_vacuum(%d)", pagesToFree))
if err != nil {
return fmt.Errorf("failed to vacuum database: %w", err)
return fmt.Errorf("failed to run incremental vacuum: %w", err)
}
return nil

View File

@ -12,14 +12,19 @@ import (
// Database maintenance configuration constants.
const (
// vacuumInterval is how often to run VACUUM.
vacuumInterval = 6 * time.Hour
// vacuumInterval is how often to run incremental vacuum.
// Since incremental vacuum only frees ~1000 pages (~4MB) per run,
// we run it frequently to keep up with deletions.
vacuumInterval = 10 * time.Minute
// analyzeInterval is how often to run ANALYZE.
analyzeInterval = 1 * time.Hour
// maintenanceTimeout is the max time for a maintenance operation.
maintenanceTimeout = 5 * time.Minute
// vacuumTimeout is the max time for incremental vacuum (should be quick).
vacuumTimeout = 30 * time.Second
// analyzeTimeout is the max time for ANALYZE.
analyzeTimeout = 5 * time.Minute
)
// DBMaintainer handles background database maintenance tasks.
@ -91,12 +96,12 @@ func (m *DBMaintainer) run() {
}
}
// runVacuum performs a VACUUM operation on the database.
// runVacuum performs an incremental vacuum operation on the database.
func (m *DBMaintainer) runVacuum() {
ctx, cancel := context.WithTimeout(context.Background(), maintenanceTimeout)
ctx, cancel := context.WithTimeout(context.Background(), vacuumTimeout)
defer cancel()
m.logger.Info("Starting database VACUUM")
m.logger.Debug("Running incremental vacuum")
startTime := time.Now()
err := m.db.Vacuum(ctx)
@ -110,15 +115,15 @@ func (m *DBMaintainer) runVacuum() {
m.statsMu.Unlock()
if err != nil {
m.logger.Error("VACUUM failed", "error", err, "duration", time.Since(startTime))
m.logger.Error("Incremental vacuum failed", "error", err, "duration", time.Since(startTime))
} else {
m.logger.Info("VACUUM completed", "duration", time.Since(startTime))
m.logger.Debug("Incremental vacuum completed", "duration", time.Since(startTime))
}
}
// runAnalyze performs an ANALYZE operation on the database.
func (m *DBMaintainer) runAnalyze() {
ctx, cancel := context.WithTimeout(context.Background(), maintenanceTimeout)
ctx, cancel := context.WithTimeout(context.Background(), analyzeTimeout)
defer cancel()
m.logger.Info("Starting database ANALYZE")