Feature: Replace in-memory log buffer with database-backed logging

This commit is contained in:
Jeffrey Paul 2025-05-22 06:40:15 -07:00
parent e66361ea4e
commit 54593c31da
4 changed files with 156 additions and 76 deletions

11
main.go
View File

@ -20,7 +20,6 @@ var (
logPath = runStart.Format("2006-01-02.15:04:05") + ".gomeshalerter.json"
logFile *os.File
logData []LogEntry
logBuffer *LogRingBuffer
db *sql.DB
logMutex sync.Mutex // Mutex for thread-safe logging
)
@ -36,9 +35,6 @@ func main() {
log.Fatalf("Failed to open database: %v", err)
}
// Initialize log buffer
logBuffer = NewLogRingBuffer(MAX_LOG_ENTRIES)
// Define a cleanup function to properly close resources
cleanup := func() {
fmt.Fprintf(os.Stderr, "[shutdown] Closing database...\n")
@ -113,6 +109,13 @@ func main() {
webServer(shutdown)
}()
// Start log cleanup worker goroutine
wg.Add(1)
go func() {
defer wg.Done()
logCleanupWorker(shutdown)
}()
// Wait for all goroutines to finish
wg.Wait()
fmt.Fprintf(os.Stderr, "[shutdown] All goroutines stopped, exiting...\n")

View File

@ -1,7 +1,6 @@
package main
import (
"sync"
"time"
)
@ -26,15 +25,6 @@ type LogEntry struct {
Details map[string]interface{} `json:"details"`
}
// LogRingBuffer holds the most recent log entries in a circular buffer.
// Once count reaches size, Add overwrites the oldest slot.
type LogRingBuffer struct {
	entries  []LogEntry // backing array, fixed length == size
	size     int        // capacity of the buffer
	position int        // index where the next entry will be written
	count    int        // number of valid entries currently stored (<= size)
	mutex    sync.Mutex // guards all fields for concurrent Add/GetAll
}
// Data structure for web UI
type DashboardData struct {
LastUpdated string

View File

@ -9,58 +9,10 @@ import (
"os"
"sort"
"time"
"github.com/oklog/ulid/v2"
)
// NewLogRingBuffer returns an empty ring buffer that holds at most
// capacity log entries; once full, new entries evict the oldest.
func NewLogRingBuffer(capacity int) *LogRingBuffer {
	rb := &LogRingBuffer{size: capacity}
	rb.entries = make([]LogEntry, capacity)
	return rb
}
// Add stores entry in the buffer, overwriting the oldest entry once
// the buffer is at capacity. Safe for concurrent use.
func (rb *LogRingBuffer) Add(entry LogEntry) {
	rb.mutex.Lock()
	defer rb.mutex.Unlock()

	rb.entries[rb.position] = entry
	// Advance the write cursor, wrapping to the start of the array.
	rb.position = (rb.position + 1) % rb.size
	// Count grows until the buffer is full, then stays at capacity.
	if rb.count != rb.size {
		rb.count++
	}
}
// GetAll returns a snapshot of the buffered entries ordered from
// newest to oldest. The returned slice always has length rb.count.
func (rb *LogRingBuffer) GetAll() []LogEntry {
	rb.mutex.Lock()
	defer rb.mutex.Unlock()

	out := make([]LogEntry, rb.count)
	// Walk backwards from the slot most recently written (position-1),
	// wrapping around the end of the backing array. The +2*size keeps
	// the dividend non-negative for Go's % operator.
	for i := range out {
		idx := (rb.position - 1 - i + 2*rb.size) % rb.size
		out[i] = rb.entries[idx]
	}
	return out
}
func setupDatabase() error {
_, err := db.Exec(`
CREATE TABLE IF NOT EXISTS articles (
@ -82,6 +34,26 @@ func setupDatabase() error {
return err
}
// Create logs table for structured log entries
_, err = db.Exec(`
CREATE TABLE IF NOT EXISTS logs (
id TEXT PRIMARY KEY,
timestamp TIMESTAMP NOT NULL,
log JSON NOT NULL
)
`)
if err != nil {
return err
}
// Create index on timestamp for efficient querying and deletion
_, err = db.Exec(`
CREATE INDEX IF NOT EXISTS idx_logs_timestamp ON logs (timestamp)
`)
if err != nil {
return err
}
// Check if columns exist
rows, err := db.Query(`PRAGMA table_info(articles)`)
if err != nil {
@ -432,22 +404,129 @@ func flushLog() {
logFile.Close()
}
// logEvent logs a structured message to both the in-memory log data
// (flushed to the JSON log file) and the logs table in the database.
// Fix: the rendered diff left merged old+new residue here — a duplicate
// Timestamp field in the LogEntry literal and a stray
// details["event"] assignment inside the timestamp branch; this is the
// reconstructed post-commit version.
func logEvent(event string, details map[string]interface{}) {
	logMutex.Lock()
	defer logMutex.Unlock()

	// Set timestamp if not already provided
	if _, exists := details["timestamp"]; !exists {
		details["timestamp"] = time.Now()
	}

	// Set event if not already in details
	if _, exists := details["event"]; !exists {
		details["event"] = event
	}

	timestamp := time.Now()
	entry := LogEntry{
		Timestamp: timestamp,
		Event:     event,
		Details:   details,
	}

	// Add to the permanent log data (for file-based logging)
	logData = append(logData, entry)

	// Store log in database as a JSON blob
	logBytes, err := json.Marshal(entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error marshaling log entry: %v\n", err)
		return
	}

	// Generate ULID for the log entry: time-ordered, unique primary key,
	// so lexical id order matches insertion order.
	entropy := ulid.DefaultEntropy()
	id := ulid.MustNew(ulid.Timestamp(timestamp), entropy).String()

	// Insert into database; a failure here is reported but does not
	// abort the caller (file-based logging already succeeded).
	_, err = db.Exec("INSERT INTO logs (id, timestamp, log) VALUES (?, ?, ?)",
		id, timestamp, string(logBytes))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error storing log in database: %v\n", err)
	}
}
// getRecentLogs retrieves up to limit recent log entries from the
// database, newest first, decoding each stored JSON blob back into a
// LogEntry. Returns a nil slice (not an error) when the table is empty.
func getRecentLogs(limit int) ([]LogEntry, error) {
	rows, err := db.Query(`
		SELECT log FROM logs
		ORDER BY timestamp DESC
		LIMIT ?
	`, limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var logs []LogEntry
	for rows.Next() {
		var logJSON string
		if err := rows.Scan(&logJSON); err != nil {
			return nil, err
		}
		var entry LogEntry
		if err := json.Unmarshal([]byte(logJSON), &entry); err != nil {
			return nil, err
		}
		logs = append(logs, entry)
	}
	// rows.Next() returns false on both end-of-set and error; without
	// this check a mid-iteration failure would be silently truncated.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return logs, nil
}
// cleanupOldLogs deletes every log row whose timestamp is more than
// one month old, reporting the purge count on stderr when non-zero.
func cleanupOldLogs() error {
	oneMonthAgo := time.Now().AddDate(0, -1, 0)

	res, err := db.Exec("DELETE FROM logs WHERE timestamp < ?", oneMonthAgo)
	if err != nil {
		return err
	}

	// Best-effort count; some drivers cannot report affected rows.
	if n, _ := res.RowsAffected(); n > 0 {
		fmt.Fprintf(os.Stderr, "[logs] Deleted %d log entries older than one month\n", n)
	}
	return nil
}
// logCleanupWorker purges expired log rows on a fixed schedule until
// the shutdown channel is closed: one pass immediately at startup,
// then one every 15 minutes.
func logCleanupWorker(shutdown chan struct{}) {
	logInfo("logs", "Starting log cleanup worker", map[string]interface{}{
		"interval":  "15 minutes",
		"retention": "1 month",
	})

	// Shared helper: one cleanup pass, logging (not propagating) failure.
	runCleanup := func() {
		if err := cleanupOldLogs(); err != nil {
			logInfo("logs", "Error cleaning up old logs", map[string]interface{}{
				"error": err.Error(),
			})
		}
	}

	// First pass runs right away rather than waiting a full interval.
	runCleanup()

	ticker := time.NewTicker(15 * time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			runCleanup()
		case <-shutdown:
			logInfo("logs", "Shutting down log cleanup worker", nil)
			return
		}
	}
}
// logInfo logs a structured message to both console and log file

View File

@ -120,7 +120,15 @@ func getDashboardData() (DashboardData, error) {
data.NextUp = nextUp
// Get recent logs
data.RecentLogs = logBuffer.GetAll()
recentLogs, err := getRecentLogs(100)
if err != nil {
logInfo("web", "Error fetching recent logs", map[string]interface{}{
"error": err.Error(),
})
// Continue with empty logs list
recentLogs = []LogEntry{}
}
data.RecentLogs = recentLogs
return data, nil
}