Replace live_routes database table with in-memory routing table

- Remove live_routes table from SQL schema and all related indexes
- Create new internal/routingtable package with thread-safe RoutingTable
- Implement RouteKey-based indexing with secondary indexes for efficient lookups
- Add RoutingTableHandler to manage in-memory routes separately from database
- Update DatabaseHandler to only handle persistent database operations
- Wire up RoutingTable through fx dependency injection
- Update server to get live route count from routing table instead of database
- Remove LiveRoutes field from database.Stats struct
- Update tests to work with the new architecture
This commit is contained in:
2025-07-27 23:16:19 +02:00
parent b49d3ce88c
commit a555a1dee2
14 changed files with 745 additions and 268 deletions

View File

@@ -25,7 +25,7 @@ const (
metricsLogInterval = 10 * time.Second
bytesPerKB = 1024
bytesPerMB = 1024 * 1024
maxConcurrentHandlers = 100 // Maximum number of concurrent message handlers
maxConcurrentHandlers = 200 // Maximum number of concurrent message handlers
)
// MessageHandler is an interface for handling RIS messages
@@ -141,16 +141,26 @@ func (s *Streamer) logMetrics() {
const bitsPerMegabit = 1000000
droppedMessages := atomic.LoadUint64(&s.droppedMessages)
s.logger.Info("Stream statistics",
"uptime", uptime,
"total_messages", metrics.TotalMessages,
"total_bytes", metrics.TotalBytes,
"total_mb", fmt.Sprintf("%.2f", float64(metrics.TotalBytes)/bytesPerMB),
"messages_per_sec", fmt.Sprintf("%.2f", metrics.MessagesPerSec),
"bits_per_sec", fmt.Sprintf("%.0f", metrics.BitsPerSec),
"mbps", fmt.Sprintf("%.2f", metrics.BitsPerSec/bitsPerMegabit),
"dropped_messages", droppedMessages,
"active_handlers", len(s.semaphore),
s.logger.Info(
"Stream statistics",
"uptime",
uptime,
"total_messages",
metrics.TotalMessages,
"total_bytes",
metrics.TotalBytes,
"total_mb",
fmt.Sprintf("%.2f", float64(metrics.TotalBytes)/bytesPerMB),
"messages_per_sec",
fmt.Sprintf("%.2f", metrics.MessagesPerSec),
"bits_per_sec",
fmt.Sprintf("%.0f", metrics.BitsPerSec),
"mbps",
fmt.Sprintf("%.2f", metrics.BitsPerSec/bitsPerMegabit),
"dropped_messages",
droppedMessages,
"active_handlers",
len(s.semaphore),
)
}
@@ -262,7 +272,8 @@ func (s *Streamer) stream(ctx context.Context) error {
msg := wrapper.Data
// Parse the timestamp
msg.ParsedTimestamp = time.Unix(int64(msg.Timestamp), 0).UTC()
msg.ParsedTimestamp = time.Unix(int64(msg.Timestamp), 0).
UTC()
// Process based on message type
switch msg.Type {
@@ -294,7 +305,12 @@ func (s *Streamer) stream(ctx context.Context) error {
msg.Type,
string(rawLine),
)
panic(fmt.Sprintf("Unknown RIS message type: %s", msg.Type))
panic(
fmt.Sprintf(
"Unknown RIS message type: %s",
msg.Type,
),
)
}
// Call handlers synchronously within this goroutine
@@ -309,7 +325,13 @@ func (s *Streamer) stream(ctx context.Context) error {
// Semaphore is full, drop the message
dropped := atomic.AddUint64(&s.droppedMessages, 1)
if dropped%1000 == 0 { // Log every 1000 dropped messages
s.logger.Warn("Dropping messages due to overload", "total_dropped", dropped, "max_handlers", maxConcurrentHandlers)
s.logger.Warn(
"Dropping messages due to overload",
"total_dropped",
dropped,
"max_handlers",
maxConcurrentHandlers,
)
}
}
}