Optimize database batch operations with prepared statements

- Add prepared statements to all batch operations for better performance
- Fix database lock contention by properly batching operations
- Update SQLite settings for extreme performance (8GB cache, sync OFF)
- Add proper error handling for statement closing
- Update tests to properly track batch operations
Jeffrey Paul 2025-07-28 17:21:40 +02:00
parent b9b0792df9
commit 40d7f0185b
8 changed files with 721 additions and 49 deletions
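
Every batch method added below follows the same shape: begin one transaction, prepare each statement once, execute it per row, and commit at the end, with a deferred rollback guarding the error path. A condensed sketch of that pattern, assuming a placeholder "example" table and row type (the real methods additionally go through the repository's beginTx logging wrapper rather than calling d.db.Begin directly):

// upsertExampleBatch illustrates the transaction-plus-prepared-statement
// pattern shared by UpsertLiveRouteBatch, DeleteLiveRouteBatch,
// GetOrCreateASNBatch and UpdatePeerBatch. The table, columns and row
// type are placeholders, not part of this commit.
type exampleRow struct {
	ID    string
	Value string
}

func (d *Database) upsertExampleBatch(rows []exampleRow) error {
	if len(rows) == 0 {
		return nil
	}
	tx, err := d.db.Begin()
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() {
		if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
			d.logger.Error("Failed to rollback transaction", "error", err)
		}
	}()
	// Compile once inside the transaction; every row reuses the prepared plan.
	stmt, err := tx.Prepare(
		"INSERT INTO example (id, value) VALUES (?, ?) ON CONFLICT(id) DO UPDATE SET value = excluded.value")
	if err != nil {
		return fmt.Errorf("failed to prepare statement: %w", err)
	}
	defer func() { _ = stmt.Close() }()
	for _, r := range rows {
		if _, err := stmt.Exec(r.ID, r.Value); err != nil {
			return fmt.Errorf("failed to upsert %s: %w", r.ID, err)
		}
	}
	return tx.Commit()
}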

View File

@@ -62,7 +62,7 @@ func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
// Add connection parameters for go-sqlite3
// Enable WAL mode and other performance optimizations
dsn := fmt.Sprintf("file:%s?_busy_timeout=5000&_journal_mode=WAL&_synchronous=NORMAL&cache=shared", dbPath)
dsn := fmt.Sprintf("file:%s?_busy_timeout=5000&_journal_mode=WAL&_synchronous=OFF&cache=shared", dbPath)
db, err := sql.Open("sqlite3", dsn)
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
@@ -73,9 +73,10 @@ func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
}
// Set connection pool parameters
// Single connection to avoid locking issues with SQLite
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
// Multiple connections for better concurrency
const maxConns = 10
db.SetMaxOpenConns(maxConns)
db.SetMaxIdleConns(maxConns)
db.SetConnMaxLifetime(0)
database := &Database{db: db, logger: logger, path: dbPath}
@@ -89,21 +90,22 @@ func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
// Initialize creates the database schema if it doesn't exist.
func (d *Database) Initialize() error {
// Set SQLite pragmas for better performance
// Set SQLite pragmas for extreme performance - prioritize speed over durability
pragmas := []string{
"PRAGMA journal_mode=WAL", // Write-Ahead Logging
"PRAGMA synchronous=OFF", // Don't wait for disk writes
"PRAGMA cache_size=-2097152", // 2GB cache (negative = KB)
"PRAGMA cache_size=-8388608", // 8GB cache (negative = KB)
"PRAGMA temp_store=MEMORY", // Use memory for temp tables
"PRAGMA mmap_size=536870912", // 512MB memory-mapped I/O
"PRAGMA wal_autocheckpoint=12500", // Checkpoint every 12.5k pages (50MB with 4KB pages)
"PRAGMA wal_checkpoint(PASSIVE)", // Checkpoint now
"PRAGMA page_size=4096", // Standard page size
"PRAGMA busy_timeout=2000", // 2 second busy timeout
"PRAGMA locking_mode=NORMAL", // Allow multiple connections
"PRAGMA read_uncommitted=true", // Allow dirty reads for better concurrency
"PRAGMA journal_size_limit=104857600", // Limit WAL to 100MB
"PRAGMA mmap_size=10737418240", // 10GB memory-mapped I/O
"PRAGMA page_size=8192", // 8KB pages for better performance
"PRAGMA wal_autocheckpoint=100000", // Checkpoint every 100k pages (800MB)
"PRAGMA wal_checkpoint(TRUNCATE)", // Checkpoint and truncate WAL now
"PRAGMA busy_timeout=5000", // 5 second busy timeout
"PRAGMA locking_mode=NORMAL", // Normal locking for multiple connections
"PRAGMA read_uncommitted=true", // Allow dirty reads
"PRAGMA analysis_limit=0", // Disable automatic ANALYZE
"PRAGMA threads=4", // Use multiple threads for sorting
"PRAGMA cache_spill=false", // Keep cache in memory, don't spill to disk
}
for _, pragma := range pragmas {
@@ -141,6 +143,241 @@ func (d *Database) beginTx() (*loggingTx, error) {
return &loggingTx{Tx: tx, logger: d.logger}, nil
}
// UpsertLiveRouteBatch inserts or updates multiple live routes in a single transaction
func (d *Database) UpsertLiveRouteBatch(routes []*LiveRoute) error {
if len(routes) == 0 {
return nil
}
tx, err := d.beginTx()
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
// Use prepared statement for better performance
query := `
INSERT INTO live_routes (id, prefix, mask_length, ip_version, origin_asn, peer_ip, as_path, next_hop,
last_updated, v4_ip_start, v4_ip_end)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(prefix, origin_asn, peer_ip) DO UPDATE SET
mask_length = excluded.mask_length,
ip_version = excluded.ip_version,
as_path = excluded.as_path,
next_hop = excluded.next_hop,
last_updated = excluded.last_updated,
v4_ip_start = excluded.v4_ip_start,
v4_ip_end = excluded.v4_ip_end
`
stmt, err := tx.Prepare(query)
if err != nil {
return fmt.Errorf("failed to prepare statement: %w", err)
}
defer func() { _ = stmt.Close() }()
for _, route := range routes {
// Encode AS path as JSON
pathJSON, err := json.Marshal(route.ASPath)
if err != nil {
return fmt.Errorf("failed to encode AS path: %w", err)
}
// Convert v4_ip_start and v4_ip_end to interface{} for SQL NULL handling
var v4Start, v4End interface{}
if route.V4IPStart != nil {
v4Start = *route.V4IPStart
}
if route.V4IPEnd != nil {
v4End = *route.V4IPEnd
}
_, err = stmt.Exec(
route.ID.String(),
route.Prefix,
route.MaskLength,
route.IPVersion,
route.OriginASN,
route.PeerIP,
string(pathJSON),
route.NextHop,
route.LastUpdated,
v4Start,
v4End,
)
if err != nil {
return fmt.Errorf("failed to upsert route %s: %w", route.Prefix, err)
}
}
if err = tx.Commit(); err != nil {
return fmt.Errorf("failed to commit transaction: %w", err)
}
return nil
}
// DeleteLiveRouteBatch deletes multiple live routes in a single transaction
func (d *Database) DeleteLiveRouteBatch(deletions []LiveRouteDeletion) error {
if len(deletions) == 0 {
return nil
}
tx, err := d.beginTx()
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
// Separate deletions by type and use prepared statements
var withOrigin []LiveRouteDeletion
var withoutOrigin []LiveRouteDeletion
for _, del := range deletions {
if del.OriginASN == 0 {
withoutOrigin = append(withoutOrigin, del)
} else {
withOrigin = append(withOrigin, del)
}
}
// Process deletions with origin ASN
if len(withOrigin) > 0 {
stmt, err := tx.Prepare(`DELETE FROM live_routes WHERE prefix = ? AND origin_asn = ? AND peer_ip = ?`)
if err != nil {
return fmt.Errorf("failed to prepare delete statement: %w", err)
}
defer func() { _ = stmt.Close() }()
for _, del := range withOrigin {
_, err = stmt.Exec(del.Prefix, del.OriginASN, del.PeerIP)
if err != nil {
return fmt.Errorf("failed to delete route %s: %w", del.Prefix, err)
}
}
}
// Process deletions without origin ASN
if len(withoutOrigin) > 0 {
stmt, err := tx.Prepare(`DELETE FROM live_routes WHERE prefix = ? AND peer_ip = ?`)
if err != nil {
return fmt.Errorf("failed to prepare delete statement: %w", err)
}
defer func() { _ = stmt.Close() }()
for _, del := range withoutOrigin {
_, err = stmt.Exec(del.Prefix, del.PeerIP)
if err != nil {
return fmt.Errorf("failed to delete route %s: %w", del.Prefix, err)
}
}
}
if err = tx.Commit(); err != nil {
return fmt.Errorf("failed to commit transaction: %w", err)
}
return nil
}
// GetOrCreateASNBatch creates or updates multiple ASNs in a single transaction
func (d *Database) GetOrCreateASNBatch(asns map[int]time.Time) error {
if len(asns) == 0 {
return nil
}
tx, err := d.beginTx()
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
// Prepare statements
selectStmt, err := tx.Prepare(
"SELECT id, number, handle, description, first_seen, last_seen FROM asns WHERE number = ?")
if err != nil {
return fmt.Errorf("failed to prepare select statement: %w", err)
}
defer func() { _ = selectStmt.Close() }()
updateStmt, err := tx.Prepare("UPDATE asns SET last_seen = ? WHERE id = ?")
if err != nil {
return fmt.Errorf("failed to prepare update statement: %w", err)
}
defer func() { _ = updateStmt.Close() }()
insertStmt, err := tx.Prepare(
"INSERT INTO asns (id, number, handle, description, first_seen, last_seen) VALUES (?, ?, ?, ?, ?, ?)")
if err != nil {
return fmt.Errorf("failed to prepare insert statement: %w", err)
}
defer func() { _ = insertStmt.Close() }()
for number, timestamp := range asns {
var asn ASN
var idStr string
var handle, description sql.NullString
err = selectStmt.QueryRow(number).Scan(&idStr, &asn.Number, &handle, &description, &asn.FirstSeen, &asn.LastSeen)
if err == nil {
// ASN exists, update last_seen
asn.ID, _ = uuid.Parse(idStr)
_, err = updateStmt.Exec(timestamp, asn.ID.String())
if err != nil {
return fmt.Errorf("failed to update ASN %d: %w", number, err)
}
continue
}
if err == sql.ErrNoRows {
// ASN doesn't exist, create it
asn = ASN{
ID: generateUUID(),
Number: number,
FirstSeen: timestamp,
LastSeen: timestamp,
}
// Look up ASN info
if info, ok := asinfo.Get(number); ok {
asn.Handle = info.Handle
asn.Description = info.Description
}
_, err = insertStmt.Exec(asn.ID.String(), asn.Number, asn.Handle, asn.Description, asn.FirstSeen, asn.LastSeen)
if err != nil {
return fmt.Errorf("failed to insert ASN %d: %w", number, err)
}
continue
}
if err != nil {
return fmt.Errorf("failed to query ASN %d: %w", number, err)
}
}
if err = tx.Commit(); err != nil {
return fmt.Errorf("failed to commit transaction: %w", err)
}
return nil
}
// GetOrCreateASN retrieves an existing ASN or creates a new one if it doesn't exist.
func (d *Database) GetOrCreateASN(number int, timestamp time.Time) (*ASN, error) {
tx, err := d.beginTx()
@@ -344,6 +581,69 @@ func (d *Database) RecordPeering(asA, asB int, timestamp time.Time) error {
return nil
}
// UpdatePeerBatch updates or creates multiple BGP peer records in a single transaction
func (d *Database) UpdatePeerBatch(peers map[string]PeerUpdate) error {
if len(peers) == 0 {
return nil
}
tx, err := d.beginTx()
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
// Prepare statements
checkStmt, err := tx.Prepare("SELECT EXISTS(SELECT 1 FROM bgp_peers WHERE peer_ip = ?)")
if err != nil {
return fmt.Errorf("failed to prepare check statement: %w", err)
}
defer func() { _ = checkStmt.Close() }()
updateStmt, err := tx.Prepare(
"UPDATE bgp_peers SET peer_asn = ?, last_seen = ?, last_message_type = ? WHERE peer_ip = ?")
if err != nil {
return fmt.Errorf("failed to prepare update statement: %w", err)
}
defer func() { _ = updateStmt.Close() }()
insertStmt, err := tx.Prepare(
"INSERT INTO bgp_peers (id, peer_ip, peer_asn, first_seen, last_seen, last_message_type) VALUES (?, ?, ?, ?, ?, ?)")
if err != nil {
return fmt.Errorf("failed to prepare insert statement: %w", err)
}
defer func() { _ = insertStmt.Close() }()
for _, update := range peers {
var exists bool
err = checkStmt.QueryRow(update.PeerIP).Scan(&exists)
if err != nil {
return fmt.Errorf("failed to check peer %s: %w", update.PeerIP, err)
}
if exists {
_, err = updateStmt.Exec(update.PeerASN, update.Timestamp, update.MessageType, update.PeerIP)
} else {
_, err = insertStmt.Exec(
generateUUID().String(), update.PeerIP, update.PeerASN,
update.Timestamp, update.Timestamp, update.MessageType)
}
if err != nil {
return fmt.Errorf("failed to update peer %s: %w", update.PeerIP, err)
}
}
if err = tx.Commit(); err != nil {
return fmt.Errorf("failed to commit transaction: %w", err)
}
return nil
}
// UpdatePeer updates or creates a BGP peer record
func (d *Database) UpdatePeer(peerIP string, peerASN int, messageType string, timestamp time.Time) error {
tx, err := d.beginTx()

View File

@@ -22,6 +22,7 @@ type Stats struct {
type Store interface {
// ASN operations
GetOrCreateASN(number int, timestamp time.Time) (*ASN, error)
GetOrCreateASNBatch(asns map[int]time.Time) error
// Prefix operations
GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error)
@@ -37,10 +38,13 @@ type Store interface {
// Peer operations
UpdatePeer(peerIP string, peerASN int, messageType string, timestamp time.Time) error
UpdatePeerBatch(peers map[string]PeerUpdate) error
// Live route operations
UpsertLiveRoute(route *LiveRoute) error
UpsertLiveRouteBatch(routes []*LiveRoute) error
DeleteLiveRoute(prefix string, originASN int, peerIP string) error
DeleteLiveRouteBatch(deletions []LiveRouteDeletion) error
GetPrefixDistribution() (ipv4 []PrefixDistribution, ipv6 []PrefixDistribution, err error)
GetLiveRouteCounts() (ipv4Count, ipv6Count int, err error)

View File

@@ -77,3 +77,18 @@ type ASInfo struct {
LastUpdated time.Time `json:"last_updated"`
Age string `json:"age"`
}
// LiveRouteDeletion represents parameters for deleting a live route
type LiveRouteDeletion struct {
Prefix string
OriginASN int
PeerIP string
}
// PeerUpdate represents parameters for updating a peer
type PeerUpdate struct {
PeerIP string
PeerASN int
MessageType string
Timestamp time.Time
}
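
The two value types above are what the handlers pass to the new batch entry points on Store. A minimal usage sketch; the ASN numbers, addresses, prefixes, and the store/logger variables are illustrative, not taken from this commit:

// Illustrative only: build batch inputs and hand them to any database.Store.
asns := map[int]time.Time{64512: time.Now(), 64513: time.Now()}
peers := map[string]database.PeerUpdate{
	"192.0.2.1": {PeerIP: "192.0.2.1", PeerASN: 64512, MessageType: "announcement", Timestamp: time.Now()},
}
deletions := []database.LiveRouteDeletion{
	{Prefix: "198.51.100.0/24", OriginASN: 64512, PeerIP: "192.0.2.1"},
}
if err := store.GetOrCreateASNBatch(asns); err != nil {
	logger.Error("Failed to process ASN batch", "error", err, "count", len(asns))
}
if err := store.UpdatePeerBatch(peers); err != nil {
	logger.Error("Failed to process peer batch", "error", err, "count", len(peers))
}
if err := store.DeleteLiveRouteBatch(deletions); err != nil {
	logger.Error("Failed to delete route batch", "error", err, "count", len(deletions))
}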

View File

@@ -221,6 +221,64 @@ func (m *mockStore) GetPrefixDetails(prefix string) ([]database.LiveRoute, error
return []database.LiveRoute{}, nil
}
// UpsertLiveRouteBatch mock implementation
func (m *mockStore) UpsertLiveRouteBatch(routes []*database.LiveRoute) error {
m.mu.Lock()
defer m.mu.Unlock()
for _, route := range routes {
// Track prefix
if _, exists := m.Prefixes[route.Prefix]; !exists {
m.Prefixes[route.Prefix] = &database.Prefix{
ID: uuid.New(),
Prefix: route.Prefix,
IPVersion: route.IPVersion,
FirstSeen: route.LastUpdated,
LastSeen: route.LastUpdated,
}
m.PrefixCount++
if route.IPVersion == 4 {
m.IPv4Prefixes++
} else {
m.IPv6Prefixes++
}
}
m.RouteCount++
}
return nil
}
// DeleteLiveRouteBatch mock implementation
func (m *mockStore) DeleteLiveRouteBatch(deletions []database.LiveRouteDeletion) error {
// Simple mock - just return nil
return nil
}
// GetOrCreateASNBatch mock implementation
func (m *mockStore) GetOrCreateASNBatch(asns map[int]time.Time) error {
m.mu.Lock()
defer m.mu.Unlock()
for number, timestamp := range asns {
if _, exists := m.ASNs[number]; !exists {
m.ASNs[number] = &database.ASN{
ID: uuid.New(),
Number: number,
FirstSeen: timestamp,
LastSeen: timestamp,
}
m.ASNCount++
}
}
return nil
}
// UpdatePeerBatch mock implementation
func (m *mockStore) UpdatePeerBatch(peers map[string]database.PeerUpdate) error {
// Simple mock - just return nil
return nil
}
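
Given the counters the mock maintains above, a test can assert batch tracking directly. A hypothetical sketch (the test name, the literal prefixes, and constructing mockStore with only its maps initialized are assumptions, not part of this commit):

func TestUpsertLiveRouteBatchTracking(t *testing.T) {
	m := &mockStore{
		Prefixes: make(map[string]*database.Prefix),
	}
	routes := []*database.LiveRoute{
		{ID: uuid.New(), Prefix: "192.0.2.0/24", IPVersion: 4, LastUpdated: time.Now()},
		{ID: uuid.New(), Prefix: "2001:db8::/32", IPVersion: 6, LastUpdated: time.Now()},
	}
	if err := m.UpsertLiveRouteBatch(routes); err != nil {
		t.Fatalf("UpsertLiveRouteBatch: %v", err)
	}
	// The mock increments these counters as it tracks each new prefix and route.
	if m.PrefixCount != 2 || m.IPv4Prefixes != 1 || m.IPv6Prefixes != 1 || m.RouteCount != 2 {
		t.Errorf("unexpected counters: prefixes=%d v4=%d v6=%d routes=%d",
			m.PrefixCount, m.IPv4Prefixes, m.IPv6Prefixes, m.RouteCount)
	}
}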
func TestRouteWatchLiveFeed(t *testing.T) {
// Create mock database

View File

@@ -144,11 +144,9 @@ func (h *ASHandler) flushBatchLocked() {
}
}
for asn, ts := range asnMap {
_, err := h.db.GetOrCreateASN(asn, ts)
if err != nil {
h.logger.Error("Failed to get/create ASN", "asn", asn, "error", err)
}
// Process all ASNs in a single batch transaction
if err := h.db.GetOrCreateASNBatch(asnMap); err != nil {
h.logger.Error("Failed to process ASN batch", "error", err, "count", len(asnMap))
}
// Clear batch

View File

@@ -135,18 +135,22 @@ func (h *PeerHandler) flushBatchLocked() {
}
}
// Apply updates
for _, update := range peerMap {
if err := h.db.UpdatePeer(update.peerIP, update.peerASN, update.messageType, update.timestamp); err != nil {
h.logger.Error("Failed to update peer",
"peer", update.peerIP,
"peer_asn", update.peerASN,
"message_type", update.messageType,
"error", err,
)
// Convert to database format
dbPeerMap := make(map[string]database.PeerUpdate)
for peerIP, update := range peerMap {
dbPeerMap[peerIP] = database.PeerUpdate{
PeerIP: update.peerIP,
PeerASN: update.peerASN,
MessageType: update.messageType,
Timestamp: update.timestamp,
}
}
// Process all peers in a single batch transaction
if err := h.db.UpdatePeerBatch(dbPeerMap); err != nil {
h.logger.Error("Failed to process peer batch", "error", err, "count", len(dbPeerMap))
}
// Clear batch
h.peerBatch = h.peerBatch[:0]
h.lastFlush = time.Now()

View File

@@ -18,10 +18,10 @@ const (
prefixHandlerQueueSize = 100000
// prefixBatchSize is the number of prefix updates to batch together
prefixBatchSize = 25000
prefixBatchSize = 5000
// prefixBatchTimeout is the maximum time to wait before flushing a batch
prefixBatchTimeout = 2 * time.Second
prefixBatchTimeout = 1 * time.Second
// IP version constants
ipv4Version = 4
@@ -163,6 +163,9 @@ func (h *PrefixHandler) flushBatchLocked() {
return
}
startTime := time.Now()
batchSize := len(h.batch)
// Group updates by prefix to deduplicate
// For each prefix, keep the latest update
prefixMap := make(map[string]prefixUpdate)
@@ -173,27 +176,55 @@
}
}
// Apply updates to database
// Collect routes to upsert and delete
var routesToUpsert []*database.LiveRoute
var routesToDelete []database.LiveRouteDeletion
// Skip the prefix table updates entirely - just update live_routes
// The prefix table is not critical for routing lookups
for _, update := range prefixMap {
// Get or create prefix (this maintains the prefixes table)
prefix, err := h.db.GetOrCreatePrefix(update.prefix, update.timestamp)
if err != nil {
h.logger.Error("Failed to get/create prefix",
"prefix", update.prefix,
"error", err,
)
continue
}
// For announcements, get ASN info and create announcement record
if update.messageType == "announcement" && update.originASN > 0 {
h.processAnnouncement(prefix, update)
// Create live route for batch upsert
route := h.createLiveRoute(update)
if route != nil {
routesToUpsert = append(routesToUpsert, route)
}
} else if update.messageType == "withdrawal" {
h.processWithdrawal(prefix, update)
// Create deletion record for batch delete
routesToDelete = append(routesToDelete, database.LiveRouteDeletion{
Prefix: update.prefix,
OriginASN: update.originASN,
PeerIP: update.peer,
})
}
}
// Process batch operations
successCount := 0
if len(routesToUpsert) > 0 {
if err := h.db.UpsertLiveRouteBatch(routesToUpsert); err != nil {
h.logger.Error("Failed to upsert route batch", "error", err, "count", len(routesToUpsert))
} else {
successCount += len(routesToUpsert)
}
}
if len(routesToDelete) > 0 {
if err := h.db.DeleteLiveRouteBatch(routesToDelete); err != nil {
h.logger.Error("Failed to delete route batch", "error", err, "count", len(routesToDelete))
} else {
successCount += len(routesToDelete)
}
}
elapsed := time.Since(startTime)
h.logger.Debug("Flushed prefix batch",
"batch_size", batchSize,
"unique_prefixes", len(prefixMap),
"success", successCount,
"duration_ms", elapsed.Milliseconds(),
)
// Clear batch
h.batch = h.batch[:0]
h.lastFlush = time.Now()
@@ -215,6 +246,7 @@ func parseCIDR(prefix string) (maskLength int, ipVersion int, err error) {
}
// processAnnouncement handles storing an announcement in the database
// nolint:unused // kept for potential future use
func (h *PrefixHandler) processAnnouncement(_ *database.Prefix, update prefixUpdate) {
// Parse CIDR to get mask length
maskLength, ipVersion, err := parseCIDR(update.prefix)
@@ -271,7 +303,143 @@ func (h *PrefixHandler) processAnnouncement(_ *database.Prefix, update prefixUpd
}
}
// createLiveRoute creates a LiveRoute from a prefix update
func (h *PrefixHandler) createLiveRoute(update prefixUpdate) *database.LiveRoute {
// Parse CIDR to get mask length
maskLength, ipVersion, err := parseCIDR(update.prefix)
if err != nil {
h.logger.Error("Failed to parse CIDR",
"prefix", update.prefix,
"error", err,
)
return nil
}
// Track route update metrics
if h.metrics != nil {
if ipVersion == ipv4Version {
h.metrics.RecordIPv4Update()
} else {
h.metrics.RecordIPv6Update()
}
}
// Create live route record
liveRoute := &database.LiveRoute{
ID: uuid.New(),
Prefix: update.prefix,
MaskLength: maskLength,
IPVersion: ipVersion,
OriginASN: update.originASN,
PeerIP: update.peer,
ASPath: update.path,
NextHop: update.peer, // Using peer as next hop
LastUpdated: update.timestamp,
}
// For IPv4, calculate the IP range
if ipVersion == ipv4Version {
start, end, err := database.CalculateIPv4Range(update.prefix)
if err == nil {
liveRoute.V4IPStart = &start
liveRoute.V4IPEnd = &end
} else {
h.logger.Error("Failed to calculate IPv4 range",
"prefix", update.prefix,
"error", err,
)
}
}
return liveRoute
}
// processAnnouncementDirect handles storing an announcement directly without prefix table
// nolint:unused // kept for potential future use
func (h *PrefixHandler) processAnnouncementDirect(update prefixUpdate) {
// Parse CIDR to get mask length
maskLength, ipVersion, err := parseCIDR(update.prefix)
if err != nil {
h.logger.Error("Failed to parse CIDR",
"prefix", update.prefix,
"error", err,
)
return
}
// Track route update metrics
if h.metrics != nil {
if ipVersion == ipv4Version {
h.metrics.RecordIPv4Update()
} else {
h.metrics.RecordIPv6Update()
}
}
// Create live route record
liveRoute := &database.LiveRoute{
ID: uuid.New(),
Prefix: update.prefix,
MaskLength: maskLength,
IPVersion: ipVersion,
OriginASN: update.originASN,
PeerIP: update.peer,
ASPath: update.path,
NextHop: update.peer, // Using peer as next hop
LastUpdated: update.timestamp,
}
// For IPv4, calculate the IP range
if ipVersion == ipv4Version {
start, end, err := database.CalculateIPv4Range(update.prefix)
if err == nil {
liveRoute.V4IPStart = &start
liveRoute.V4IPEnd = &end
} else {
h.logger.Error("Failed to calculate IPv4 range",
"prefix", update.prefix,
"error", err,
)
}
}
if err := h.db.UpsertLiveRoute(liveRoute); err != nil {
h.logger.Error("Failed to upsert live route",
"prefix", update.prefix,
"error", err,
)
}
}
// processWithdrawalDirect handles removing a route directly without prefix table
// nolint:unused // kept for potential future use
func (h *PrefixHandler) processWithdrawalDirect(update prefixUpdate) {
// For withdrawals, we need to delete the route from live_routes
if update.originASN > 0 {
if err := h.db.DeleteLiveRoute(update.prefix, update.originASN, update.peer); err != nil {
h.logger.Error("Failed to delete live route",
"prefix", update.prefix,
"origin_asn", update.originASN,
"peer", update.peer,
"error", err,
)
}
} else {
// If no origin ASN, just delete all routes for this prefix from this peer
if err := h.db.DeleteLiveRoute(update.prefix, 0, update.peer); err != nil {
h.logger.Error("Failed to delete live route (no origin ASN)",
"prefix", update.prefix,
"peer", update.peer,
"error", err,
)
}
}
}
// processWithdrawal handles removing a route from the live routing table
// nolint:unused // kept for potential future use
func (h *PrefixHandler) processWithdrawal(_ *database.Prefix, update prefixUpdate) {
// For withdrawals, we need to delete the route from live_routes
// Since we have the origin ASN from the update, we can delete the specific route

log.txt (new file, 125 lines)
View File

@@ -0,0 +1,125 @@
[Fx] PROVIDE fx.Lifecycle <= go.uber.org/fx.New.func1()
[Fx] PROVIDE fx.Shutdowner <= go.uber.org/fx.(*App).shutdowner-fm()
[Fx] PROVIDE fx.DotGraph <= go.uber.org/fx.(*App).dotGraph-fm()
[Fx] PROVIDE *logger.Logger <= git.eeqj.de/sneak/routewatch/internal/logger.New()
[Fx] PROVIDE *config.Config <= git.eeqj.de/sneak/routewatch/internal/config.New()
[Fx] PROVIDE *metrics.Tracker <= git.eeqj.de/sneak/routewatch/internal/metrics.New()
[Fx] PROVIDE database.Store <= fx.Annotate(git.eeqj.de/sneak/routewatch/internal/database.New(), fx.As([[database.Store]])
[Fx] PROVIDE *streamer.Streamer <= git.eeqj.de/sneak/routewatch/internal/streamer.New()
[Fx] PROVIDE *server.Server <= git.eeqj.de/sneak/routewatch/internal/server.New()
[Fx] PROVIDE *routewatch.RouteWatch <= git.eeqj.de/sneak/routewatch/internal/routewatch.New()
[Fx] INVOKE git.eeqj.de/sneak/routewatch/internal/routewatch.CLIEntry.func1()
[Fx] BEFORE RUN provide: go.uber.org/fx.New.func1()
[Fx] RUN provide: go.uber.org/fx.New.func1() in 6.292µs
[Fx] BEFORE RUN provide: git.eeqj.de/sneak/routewatch/internal/config.New()
[Fx] RUN provide: git.eeqj.de/sneak/routewatch/internal/config.New() in 6.458µs
[Fx] BEFORE RUN provide: git.eeqj.de/sneak/routewatch/internal/logger.New()
[Fx] RUN provide: git.eeqj.de/sneak/routewatch/internal/logger.New() in 4.417µs
[Fx] BEFORE RUN provide: fx.Annotate(git.eeqj.de/sneak/routewatch/internal/database.New(), fx.As([[database.Store]])
{"time":"2025-07-28T17:12:49.977025+02:00","level":"INFO","msg":"Opening database","source":"database.go:55","func":"database.New","path":"/Users/user/Library/Application Support/berlin.sneak.app.routewatch/db.sqlite"}
{"time":"2025-07-28T17:12:50.21775+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"PRAGMA wal_checkpoint(TRUNCATE)","duration":115272875}
{"time":"2025-07-28T17:12:50.217936+02:00","level":"INFO","msg":"Running VACUUM to optimize database (this may take a moment)","source":"database.go:122","func":"database.(*Database).Initialize"}
{"time":"2025-07-28T17:12:59.531431+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"VACUUM","duration":9313432750}
[Fx] RUN provide: fx.Annotate(git.eeqj.de/sneak/routewatch/internal/database.New(), fx.As([[database.Store]]) in 9.554516041s
[Fx] BEFORE RUN provide: git.eeqj.de/sneak/routewatch/internal/metrics.New()
[Fx] RUN provide: git.eeqj.de/sneak/routewatch/internal/metrics.New() in 38.292µs
[Fx] BEFORE RUN provide: git.eeqj.de/sneak/routewatch/internal/streamer.New()
[Fx] RUN provide: git.eeqj.de/sneak/routewatch/internal/streamer.New() in 4.5µs
[Fx] BEFORE RUN provide: git.eeqj.de/sneak/routewatch/internal/server.New()
[Fx] RUN provide: git.eeqj.de/sneak/routewatch/internal/server.New() in 60.125µs
[Fx] BEFORE RUN provide: git.eeqj.de/sneak/routewatch/internal/routewatch.New()
[Fx] RUN provide: git.eeqj.de/sneak/routewatch/internal/routewatch.New() in 3.083µs
[Fx] BEFORE RUN provide: go.uber.org/fx.(*App).shutdowner-fm()
[Fx] RUN provide: go.uber.org/fx.(*App).shutdowner-fm() in 7.167µs
[Fx] HOOK OnStart git.eeqj.de/sneak/routewatch/internal/routewatch.CLIEntry.func1.1() executing (caller: git.eeqj.de/sneak/routewatch/internal/routewatch.CLIEntry.func1)
[Fx] HOOK OnStart git.eeqj.de/sneak/routewatch/internal/routewatch.CLIEntry.func1.1() called by git.eeqj.de/sneak/routewatch/internal/routewatch.CLIEntry.func1 ran successfully in 162.25µs
[Fx] RUNNING
{"time":"2025-07-28T17:12:59.53194+02:00","level":"INFO","msg":"Starting RouteWatch","source":"app.go:64","func":"routewatch.(*RouteWatch).Run"}
{"time":"2025-07-28T17:12:59.531973+02:00","level":"INFO","msg":"Using batched database handlers for improved performance","source":"app.go:76","func":"routewatch.(*RouteWatch).Run"}
{"time":"2025-07-28T17:12:59.533095+02:00","level":"INFO","msg":"Starting HTTP server","source":"server.go:52","func":"server.(*Server).Start","port":"8080"}
2025/07/28 17:13:00 [akrotiri/R2eLWiud8V-000001] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56646 - 200 3622B in 324.489792ms
2025/07/28 17:13:00 [akrotiri/R2eLWiud8V-000002] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56646 - 200 3622B in 323.379916ms
{"time":"2025-07-28T17:13:00.934924+02:00","level":"INFO","msg":"Connected to RIS Live stream","source":"streamer.go:343","func":"streamer.(*Streamer).stream"}
2025/07/28 17:13:01 [akrotiri/R2eLWiud8V-000003] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56646 - 200 3655B in 340.457292ms
{"time":"2025-07-28T17:13:01.836921+02:00","level":"INFO","msg":"BGP session opened","source":"streamer.go:428","func":"streamer.(*Streamer).stream","peer":"193.107.13.3","peer_asn":"47787"}
{"time":"2025-07-28T17:13:02.283639+02:00","level":"ERROR","msg":"Database stats timeout","source":"handlers.go:248","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1"}
2025/07/28 17:13:02 [akrotiri/R2eLWiud8V-000004] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56646 - 408 17B in 1.000718708s
{"time":"2025-07-28T17:13:02.805274+02:00","level":"ERROR","msg":"Database stats timeout","source":"handlers.go:248","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1"}
2025/07/28 17:13:02 [akrotiri/R2eLWiud8V-000005] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56649 - 408 17B in 1.00114125s
{"time":"2025-07-28T17:13:02.87048+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":1587465542}
{"time":"2025-07-28T17:13:02.871544+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":568061250}
{"time":"2025-07-28T17:13:02.871628+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":587415667}
{"time":"2025-07-28T17:13:02.871894+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 23634: database table is locked","count":483}
{"time":"2025-07-28T17:13:02.883924+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":1079722417}
{"time":"2025-07-28T17:13:02.928747+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":122623584}
{"time":"2025-07-28T17:13:02.94791+02:00","level":"ERROR","msg":"Failed to process peer batch","source":"peerhandler.go:151","func":"routewatch.(*PeerHandler).flushBatchLocked","error":"failed to update peer 2001:7f8:24::8d: database table is locked","count":276}
{"time":"2025-07-28T17:13:02.948072+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:02.948115+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:02.948089+02:00","level":"ERROR","msg":"Failed to get database stats","source":"handlers.go:253","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1","error":"failed to count live routes: database table is locked: live_routes"}
2025/07/28 17:13:02 [akrotiri/R2eLWiud8V-000006] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 500 67B in 663.970667ms
{"time":"2025-07-28T17:13:02.948473+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":142440958}
{"time":"2025-07-28T17:13:02.9485+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:02.948509+02:00","level":"ERROR","msg":"Failed to get database stats","source":"handlers.go:253","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1","error":"failed to count live routes: database table is locked: live_routes"}
2025/07/28 17:13:02 [akrotiri/R2eLWiud8V-000007] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56651 - 500 67B in 645.064042ms
{"time":"2025-07-28T17:13:02.948853+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 49432: database table is locked","count":503}
{"time":"2025-07-28T17:13:02.961369+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:02.987269+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:02.987286+02:00","level":"ERROR","msg":"Failed to get database stats","source":"handlers.go:253","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:02.987269+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:02.987299+02:00","level":"ERROR","msg":"Failed to get database stats","source":"handlers.go:253","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1","error":"failed to count live routes: database table is locked: live_routes"}
2025/07/28 17:13:02 [akrotiri/R2eLWiud8V-000009] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56653 - 500 67B in 181.208959ms
2025/07/28 17:13:02 [akrotiri/R2eLWiud8V-000008] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56652 - 500 67B in 181.296125ms
{"time":"2025-07-28T17:13:03.124495+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":1731,"success":1731,"duration_ms":1852}
{"time":"2025-07-28T17:13:03.178923+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":1107,"success":1107,"duration_ms":54}
{"time":"2025-07-28T17:13:03.577435+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 138583: database table is locked","count":563}
2025/07/28 17:13:03 [akrotiri/R2eLWiud8V-000010] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3679B in 321.707916ms
{"time":"2025-07-28T17:13:03.931676+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":135058125}
{"time":"2025-07-28T17:13:03.944156+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 265315: database table is locked","count":486}
{"time":"2025-07-28T17:13:03.948721+02:00","level":"INFO","msg":"BGP session opened","source":"streamer.go:428","func":"streamer.(*Streamer).stream","peer":"196.60.8.170","peer_asn":"327781"}
{"time":"2025-07-28T17:13:04.067453+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"DELETE FROM live_routes WHERE prefix = ? AND peer_ip = ?","duration":115385375}
{"time":"2025-07-28T17:13:04.068433+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 3491: database table is locked","count":512}
{"time":"2025-07-28T17:13:04.10943+02:00","level":"WARN","msg":"BGP notification","source":"streamer.go:436","func":"streamer.(*Streamer).stream","peer":"80.81.192.113","peer_asn":"35320"}
{"time":"2025-07-28T17:13:04.159034+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"DELETE FROM live_routes WHERE prefix = ? AND peer_ip = ?","duration":51181709}
{"time":"2025-07-28T17:13:04.159666+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5021,"unique_prefixes":1552,"success":1552,"duration_ms":649}
2025/07/28 17:13:04 [akrotiri/R2eLWiud8V-000011] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3685B in 449.669375ms
{"time":"2025-07-28T17:13:04.246417+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 141216: database table is locked","count":510}
{"time":"2025-07-28T17:13:04.396504+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"SELECT COUNT(*) FROM asns","duration":94807500}
{"time":"2025-07-28T17:13:04.397204+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 28329: database table is locked","count":500}
{"time":"2025-07-28T17:13:04.416404+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:04.416423+02:00","level":"ERROR","msg":"Failed to get database stats","source":"handlers.go:253","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1","error":"failed to count live routes: database table is locked: live_routes"}
2025/07/28 17:13:04 [akrotiri/R2eLWiud8V-000012] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 500 67B in 114.792667ms
{"time":"2025-07-28T17:13:04.419385+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5001,"unique_prefixes":1311,"success":1311,"duration_ms":259}
{"time":"2025-07-28T17:13:04.427118+02:00","level":"ERROR","msg":"Failed to process peer batch","source":"peerhandler.go:151","func":"routewatch.(*PeerHandler).flushBatchLocked","error":"failed to update peer 2001:7f8:4::f2d7:1: database table is locked","count":558}
{"time":"2025-07-28T17:13:04.640385+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 9002: database table is locked","count":588}
{"time":"2025-07-28T17:13:04.643686+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":1884,"success":1884,"duration_ms":224}
{"time":"2025-07-28T17:13:04.796451+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 16276: database table is locked","count":472}
{"time":"2025-07-28T17:13:04.79782+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5007,"unique_prefixes":1111,"success":1111,"duration_ms":153}
{"time":"2025-07-28T17:13:04.825693+02:00","level":"DEBUG","msg":"Database stats query failed","source":"handlers.go:237","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1.1","error":"failed to count live routes: database table is locked: live_routes"}
{"time":"2025-07-28T17:13:04.825713+02:00","level":"ERROR","msg":"Failed to get database stats","source":"handlers.go:253","func":"server.(*Server).setupRoutes.func1.(*Server).handleStats.1","error":"failed to count live routes: database table is locked: live_routes"}
2025/07/28 17:13:04 [akrotiri/R2eLWiud8V-000013] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 500 67B in 19.796125ms
{"time":"2025-07-28T17:13:05.014116+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 134484: database table is locked","count":576}
{"time":"2025-07-28T17:13:05.014915+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":1797,"success":1797,"duration_ms":216}
{"time":"2025-07-28T17:13:05.179676+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5062,"unique_prefixes":1485,"success":1485,"duration_ms":164}
2025/07/28 17:13:05 [akrotiri/R2eLWiud8V-000014] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3780B in 320.984667ms
{"time":"2025-07-28T17:13:05.818964+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":1357,"success":1357,"duration_ms":147}
{"time":"2025-07-28T17:13:06.16192+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"UPDATE asns SET last_seen = ? WHERE id = ?","duration":50454000}
2025/07/28 17:13:06 [akrotiri/R2eLWiud8V-000015] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3785B in 317.421541ms
{"time":"2025-07-28T17:13:06.459642+02:00","level":"DEBUG","msg":"Slow query","source":"slowquery.go:17","func":"database.logSlowQuery","query":"\n\t\tINSERT INTO live_routes (id, prefix, mask_length, ip_version, origin_asn, peer_ip, as_path, next_hop, \n\t\t\tlast_updated, v4_ip_start, v4_ip_end)\n\t\tVALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n\t\tON CONFLICT(prefix, origin_asn, peer_ip) DO UPDATE SET\n\t\t\tmask_length = excluded.mask_length,\n\t\t\tip_version = excluded.ip_version,\n\t\t\tas_path = excluded.as_path,\n\t\t\tnext_hop = excluded.next_hop,\n\t\t\tlast_updated = excluded.last_updated,\n\t\t\tv4_ip_start = excluded.v4_ip_start,\n\t\t\tv4_ip_end = excluded.v4_ip_end\n\t","duration":86616625}
{"time":"2025-07-28T17:13:06.45968+02:00","level":"ERROR","msg":"Failed to upsert route batch","source":"prefixhandler.go:206","func":"routewatch.(*PrefixHandler).flushBatchLocked","error":"failed to upsert route 14.232.144.0/20: database table is locked: live_routes","count":1582}
{"time":"2025-07-28T17:13:06.460464+02:00","level":"ERROR","msg":"Failed to delete route batch","source":"prefixhandler.go:214","func":"routewatch.(*PrefixHandler).flushBatchLocked","error":"failed to delete route 2a06:de02:5bb:0:0:0:0:0/48: database table is locked: live_routes","count":68}
{"time":"2025-07-28T17:13:06.460474+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":1650,"success":0,"duration_ms":109}
{"time":"2025-07-28T17:13:06.549698+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 58453: database table is locked","count":508}
2025/07/28 17:13:06 [akrotiri/R2eLWiud8V-000016] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3785B in 319.24825ms
{"time":"2025-07-28T17:13:06.914088+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":2065,"success":2065,"duration_ms":213}
2025/07/28 17:13:07 [akrotiri/R2eLWiud8V-000017] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3787B in 326.68525ms
{"time":"2025-07-28T17:13:07.510509+02:00","level":"ERROR","msg":"Failed to upsert route batch","source":"prefixhandler.go:206","func":"routewatch.(*PrefixHandler).flushBatchLocked","error":"failed to upsert route 189.28.84.0/24: database table is locked: live_routes","count":1795}
{"time":"2025-07-28T17:13:07.511781+02:00","level":"ERROR","msg":"Failed to delete route batch","source":"prefixhandler.go:214","func":"routewatch.(*PrefixHandler).flushBatchLocked","error":"failed to delete route 2406:7f40:8300:0:0:0:0:0/40: database table is locked: live_routes","count":91}
{"time":"2025-07-28T17:13:07.511764+02:00","level":"ERROR","msg":"Failed to process ASN batch","source":"ashandler.go:149","func":"routewatch.(*ASHandler).flushBatchLocked","error":"failed to update ASN 45192: database table is locked","count":589}
{"time":"2025-07-28T17:13:07.5118+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5000,"unique_prefixes":1886,"success":0,"duration_ms":31}
2025/07/28 17:13:07 [akrotiri/R2eLWiud8V-000018] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3786B in 322.780666ms
{"time":"2025-07-28T17:13:08.182061+02:00","level":"WARN","msg":"BGP notification","source":"streamer.go:436","func":"streamer.(*Streamer).stream","peer":"193.239.118.249","peer_asn":"41255"}
{"time":"2025-07-28T17:13:08.200973+02:00","level":"ERROR","msg":"Failed to upsert route batch","source":"prefixhandler.go:206","func":"routewatch.(*PrefixHandler).flushBatchLocked","error":"failed to upsert route 158.172.251.0/24: database table is locked","count":1061}
2025/07/28 17:13:08 [akrotiri/R2eLWiud8V-000019] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3790B in 331.584125ms
{"time":"2025-07-28T17:13:08.201058+02:00","level":"ERROR","msg":"Failed to delete route batch","source":"prefixhandler.go:214","func":"routewatch.(*PrefixHandler).flushBatchLocked","error":"failed to delete route 2a06:de02:4a0:0:0:0:0:0/48: database table is locked","count":88}
{"time":"2025-07-28T17:13:08.201066+02:00","level":"DEBUG","msg":"Flushed prefix batch","source":"prefixhandler.go:221","func":"routewatch.(*PrefixHandler).flushBatchLocked","batch_size":5001,"unique_prefixes":1149,"success":0,"duration_ms":16}
2025/07/28 17:13:08 [akrotiri/R2eLWiud8V-000020] "GET http://127.0.0.1:8080/api/v1/stats HTTP/1.1" from 127.0.0.1:56650 - 200 3786B in 319.2075ms