// Package database provides SQLite storage for BGP routing data including ASNs, prefixes, announcements and peerings.
package database

import (
	"database/sql"
	_ "embed"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"git.eeqj.de/sneak/routewatch/internal/config"
	"git.eeqj.de/sneak/routewatch/internal/logger"
	"git.eeqj.de/sneak/routewatch/pkg/asinfo"
	"github.com/google/uuid"
	_ "github.com/mattn/go-sqlite3" // CGO SQLite driver
)

//go:embed schema.sql
var dbSchema string

const dirPermissions = 0750 // rwxr-x---

// Database manages the SQLite database connection and operations.
type Database struct {
	db     *sql.DB
	logger *logger.Logger
	path   string
}

// New creates a new database connection and initializes the schema.
func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
	dbPath := filepath.Join(cfg.GetStateDir(), "db.sqlite")

	// Log database path
	logger.Info("Opening database", "path", dbPath)

	// Ensure directory exists
	dir := filepath.Dir(dbPath)
	if err := os.MkdirAll(dir, dirPermissions); err != nil {
		return nil, fmt.Errorf("failed to create database directory: %w", err)
	}

	// Add connection parameters for go-sqlite3
	// Enable WAL mode and other performance optimizations
	dsn := fmt.Sprintf("file:%s?_busy_timeout=5000&_journal_mode=WAL&_synchronous=NORMAL&cache=shared", dbPath)
	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		return nil, fmt.Errorf("failed to open database: %w", err)
	}

	if err := db.Ping(); err != nil {
		// Close the handle so a failed startup does not leak the connection
		_ = db.Close()

		return nil, fmt.Errorf("failed to ping database: %w", err)
	}

	// Set connection pool parameters
	// Single connection to avoid locking issues with SQLite
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0)

	database := &Database{db: db, logger: logger, path: dbPath}

	if err := database.Initialize(); err != nil {
		_ = db.Close()

		return nil, fmt.Errorf("failed to initialize database: %w", err)
	}

	return database, nil
}
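
// Usage sketch (illustrative only): wiring the database into an application.
// The cfg and log values are assumed to come from the application's existing
// config and logger constructors.
//
//	db, err := database.New(cfg, log)
//	if err != nil {
//		return fmt.Errorf("failed to open database: %w", err)
//	}
//	defer func() { _ = db.Close() }()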

// Initialize creates the database schema if it doesn't exist.
func (d *Database) Initialize() error {
	// Set SQLite pragmas for better performance
	// WARNING: These settings trade durability for speed
	pragmas := []string{
		"PRAGMA journal_mode=WAL",         // Write-Ahead Logging
		"PRAGMA synchronous=OFF",          // Don't wait for disk writes - RISKY but FAST
		"PRAGMA cache_size=-1048576",      // 1GB cache (negative = KB)
		"PRAGMA temp_store=MEMORY",        // Use memory for temp tables
		"PRAGMA mmap_size=536870912",      // 512MB memory-mapped I/O
		"PRAGMA wal_autocheckpoint=10000", // Checkpoint every 10000 pages (less frequent)
		"PRAGMA wal_checkpoint(PASSIVE)",  // Checkpoint now
		"PRAGMA page_size=8192",           // Larger page size for better performance
		"PRAGMA busy_timeout=30000",       // 30 second busy timeout
		"PRAGMA optimize",                 // Run optimizer
	}

	for _, pragma := range pragmas {
		if err := d.exec(pragma); err != nil {
			d.logger.Warn("Failed to set pragma", "pragma", pragma, "error", err)
		}
	}

	err := d.exec(dbSchema)

	return err
}

// Close closes the database connection.
func (d *Database) Close() error {
	return d.db.Close()
}

// beginTx starts a new transaction with logging
func (d *Database) beginTx() (*loggingTx, error) {
	tx, err := d.db.Begin()
	if err != nil {
		return nil, err
	}

	return &loggingTx{Tx: tx, logger: d.logger}, nil
}

// GetOrCreateASN retrieves an existing ASN or creates a new one if it doesn't exist.
func (d *Database) GetOrCreateASN(number int, timestamp time.Time) (*ASN, error) {
	tx, err := d.beginTx()
	if err != nil {
		return nil, err
	}
	defer func() {
		if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
			d.logger.Error("Failed to rollback transaction", "error", err)
		}
	}()

	var asn ASN
	var idStr string
	var handle, description sql.NullString
	err = tx.QueryRow("SELECT id, number, handle, description, first_seen, last_seen FROM asns WHERE number = ?", number).
		Scan(&idStr, &asn.Number, &handle, &description, &asn.FirstSeen, &asn.LastSeen)

	if err == nil {
		// ASN exists, update last_seen
		asn.ID, _ = uuid.Parse(idStr)
		asn.Handle = handle.String
		asn.Description = description.String
		_, err = tx.Exec("UPDATE asns SET last_seen = ? WHERE id = ?", timestamp, asn.ID.String())
		if err != nil {
			return nil, err
		}
		asn.LastSeen = timestamp

		if err = tx.Commit(); err != nil {
			d.logger.Error("Failed to commit transaction for ASN update", "asn", number, "error", err)

			return nil, err
		}

		return &asn, nil
	}

	if err != sql.ErrNoRows {
		return nil, err
	}

	// ASN doesn't exist, create it with ASN info lookup
	asn = ASN{
		ID:        generateUUID(),
		Number:    number,
		FirstSeen: timestamp,
		LastSeen:  timestamp,
	}

	// Look up ASN info
	if info, ok := asinfo.Get(number); ok {
		asn.Handle = info.Handle
		asn.Description = info.Description
	}

	_, err = tx.Exec("INSERT INTO asns (id, number, handle, description, first_seen, last_seen) VALUES (?, ?, ?, ?, ?, ?)",
		asn.ID.String(), asn.Number, asn.Handle, asn.Description, asn.FirstSeen, asn.LastSeen)
	if err != nil {
		return nil, err
	}

	if err = tx.Commit(); err != nil {
		d.logger.Error("Failed to commit transaction for ASN creation", "asn", number, "error", err)

		return nil, err
	}

	return &asn, nil
}
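
// Usage sketch (illustrative only): a message handler might upsert every ASN
// seen in an AS path before recording announcements or peerings. The path
// values, timestamp, and log variable here are hypothetical.
//
//	ts := time.Now()
//	for _, asNumber := range []int{64512, 65010} {
//		if _, err := db.GetOrCreateASN(asNumber, ts); err != nil {
//			log.Warn("failed to upsert ASN", "asn", asNumber, "error", err)
//		}
//	}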

// GetOrCreatePrefix retrieves an existing prefix or creates a new one if it doesn't exist.
func (d *Database) GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error) {
	tx, err := d.beginTx()
	if err != nil {
		return nil, err
	}
	defer func() {
		if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
			d.logger.Error("Failed to rollback transaction", "error", err)
		}
	}()

	var p Prefix
	var idStr string
	err = tx.QueryRow("SELECT id, prefix, ip_version, first_seen, last_seen FROM prefixes WHERE prefix = ?", prefix).
		Scan(&idStr, &p.Prefix, &p.IPVersion, &p.FirstSeen, &p.LastSeen)

	if err == nil {
		// Prefix exists, update last_seen
		p.ID, _ = uuid.Parse(idStr)
		_, err = tx.Exec("UPDATE prefixes SET last_seen = ? WHERE id = ?", timestamp, p.ID.String())
		if err != nil {
			return nil, err
		}
		p.LastSeen = timestamp

		if err = tx.Commit(); err != nil {
			d.logger.Error("Failed to commit transaction for prefix update", "prefix", prefix, "error", err)

			return nil, err
		}

		return &p, nil
	}

	if err != sql.ErrNoRows {
		return nil, err
	}

	// Prefix doesn't exist, create it
	p = Prefix{
		ID:        generateUUID(),
		Prefix:    prefix,
		IPVersion: detectIPVersion(prefix),
		FirstSeen: timestamp,
		LastSeen:  timestamp,
	}
	_, err = tx.Exec("INSERT INTO prefixes (id, prefix, ip_version, first_seen, last_seen) VALUES (?, ?, ?, ?, ?)",
		p.ID.String(), p.Prefix, p.IPVersion, p.FirstSeen, p.LastSeen)
	if err != nil {
		return nil, err
	}

	if err = tx.Commit(); err != nil {
		d.logger.Error("Failed to commit transaction for prefix creation", "prefix", prefix, "error", err)

		return nil, err
	}

	return &p, nil
}

// RecordAnnouncement inserts a new BGP announcement or withdrawal into the database.
func (d *Database) RecordAnnouncement(announcement *Announcement) error {
	err := d.exec(`
		INSERT INTO announcements (id, prefix_id, asn_id, origin_asn_id, path, next_hop, timestamp, is_withdrawal)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
		announcement.ID.String(), announcement.PrefixID.String(),
		announcement.ASNID.String(), announcement.OriginASNID.String(),
		announcement.Path, announcement.NextHop, announcement.Timestamp, announcement.IsWithdrawal)

	return err
}
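
// Usage sketch (illustrative only): recording an announcement after resolving
// its prefix and ASNs. Field names follow their use above; the Path value and
// other concrete values are hypothetical, and error handling is elided.
//
//	prefix, _ := db.GetOrCreatePrefix("192.0.2.0/24", ts)
//	peerASN, _ := db.GetOrCreateASN(64512, ts)
//	originASN, _ := db.GetOrCreateASN(65010, ts)
//	err := db.RecordAnnouncement(&Announcement{
//		ID:           uuid.New(),
//		PrefixID:     prefix.ID,
//		ASNID:        peerASN.ID,
//		OriginASNID:  originASN.ID,
//		Path:         "64512 65010",
//		NextHop:      "198.51.100.1",
//		Timestamp:    ts,
//		IsWithdrawal: false,
//	})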

// RecordPeering records a peering relationship between two ASNs.
func (d *Database) RecordPeering(asA, asB int, timestamp time.Time) error {
	// Validate ASNs
	if asA <= 0 || asB <= 0 {
		return fmt.Errorf("invalid ASN: asA=%d, asB=%d", asA, asB)
	}
	if asA == asB {
		return fmt.Errorf("cannot create peering with same ASN: %d", asA)
	}

	// Normalize: ensure asA < asB
	if asA > asB {
		asA, asB = asB, asA
	}

	tx, err := d.beginTx()
	if err != nil {
		return err
	}
	defer func() {
		if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
			d.logger.Error("Failed to rollback transaction", "error", err)
		}
	}()

	var exists bool
	err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM peerings WHERE as_a = ? AND as_b = ?)",
		asA, asB).Scan(&exists)
	if err != nil {
		return err
	}

	if exists {
		_, err = tx.Exec("UPDATE peerings SET last_seen = ? WHERE as_a = ? AND as_b = ?",
			timestamp, asA, asB)
	} else {
		_, err = tx.Exec(`
			INSERT INTO peerings (id, as_a, as_b, first_seen, last_seen)
			VALUES (?, ?, ?, ?, ?)`,
			generateUUID().String(), asA, asB, timestamp, timestamp)
	}
	if err != nil {
		return err
	}

	if err = tx.Commit(); err != nil {
		d.logger.Error("Failed to commit transaction for peering",
			"as_a", asA,
			"as_b", asB,
			"error", err,
		)

		return err
	}

	return nil
}
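
// Usage sketch (illustrative only): adjacent AS pairs from an AS path can be
// recorded as peerings. Because RecordPeering normalizes the pair ordering,
// (65010, 64512) and (64512, 65010) update the same row. The path below is
// hypothetical.
//
//	path := []int{64512, 65010, 64496}
//	for i := 0; i+1 < len(path); i++ {
//		if err := db.RecordPeering(path[i], path[i+1], ts); err != nil {
//			log.Warn("failed to record peering", "error", err)
//		}
//	}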

// UpdatePeer updates or creates a BGP peer record
func (d *Database) UpdatePeer(peerIP string, peerASN int, messageType string, timestamp time.Time) error {
	tx, err := d.beginTx()
	if err != nil {
		return err
	}
	defer func() {
		if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
			d.logger.Error("Failed to rollback transaction", "error", err)
		}
	}()

	var exists bool
	err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM bgp_peers WHERE peer_ip = ?)", peerIP).Scan(&exists)
	if err != nil {
		return err
	}

	if exists {
		_, err = tx.Exec(
			"UPDATE bgp_peers SET peer_asn = ?, last_seen = ?, last_message_type = ? WHERE peer_ip = ?",
			peerASN, timestamp, messageType, peerIP,
		)
	} else {
		_, err = tx.Exec(
			"INSERT INTO bgp_peers (id, peer_ip, peer_asn, first_seen, last_seen, last_message_type) VALUES (?, ?, ?, ?, ?, ?)",
			generateUUID().String(), peerIP, peerASN, timestamp, timestamp, messageType,
		)
	}
	if err != nil {
		return err
	}

	if err = tx.Commit(); err != nil {
		d.logger.Error("Failed to commit transaction for peer update",
			"peer_ip", peerIP,
			"peer_asn", peerASN,
			"error", err,
		)

		return err
	}

	return nil
}
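
// Usage sketch (illustrative only): keeping peer liveness current from an
// incoming message. The peer address, ASN, message type, and timestamp shown
// are hypothetical.
//
//	if err := db.UpdatePeer("198.51.100.7", 64512, "UPDATE", ts); err != nil {
//		log.Warn("failed to update peer", "error", err)
//	}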

// GetStats returns database statistics
func (d *Database) GetStats() (Stats, error) {
	var stats Stats

	// Count ASNs
	err := d.queryRow("SELECT COUNT(*) FROM asns").Scan(&stats.ASNs)
	if err != nil {
		return stats, err
	}

	// Count prefixes
	err = d.queryRow("SELECT COUNT(*) FROM prefixes").Scan(&stats.Prefixes)
	if err != nil {
		return stats, err
	}

	// Count IPv4 and IPv6 prefixes
	const ipVersionV4 = 4
	err = d.queryRow("SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", ipVersionV4).Scan(&stats.IPv4Prefixes)
	if err != nil {
		return stats, err
	}

	const ipVersionV6 = 6
	err = d.queryRow("SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", ipVersionV6).Scan(&stats.IPv6Prefixes)
	if err != nil {
		return stats, err
	}

	// Count peerings
	err = d.queryRow("SELECT COUNT(*) FROM peerings").Scan(&stats.Peerings)
	if err != nil {
		return stats, err
	}

	// Get database file size
	fileInfo, err := os.Stat(d.path)
	if err != nil {
		d.logger.Warn("Failed to get database file size", "error", err)
		stats.FileSizeBytes = 0
	} else {
		stats.FileSizeBytes = fileInfo.Size()
	}

	// Get live routes count
	err = d.db.QueryRow("SELECT COUNT(*) FROM live_routes").Scan(&stats.LiveRoutes)
	if err != nil {
		return stats, fmt.Errorf("failed to count live routes: %w", err)
	}

	// Get prefix distribution
	stats.IPv4PrefixDistribution, stats.IPv6PrefixDistribution, err = d.GetPrefixDistribution()
	if err != nil {
		// Log but don't fail
		d.logger.Warn("Failed to get prefix distribution", "error", err)
	}

	return stats, nil
}

// UpsertLiveRoute inserts or updates a live route
func (d *Database) UpsertLiveRoute(route *LiveRoute) error {
	query := `
		INSERT INTO live_routes (id, prefix, mask_length, ip_version, origin_asn, peer_ip, as_path, next_hop, last_updated)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(prefix, origin_asn, peer_ip) DO UPDATE SET
			mask_length = excluded.mask_length,
			ip_version = excluded.ip_version,
			as_path = excluded.as_path,
			next_hop = excluded.next_hop,
			last_updated = excluded.last_updated
	`

	// Encode AS path as JSON
	pathJSON, err := json.Marshal(route.ASPath)
	if err != nil {
		return fmt.Errorf("failed to encode AS path: %w", err)
	}

	_, err = d.db.Exec(query,
		route.ID.String(),
		route.Prefix,
		route.MaskLength,
		route.IPVersion,
		route.OriginASN,
		route.PeerIP,
		string(pathJSON),
		route.NextHop,
		route.LastUpdated,
	)

	return err
}
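
// Usage sketch (illustrative only): maintaining the live_routes table from an
// announcement. Field types are inferred from their use above (ASPath is
// whatever slice type json.Marshal receives); the concrete values are
// hypothetical.
//
//	err := db.UpsertLiveRoute(&LiveRoute{
//		ID:          uuid.New(),
//		Prefix:      "192.0.2.0/24",
//		MaskLength:  24,
//		IPVersion:   4,
//		OriginASN:   65010,
//		PeerIP:      "198.51.100.7",
//		ASPath:      []int{64512, 65010},
//		NextHop:     "198.51.100.1",
//		LastUpdated: time.Now(),
//	})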

// DeleteLiveRoute deletes a live route
// If originASN is 0, deletes all routes for the prefix/peer combination
func (d *Database) DeleteLiveRoute(prefix string, originASN int, peerIP string) error {
	var query string
	var err error

	if originASN == 0 {
		// Delete all routes for this prefix from this peer
		query = `DELETE FROM live_routes WHERE prefix = ? AND peer_ip = ?`
		_, err = d.db.Exec(query, prefix, peerIP)
	} else {
		// Delete specific route
		query = `DELETE FROM live_routes WHERE prefix = ? AND origin_asn = ? AND peer_ip = ?`
		_, err = d.db.Exec(query, prefix, originASN, peerIP)
	}

	return err
}
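
// Usage sketch (illustrative only): handling a withdrawal. When the withdrawn
// route's origin is unknown, passing 0 removes every route for that prefix
// learned from the peer. The prefix and peer address are hypothetical.
//
//	// Origin known: remove the single matching route.
//	_ = db.DeleteLiveRoute("192.0.2.0/24", 65010, "198.51.100.7")
//
//	// Origin unknown: remove all routes for the prefix from this peer.
//	_ = db.DeleteLiveRoute("192.0.2.0/24", 0, "198.51.100.7")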

// GetPrefixDistribution returns the distribution of prefixes by mask length
func (d *Database) GetPrefixDistribution() (ipv4 []PrefixDistribution, ipv6 []PrefixDistribution, err error) {
	// IPv4 distribution
	query := `
		SELECT mask_length, COUNT(*) as count
		FROM live_routes
		WHERE ip_version = 4
		GROUP BY mask_length
		ORDER BY mask_length
	`
	rows, err := d.db.Query(query)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to query IPv4 distribution: %w", err)
	}
	defer func() { _ = rows.Close() }()

	for rows.Next() {
		var dist PrefixDistribution
		if err := rows.Scan(&dist.MaskLength, &dist.Count); err != nil {
			return nil, nil, fmt.Errorf("failed to scan IPv4 distribution: %w", err)
		}
		ipv4 = append(ipv4, dist)
	}
	// Surface any error that ended iteration early
	if err := rows.Err(); err != nil {
		return nil, nil, fmt.Errorf("failed to iterate IPv4 distribution: %w", err)
	}

	// IPv6 distribution
	query = `
		SELECT mask_length, COUNT(*) as count
		FROM live_routes
		WHERE ip_version = 6
		GROUP BY mask_length
		ORDER BY mask_length
	`
	rows, err = d.db.Query(query)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to query IPv6 distribution: %w", err)
	}
	defer func() { _ = rows.Close() }()

	for rows.Next() {
		var dist PrefixDistribution
		if err := rows.Scan(&dist.MaskLength, &dist.Count); err != nil {
			return nil, nil, fmt.Errorf("failed to scan IPv6 distribution: %w", err)
		}
		ipv6 = append(ipv6, dist)
	}
	// Surface any error that ended iteration early
	if err := rows.Err(); err != nil {
		return nil, nil, fmt.Errorf("failed to iterate IPv6 distribution: %w", err)
	}

	return ipv4, ipv6, nil
}

// GetLiveRouteCounts returns the count of IPv4 and IPv6 routes
func (d *Database) GetLiveRouteCounts() (ipv4Count, ipv6Count int, err error) {
	// Get IPv4 count
	err = d.db.QueryRow("SELECT COUNT(*) FROM live_routes WHERE ip_version = 4").Scan(&ipv4Count)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to count IPv4 routes: %w", err)
	}

	// Get IPv6 count
	err = d.db.QueryRow("SELECT COUNT(*) FROM live_routes WHERE ip_version = 6").Scan(&ipv6Count)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to count IPv6 routes: %w", err)
	}

	return ipv4Count, ipv6Count, nil
}
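
// Usage sketch (illustrative only): a status handler can read per-family route
// counts directly from the database instead of an in-memory routing table. The
// handler shape and log variable are hypothetical.
//
//	ipv4, ipv6, err := db.GetLiveRouteCounts()
//	if err != nil {
//		log.Error("failed to get route counts", "error", err)
//	} else {
//		log.Info("live routes", "ipv4", ipv4, "ipv6", ipv6)
//	}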