routewatch/internal/database/database.go
sneak 9b649c98c9 Fix AS detail view and add prefix sorting
- Fix GetASDetails to properly handle timestamp from MAX(last_updated)
- Parse timestamp string from SQLite aggregate function result
- Add natural sorting of prefixes by IP address in AS detail view
- Sort IPv4 and IPv6 prefixes separately by network address
- Remove SQL ORDER BY since we're sorting in Go
- This fixes the issue where AS detail pages showed no prefixes
2025-07-28 04:42:10 +02:00

// Package database provides SQLite storage for BGP routing data including ASNs, prefixes, announcements and peerings.
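//
// A minimal usage sketch (construction of cfg and log is elided; they are
// the *config.Config and *logger.Logger values this package already
// depends on):
//
//	db, err := database.New(cfg, log)
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()
//
//	stats, err := db.GetStats()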
package database
import (
"database/sql"
_ "embed"
"encoding/json"
"errors"
"fmt"
"net"
"os"
"path/filepath"
"time"
"git.eeqj.de/sneak/routewatch/internal/config"
"git.eeqj.de/sneak/routewatch/internal/logger"
"git.eeqj.de/sneak/routewatch/pkg/asinfo"
"github.com/google/uuid"
_ "github.com/mattn/go-sqlite3" // CGO SQLite driver
)
//go:embed schema.sql
var dbSchema string
const (
dirPermissions = 0750 // rwxr-x---
ipVersionV4 = 4
ipVersionV6 = 6
ipv6Length = 16
ipv4Offset = 12
ipv4Bits = 32
maxIPv4 = 0xFFFFFFFF
)
// Common errors
var (
// ErrInvalidIP is returned when an IP address is malformed
ErrInvalidIP = errors.New("invalid IP address")
// ErrNoRoute is returned when no route is found for an IP
ErrNoRoute = errors.New("no route found")
)
// Database manages the SQLite database connection and operations.
type Database struct {
db *sql.DB
logger *logger.Logger
path string
}
// New creates a new database connection and initializes the schema.
func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
dbPath := filepath.Join(cfg.GetStateDir(), "db.sqlite")
// Log database path
logger.Info("Opening database", "path", dbPath)
// Ensure directory exists
dir := filepath.Dir(dbPath)
if err := os.MkdirAll(dir, dirPermissions); err != nil {
return nil, fmt.Errorf("failed to create database directory: %w", err)
}
// Add connection parameters for go-sqlite3
// Enable WAL mode and other performance optimizations
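// In the go-sqlite3 DSN, _busy_timeout=5000 waits up to 5s on a locked
// database, _journal_mode=WAL enables write-ahead logging,
// _synchronous=NORMAL relaxes fsyncs for WAL commits, and cache=shared
// shares one page cache across connections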
dsn := fmt.Sprintf("file:%s?_busy_timeout=5000&_journal_mode=WAL&_synchronous=NORMAL&cache=shared", dbPath)
db, err := sql.Open("sqlite3", dsn)
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
}
if err := db.Ping(); err != nil {
_ = db.Close()
return nil, fmt.Errorf("failed to ping database: %w", err)
}
// Set connection pool parameters
// Single connection to avoid locking issues with SQLite
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
db.SetConnMaxLifetime(0)
database := &Database{db: db, logger: logger, path: dbPath}
if err := database.Initialize(); err != nil {
return nil, fmt.Errorf("failed to initialize database: %w", err)
}
return database, nil
}
// Initialize creates the database schema if it doesn't exist.
func (d *Database) Initialize() error {
// Set SQLite pragmas for better performance
// WARNING: These settings trade durability for speed
pragmas := []string{
"PRAGMA journal_mode=WAL", // Write-Ahead Logging
"PRAGMA synchronous=OFF", // Don't wait for disk writes - RISKY but FAST
"PRAGMA cache_size=-1048576", // 1GB cache (negative = KB)
"PRAGMA temp_store=MEMORY", // Use memory for temp tables
"PRAGMA mmap_size=536870912", // 512MB memory-mapped I/O
"PRAGMA wal_autocheckpoint=10000", // Checkpoint every 10000 pages (less frequent)
"PRAGMA wal_checkpoint(PASSIVE)", // Checkpoint now
"PRAGMA page_size=8192", // Larger page size for better performance
"PRAGMA busy_timeout=30000", // 30 second busy timeout
"PRAGMA optimize", // Run optimizer
}
for _, pragma := range pragmas {
if err := d.exec(pragma); err != nil {
d.logger.Warn("Failed to set pragma", "pragma", pragma, "error", err)
}
}
err := d.exec(dbSchema)
return err
}
// Close closes the database connection.
func (d *Database) Close() error {
return d.db.Close()
}
// beginTx starts a new transaction with logging
func (d *Database) beginTx() (*loggingTx, error) {
tx, err := d.db.Begin()
if err != nil {
return nil, err
}
return &loggingTx{Tx: tx, logger: d.logger}, nil
}
// GetOrCreateASN retrieves an existing ASN or creates a new one if it doesn't exist.
func (d *Database) GetOrCreateASN(number int, timestamp time.Time) (*ASN, error) {
tx, err := d.beginTx()
if err != nil {
return nil, err
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
var asn ASN
var idStr string
var handle, description sql.NullString
err = tx.QueryRow("SELECT id, number, handle, description, first_seen, last_seen FROM asns WHERE number = ?", number).
Scan(&idStr, &asn.Number, &handle, &description, &asn.FirstSeen, &asn.LastSeen)
if err == nil {
// ASN exists, update last_seen
asn.ID, _ = uuid.Parse(idStr)
asn.Handle = handle.String
asn.Description = description.String
_, err = tx.Exec("UPDATE asns SET last_seen = ? WHERE id = ?", timestamp, asn.ID.String())
if err != nil {
return nil, err
}
asn.LastSeen = timestamp
if err = tx.Commit(); err != nil {
d.logger.Error("Failed to commit transaction for ASN update", "asn", number, "error", err)
return nil, err
}
return &asn, nil
}
if err != sql.ErrNoRows {
return nil, err
}
// ASN doesn't exist, create it with ASN info lookup
asn = ASN{
ID: generateUUID(),
Number: number,
FirstSeen: timestamp,
LastSeen: timestamp,
}
// Look up ASN info
if info, ok := asinfo.Get(number); ok {
asn.Handle = info.Handle
asn.Description = info.Description
}
_, err = tx.Exec("INSERT INTO asns (id, number, handle, description, first_seen, last_seen) VALUES (?, ?, ?, ?, ?, ?)",
asn.ID.String(), asn.Number, asn.Handle, asn.Description, asn.FirstSeen, asn.LastSeen)
if err != nil {
return nil, err
}
if err = tx.Commit(); err != nil {
d.logger.Error("Failed to commit transaction for ASN creation", "asn", number, "error", err)
return nil, err
}
return &asn, nil
}
// GetOrCreatePrefix retrieves an existing prefix or creates a new one if it doesn't exist.
func (d *Database) GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error) {
tx, err := d.beginTx()
if err != nil {
return nil, err
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
var p Prefix
var idStr string
err = tx.QueryRow("SELECT id, prefix, ip_version, first_seen, last_seen FROM prefixes WHERE prefix = ?", prefix).
Scan(&idStr, &p.Prefix, &p.IPVersion, &p.FirstSeen, &p.LastSeen)
if err == nil {
// Prefix exists, update last_seen
p.ID, _ = uuid.Parse(idStr)
_, err = tx.Exec("UPDATE prefixes SET last_seen = ? WHERE id = ?", timestamp, p.ID.String())
if err != nil {
return nil, err
}
p.LastSeen = timestamp
if err = tx.Commit(); err != nil {
d.logger.Error("Failed to commit transaction for prefix update", "prefix", prefix, "error", err)
return nil, err
}
return &p, nil
}
if err != sql.ErrNoRows {
return nil, err
}
// Prefix doesn't exist, create it
p = Prefix{
ID: generateUUID(),
Prefix: prefix,
IPVersion: detectIPVersion(prefix),
FirstSeen: timestamp,
LastSeen: timestamp,
}
_, err = tx.Exec("INSERT INTO prefixes (id, prefix, ip_version, first_seen, last_seen) VALUES (?, ?, ?, ?, ?)",
p.ID.String(), p.Prefix, p.IPVersion, p.FirstSeen, p.LastSeen)
if err != nil {
return nil, err
}
if err = tx.Commit(); err != nil {
d.logger.Error("Failed to commit transaction for prefix creation", "prefix", prefix, "error", err)
return nil, err
}
return &p, nil
}
// RecordAnnouncement inserts a new BGP announcement or withdrawal into the database.
func (d *Database) RecordAnnouncement(announcement *Announcement) error {
err := d.exec(`
INSERT INTO announcements (id, prefix_id, asn_id, origin_asn_id, path, next_hop, timestamp, is_withdrawal)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
announcement.ID.String(), announcement.PrefixID.String(),
announcement.ASNID.String(), announcement.OriginASNID.String(),
announcement.Path, announcement.NextHop, announcement.Timestamp, announcement.IsWithdrawal)
return err
}
// RecordPeering records a peering relationship between two ASNs.
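// Peerings are undirected: the pair is normalized so that as_a < as_b,
// which ensures each relationship is stored exactly once.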
func (d *Database) RecordPeering(asA, asB int, timestamp time.Time) error {
// Validate ASNs
if asA <= 0 || asB <= 0 {
return fmt.Errorf("invalid ASN: asA=%d, asB=%d", asA, asB)
}
if asA == asB {
return fmt.Errorf("cannot create peering with same ASN: %d", asA)
}
// Normalize: ensure asA < asB
if asA > asB {
asA, asB = asB, asA
}
tx, err := d.beginTx()
if err != nil {
return err
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
var exists bool
err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM peerings WHERE as_a = ? AND as_b = ?)",
asA, asB).Scan(&exists)
if err != nil {
return err
}
if exists {
_, err = tx.Exec("UPDATE peerings SET last_seen = ? WHERE as_a = ? AND as_b = ?",
timestamp, asA, asB)
} else {
_, err = tx.Exec(`
INSERT INTO peerings (id, as_a, as_b, first_seen, last_seen)
VALUES (?, ?, ?, ?, ?)`,
generateUUID().String(), asA, asB, timestamp, timestamp)
}
if err != nil {
return err
}
if err = tx.Commit(); err != nil {
d.logger.Error("Failed to commit transaction for peering",
"as_a", asA,
"as_b", asB,
"error", err,
)
return err
}
return nil
}
// UpdatePeer updates or creates a BGP peer record
func (d *Database) UpdatePeer(peerIP string, peerASN int, messageType string, timestamp time.Time) error {
tx, err := d.beginTx()
if err != nil {
return err
}
defer func() {
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
d.logger.Error("Failed to rollback transaction", "error", err)
}
}()
var exists bool
err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM bgp_peers WHERE peer_ip = ?)", peerIP).Scan(&exists)
if err != nil {
return err
}
if exists {
_, err = tx.Exec(
"UPDATE bgp_peers SET peer_asn = ?, last_seen = ?, last_message_type = ? WHERE peer_ip = ?",
peerASN, timestamp, messageType, peerIP,
)
} else {
_, err = tx.Exec(
"INSERT INTO bgp_peers (id, peer_ip, peer_asn, first_seen, last_seen, last_message_type) VALUES (?, ?, ?, ?, ?, ?)",
generateUUID().String(), peerIP, peerASN, timestamp, timestamp, messageType,
)
}
if err != nil {
return err
}
if err = tx.Commit(); err != nil {
d.logger.Error("Failed to commit transaction for peer update",
"peer_ip", peerIP,
"peer_asn", peerASN,
"error", err,
)
return err
}
return nil
}
// GetStats returns database statistics
func (d *Database) GetStats() (Stats, error) {
var stats Stats
// Count ASNs
err := d.queryRow("SELECT COUNT(*) FROM asns").Scan(&stats.ASNs)
if err != nil {
return stats, err
}
// Count prefixes
err = d.queryRow("SELECT COUNT(*) FROM prefixes").Scan(&stats.Prefixes)
if err != nil {
return stats, err
}
// Count IPv4 and IPv6 prefixes
err = d.queryRow("SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", ipVersionV4).Scan(&stats.IPv4Prefixes)
if err != nil {
return stats, err
}
err = d.queryRow("SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", ipVersionV6).Scan(&stats.IPv6Prefixes)
if err != nil {
return stats, err
}
// Count peerings
err = d.queryRow("SELECT COUNT(*) FROM peerings").Scan(&stats.Peerings)
if err != nil {
return stats, err
}
// Count peers
err = d.queryRow("SELECT COUNT(*) FROM bgp_peers").Scan(&stats.Peers)
if err != nil {
return stats, err
}
// Get database file size
fileInfo, err := os.Stat(d.path)
if err != nil {
d.logger.Warn("Failed to get database file size", "error", err)
stats.FileSizeBytes = 0
} else {
stats.FileSizeBytes = fileInfo.Size()
}
// Get live routes count
err = d.db.QueryRow("SELECT COUNT(*) FROM live_routes").Scan(&stats.LiveRoutes)
if err != nil {
return stats, fmt.Errorf("failed to count live routes: %w", err)
}
// Get prefix distribution
stats.IPv4PrefixDistribution, stats.IPv6PrefixDistribution, err = d.GetPrefixDistribution()
if err != nil {
// Log but don't fail
d.logger.Warn("Failed to get prefix distribution", "error", err)
}
return stats, nil
}
// UpsertLiveRoute inserts or updates a live route
func (d *Database) UpsertLiveRoute(route *LiveRoute) error {
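// The ON CONFLICT target assumes a UNIQUE constraint over
// (prefix, origin_asn, peer_ip) in schema.sql; without it SQLite would
// reject this upsert.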
query := `
INSERT INTO live_routes (id, prefix, mask_length, ip_version, origin_asn, peer_ip, as_path, next_hop,
last_updated, v4_ip_start, v4_ip_end)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(prefix, origin_asn, peer_ip) DO UPDATE SET
mask_length = excluded.mask_length,
ip_version = excluded.ip_version,
as_path = excluded.as_path,
next_hop = excluded.next_hop,
last_updated = excluded.last_updated,
v4_ip_start = excluded.v4_ip_start,
v4_ip_end = excluded.v4_ip_end
`
// Encode AS path as JSON
pathJSON, err := json.Marshal(route.ASPath)
if err != nil {
return fmt.Errorf("failed to encode AS path: %w", err)
}
// Convert v4_ip_start and v4_ip_end to interface{} for SQL NULL handling
var v4Start, v4End interface{}
if route.V4IPStart != nil {
v4Start = *route.V4IPStart
}
if route.V4IPEnd != nil {
v4End = *route.V4IPEnd
}
_, err = d.db.Exec(query,
route.ID.String(),
route.Prefix,
route.MaskLength,
route.IPVersion,
route.OriginASN,
route.PeerIP,
string(pathJSON),
route.NextHop,
route.LastUpdated,
v4Start,
v4End,
)
return err
}
// DeleteLiveRoute deletes a live route
// If originASN is 0, deletes all routes for the prefix/peer combination
func (d *Database) DeleteLiveRoute(prefix string, originASN int, peerIP string) error {
var query string
var err error
if originASN == 0 {
// Delete all routes for this prefix from this peer
query = `DELETE FROM live_routes WHERE prefix = ? AND peer_ip = ?`
_, err = d.db.Exec(query, prefix, peerIP)
} else {
// Delete specific route
query = `DELETE FROM live_routes WHERE prefix = ? AND origin_asn = ? AND peer_ip = ?`
_, err = d.db.Exec(query, prefix, originASN, peerIP)
}
return err
}
// GetPrefixDistribution returns the distribution of prefixes by mask length
func (d *Database) GetPrefixDistribution() (ipv4 []PrefixDistribution, ipv6 []PrefixDistribution, err error) {
// IPv4 distribution
query := `
SELECT mask_length, COUNT(*) as count
FROM live_routes
WHERE ip_version = 4
GROUP BY mask_length
ORDER BY mask_length
`
rows, err := d.db.Query(query)
if err != nil {
return nil, nil, fmt.Errorf("failed to query IPv4 distribution: %w", err)
}
defer func() { _ = rows.Close() }()
for rows.Next() {
var dist PrefixDistribution
if err := rows.Scan(&dist.MaskLength, &dist.Count); err != nil {
return nil, nil, fmt.Errorf("failed to scan IPv4 distribution: %w", err)
}
ipv4 = append(ipv4, dist)
}
// IPv6 distribution
query = `
SELECT mask_length, COUNT(*) as count
FROM live_routes
WHERE ip_version = 6
GROUP BY mask_length
ORDER BY mask_length
`
rows, err = d.db.Query(query)
if err != nil {
return nil, nil, fmt.Errorf("failed to query IPv6 distribution: %w", err)
}
defer func() { _ = rows.Close() }()
for rows.Next() {
var dist PrefixDistribution
if err := rows.Scan(&dist.MaskLength, &dist.Count); err != nil {
return nil, nil, fmt.Errorf("failed to scan IPv6 distribution: %w", err)
}
ipv6 = append(ipv6, dist)
}
return ipv4, ipv6, nil
}
// GetLiveRouteCounts returns the count of IPv4 and IPv6 routes
func (d *Database) GetLiveRouteCounts() (ipv4Count, ipv6Count int, err error) {
// Get IPv4 count
err = d.db.QueryRow("SELECT COUNT(*) FROM live_routes WHERE ip_version = 4").Scan(&ipv4Count)
if err != nil {
return 0, 0, fmt.Errorf("failed to count IPv4 routes: %w", err)
}
// Get IPv6 count
err = d.db.QueryRow("SELECT COUNT(*) FROM live_routes WHERE ip_version = 6").Scan(&ipv6Count)
if err != nil {
return 0, 0, fmt.Errorf("failed to count IPv6 routes: %w", err)
}
return ipv4Count, ipv6Count, nil
}
// GetASInfoForIP returns AS information for the given IP address
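// It performs a longest-prefix match over live_routes and reports the
// origin AS of the most specific covering route.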
func (d *Database) GetASInfoForIP(ip string) (*ASInfo, error) {
// Parse the IP to validate it
parsedIP := net.ParseIP(ip)
if parsedIP == nil {
return nil, fmt.Errorf("%w: %s", ErrInvalidIP, ip)
}
// Determine IP version
ipVersion := ipVersionV4
ipv4 := parsedIP.To4()
if ipv4 == nil {
ipVersion = ipVersionV6
}
// For IPv4, use optimized range query
if ipVersion == ipVersionV4 {
// Convert IP to 32-bit unsigned integer
ipUint := ipToUint32(ipv4)
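// A route covers the address when v4_ip_start <= ipUint <= v4_ip_end;
// ordering by mask_length DESC and taking a single row yields the
// longest-prefix (most specific) match.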
query := `
SELECT DISTINCT lr.prefix, lr.mask_length, lr.origin_asn, lr.last_updated, a.handle, a.description
FROM live_routes lr
LEFT JOIN asns a ON a.number = lr.origin_asn
WHERE lr.ip_version = ? AND lr.v4_ip_start <= ? AND lr.v4_ip_end >= ?
ORDER BY lr.mask_length DESC
LIMIT 1
`
var prefix string
var maskLength, originASN int
var lastUpdated time.Time
var handle, description sql.NullString
err := d.db.QueryRow(query, ipVersionV4, ipUint, ipUint).Scan(
&prefix, &maskLength, &originASN, &lastUpdated, &handle, &description)
if err != nil {
if err == sql.ErrNoRows {
return nil, fmt.Errorf("%w for IP %s", ErrNoRoute, ip)
}
return nil, fmt.Errorf("failed to query routes: %w", err)
}
age := time.Since(lastUpdated).Round(time.Second).String()
return &ASInfo{
ASN: originASN,
Handle: handle.String,
Description: description.String,
Prefix: prefix,
LastUpdated: lastUpdated,
Age: age,
}, nil
}
// For IPv6 there is no precomputed range column, so fall back to scanning
// all IPv6 routes and doing the longest-prefix match in Go
query := `
SELECT DISTINCT lr.prefix, lr.mask_length, lr.origin_asn, lr.last_updated, a.handle, a.description
FROM live_routes lr
LEFT JOIN asns a ON a.number = lr.origin_asn
WHERE lr.ip_version = ?
ORDER BY lr.mask_length DESC
`
rows, err := d.db.Query(query, ipVersionV6)
if err != nil {
return nil, fmt.Errorf("failed to query routes: %w", err)
}
defer func() { _ = rows.Close() }()
// Find the most specific matching prefix
var bestMatch struct {
prefix string
maskLength int
originASN int
lastUpdated time.Time
handle sql.NullString
description sql.NullString
}
bestMaskLength := -1
for rows.Next() {
var prefix string
var maskLength, originASN int
var lastUpdated time.Time
var handle, description sql.NullString
if err := rows.Scan(&prefix, &maskLength, &originASN, &lastUpdated, &handle, &description); err != nil {
continue
}
// Parse the prefix CIDR
_, ipNet, err := net.ParseCIDR(prefix)
if err != nil {
continue
}
// Check if the IP is in this prefix
if ipNet.Contains(parsedIP) && maskLength > bestMaskLength {
bestMatch.prefix = prefix
bestMatch.maskLength = maskLength
bestMatch.originASN = originASN
bestMatch.lastUpdated = lastUpdated
bestMatch.handle = handle
bestMatch.description = description
bestMaskLength = maskLength
}
}
if bestMaskLength == -1 {
return nil, fmt.Errorf("%w for IP %s", ErrNoRoute, ip)
}
age := time.Since(bestMatch.lastUpdated).Round(time.Second).String()
return &ASInfo{
ASN: bestMatch.originASN,
Handle: bestMatch.handle.String,
Description: bestMatch.description.String,
Prefix: bestMatch.prefix,
LastUpdated: bestMatch.lastUpdated,
Age: age,
}, nil
}
// ipToUint32 converts an IPv4 address to a 32-bit unsigned integer
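// For example, 203.0.113.9 maps to 0xCB007109.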
func ipToUint32(ip net.IP) uint32 {
if len(ip) == ipv6Length {
// Convert to 4-byte representation
ip = ip[ipv4Offset:ipv6Length]
}
return uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3])
}
// CalculateIPv4Range calculates the start and end IP addresses for an IPv4 CIDR block
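// For example, "10.0.0.0/8" yields start 0x0A000000 and end 0x0AFFFFFF.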
func CalculateIPv4Range(cidr string) (start, end uint32, err error) {
_, ipNet, err := net.ParseCIDR(cidr)
if err != nil {
return 0, 0, err
}
// Get the network address (start of range)
ip := ipNet.IP.To4()
if ip == nil {
return 0, 0, fmt.Errorf("not an IPv4 address")
}
start = ipToUint32(ip)
// Calculate the end of the range
ones, bits := ipNet.Mask.Size()
hostBits := bits - ones
if hostBits >= ipv4Bits {
// Special case for /0 - entire IPv4 space
end = maxIPv4
} else {
// Safe to convert since we checked hostBits < 32
//nolint:gosec // hostBits is guaranteed to be < 32 from the check above
end = start | ((1 << uint(hostBits)) - 1)
}
return start, end, nil
}
// GetASDetails returns detailed information about an AS including prefixes
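// The prefix query carries no SQL ordering; callers sort the returned
// prefixes (IPv4 and IPv6 separately) by network address.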
func (d *Database) GetASDetails(asn int) (*ASN, []LiveRoute, error) {
// Get AS information
var asnInfo ASN
var idStr string
var handle, description sql.NullString
err := d.db.QueryRow(
"SELECT id, number, handle, description, first_seen, last_seen FROM asns WHERE number = ?",
asn,
).Scan(&idStr, &asnInfo.Number, &handle, &description, &asnInfo.FirstSeen, &asnInfo.LastSeen)
if err != nil {
if err == sql.ErrNoRows {
return nil, nil, fmt.Errorf("%w: AS%d", ErrNoRoute, asn)
}
return nil, nil, fmt.Errorf("failed to query AS: %w", err)
}
asnInfo.ID, _ = uuid.Parse(idStr)
asnInfo.Handle = handle.String
asnInfo.Description = description.String
// Get prefixes announced by this AS (unique prefixes with most recent update)
query := `
SELECT prefix, mask_length, ip_version, MAX(last_updated) as last_updated
FROM live_routes
WHERE origin_asn = ?
GROUP BY prefix, mask_length, ip_version
`
rows, err := d.db.Query(query, asn)
if err != nil {
return &asnInfo, nil, fmt.Errorf("failed to query prefixes: %w", err)
}
defer func() { _ = rows.Close() }()
var prefixes []LiveRoute
for rows.Next() {
var route LiveRoute
var lastUpdatedStr string
err := rows.Scan(&route.Prefix, &route.MaskLength, &route.IPVersion, &lastUpdatedStr)
if err != nil {
d.logger.Error("Failed to scan prefix row", "error", err, "asn", asn)
continue
}
// MAX(last_updated) comes back from SQLite as TEXT, so parse the
// timestamp string manually (with and without a timezone offset)
route.LastUpdated, err = time.Parse("2006-01-02 15:04:05-07:00", lastUpdatedStr)
if err != nil {
// Try without timezone
route.LastUpdated, err = time.Parse("2006-01-02 15:04:05", lastUpdatedStr)
if err != nil {
d.logger.Error("Failed to parse timestamp", "error", err, "timestamp", lastUpdatedStr)
continue
}
}
route.OriginASN = asn
prefixes = append(prefixes, route)
}
return &asnInfo, prefixes, nil
}
// GetPrefixDetails returns detailed information about a prefix
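// One LiveRoute is returned per (origin ASN, peer) combination that
// currently announces the prefix.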
func (d *Database) GetPrefixDetails(prefix string) ([]LiveRoute, error) {
query := `
SELECT lr.origin_asn, lr.peer_ip, lr.as_path, lr.next_hop, lr.last_updated,
a.handle, a.description
FROM live_routes lr
LEFT JOIN asns a ON a.number = lr.origin_asn
WHERE lr.prefix = ?
ORDER BY lr.origin_asn, lr.peer_ip
`
rows, err := d.db.Query(query, prefix)
if err != nil {
return nil, fmt.Errorf("failed to query prefix details: %w", err)
}
defer func() { _ = rows.Close() }()
var routes []LiveRoute
for rows.Next() {
var route LiveRoute
var pathJSON string
var handle, description sql.NullString
err := rows.Scan(
&route.OriginASN, &route.PeerIP, &pathJSON, &route.NextHop,
&route.LastUpdated, &handle, &description,
)
if err != nil {
continue
}
// Decode AS path
if err := json.Unmarshal([]byte(pathJSON), &route.ASPath); err != nil {
route.ASPath = []int{}
}
route.Prefix = prefix
routes = append(routes, route)
}
if len(routes) == 0 {
return nil, fmt.Errorf("%w: %s", ErrNoRoute, prefix)
}
return routes, nil
}