- Implement comprehensive SQL query logging for queries over 10ms
- Add logging wrapper methods for all database operations (see the sketch below)
- Replace timing code in GetStats with simple info log messages
- Add missing database indexes for better query performance:
  - idx_live_routes_lookup for common prefix/origin/peer lookups
  - idx_live_routes_withdraw for withdrawal updates
  - idx_prefixes_prefix for prefix lookups
  - idx_asn_peerings_lookup for peering relationship queries
- Increase SQLite cache size to 512MB
- Add performance-oriented SQLite pragmas
- Extract HTML templates to separate files using go:embed
- Add JSON response middleware with @meta field (like bgpview.io API)
- Fix concurrent map write errors in HTTP handlers
- Add request timeout handling with proper JSON error responses

These changes significantly improve database query performance and provide visibility into slow queries for debugging purposes.
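The helpers this file relies on (d.exec, d.query, d.queryRow, and the loggingTx returned by beginTx) are defined elsewhere in the package and are not shown on this page. As a rough illustration only, here is a minimal sketch of what the 10ms slow-query logging wrapper described above might look like; the threshold constant, method bodies, and log fields are assumptions inferred from the call sites below, not the project's actual implementation.

    // Hypothetical sketch of the slow-query logging helpers (not the real code).
    const slowQueryThreshold = 10 * time.Millisecond

    // exec runs a statement and logs it if it exceeds the threshold.
    func (d *Database) exec(query string, args ...interface{}) error {
        start := time.Now()
        _, err := d.db.Exec(query, args...)
        if elapsed := time.Since(start); elapsed > slowQueryThreshold {
            d.logger.Warn("slow query", "query", query, "duration", elapsed)
        }
        return err
    }

    // query runs a multi-row query with the same timing check.
    func (d *Database) query(query string, args ...interface{}) (*sql.Rows, error) {
        start := time.Now()
        rows, err := d.db.Query(query, args...)
        if elapsed := time.Since(start); elapsed > slowQueryThreshold {
            d.logger.Warn("slow query", "query", query, "duration", elapsed)
        }
        return rows, err
    }

    // queryRow forwards single-row queries; sql.Row defers execution to Scan,
    // so a real implementation would more likely time the Scan call instead.
    func (d *Database) queryRow(query string, args ...interface{}) *sql.Row {
        return d.db.QueryRow(query, args...)
    }

    // loggingTx embeds *sql.Tx so statements run inside a transaction get the
    // same slow-query logging.
    type loggingTx struct {
        *sql.Tx
        logger *slog.Logger
    }

    func (t *loggingTx) Exec(query string, args ...interface{}) (sql.Result, error) {
        start := time.Now()
        res, err := t.Tx.Exec(query, args...)
        if elapsed := time.Since(start); elapsed > slowQueryThreshold {
            t.logger.Warn("slow query in tx", "query", query, "duration", elapsed)
        }
        return res, err
    }

The signatures above assume the Database struct and imports declared in the file that follows.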
// Package database provides SQLite storage for BGP routing data including ASNs, prefixes, announcements and peerings.
package database

import (
    "database/sql"
    "fmt"
    "log/slog"
    "os"
    "path/filepath"
    "runtime"
    "time"

    "github.com/google/uuid"
    _ "modernc.org/sqlite" // Pure Go SQLite driver
)

const (
    dirPermissions = 0750 // rwxr-x---
    dbSchema       = `
    CREATE TABLE IF NOT EXISTS asns (
        id TEXT PRIMARY KEY,
        number INTEGER UNIQUE NOT NULL,
        first_seen DATETIME NOT NULL,
        last_seen DATETIME NOT NULL
    );

    CREATE TABLE IF NOT EXISTS prefixes (
        id TEXT PRIMARY KEY,
        prefix TEXT UNIQUE NOT NULL,
        ip_version INTEGER NOT NULL, -- 4 for IPv4, 6 for IPv6
        first_seen DATETIME NOT NULL,
        last_seen DATETIME NOT NULL
    );

    CREATE TABLE IF NOT EXISTS announcements (
        id TEXT PRIMARY KEY,
        prefix_id TEXT NOT NULL,
        asn_id TEXT NOT NULL,
        origin_asn_id TEXT NOT NULL,
        path TEXT NOT NULL,
        next_hop TEXT,
        timestamp DATETIME NOT NULL,
        is_withdrawal BOOLEAN NOT NULL DEFAULT 0,
        FOREIGN KEY (prefix_id) REFERENCES prefixes(id),
        FOREIGN KEY (asn_id) REFERENCES asns(id),
        FOREIGN KEY (origin_asn_id) REFERENCES asns(id)
    );

    CREATE TABLE IF NOT EXISTS asn_peerings (
        id TEXT PRIMARY KEY,
        from_asn_id TEXT NOT NULL,
        to_asn_id TEXT NOT NULL,
        first_seen DATETIME NOT NULL,
        last_seen DATETIME NOT NULL,
        FOREIGN KEY (from_asn_id) REFERENCES asns(id),
        FOREIGN KEY (to_asn_id) REFERENCES asns(id),
        UNIQUE(from_asn_id, to_asn_id)
    );

    -- BGP peers that send us messages
    CREATE TABLE IF NOT EXISTS bgp_peers (
        id TEXT PRIMARY KEY,
        peer_ip TEXT UNIQUE NOT NULL,
        peer_asn INTEGER NOT NULL,
        first_seen DATETIME NOT NULL,
        last_seen DATETIME NOT NULL,
        last_message_type TEXT
    );

    -- Live routing table: current state of announced routes
    CREATE TABLE IF NOT EXISTS live_routes (
        id TEXT PRIMARY KEY,
        prefix_id TEXT NOT NULL,
        origin_asn_id TEXT NOT NULL,
        peer_asn INTEGER NOT NULL,
        next_hop TEXT,
        announced_at DATETIME NOT NULL,
        withdrawn_at DATETIME,
        FOREIGN KEY (prefix_id) REFERENCES prefixes(id),
        FOREIGN KEY (origin_asn_id) REFERENCES asns(id),
        UNIQUE(prefix_id, origin_asn_id, peer_asn)
    );

    CREATE INDEX IF NOT EXISTS idx_prefixes_ip_version ON prefixes(ip_version);
    CREATE INDEX IF NOT EXISTS idx_prefixes_version_prefix ON prefixes(ip_version, prefix);
    CREATE INDEX IF NOT EXISTS idx_announcements_timestamp ON announcements(timestamp);
    CREATE INDEX IF NOT EXISTS idx_announcements_prefix_id ON announcements(prefix_id);
    CREATE INDEX IF NOT EXISTS idx_announcements_asn_id ON announcements(asn_id);
    CREATE INDEX IF NOT EXISTS idx_asn_peerings_from_asn ON asn_peerings(from_asn_id);
    CREATE INDEX IF NOT EXISTS idx_asn_peerings_to_asn ON asn_peerings(to_asn_id);
    CREATE INDEX IF NOT EXISTS idx_asn_peerings_lookup ON asn_peerings(from_asn_id, to_asn_id);

    -- Indexes for live routes table
    CREATE INDEX IF NOT EXISTS idx_live_routes_active
        ON live_routes(prefix_id, origin_asn_id)
        WHERE withdrawn_at IS NULL;

    CREATE INDEX IF NOT EXISTS idx_live_routes_origin
        ON live_routes(origin_asn_id)
        WHERE withdrawn_at IS NULL;

    CREATE INDEX IF NOT EXISTS idx_live_routes_prefix
        ON live_routes(prefix_id)
        WHERE withdrawn_at IS NULL;

    -- Critical index for the most common query pattern
    CREATE INDEX IF NOT EXISTS idx_live_routes_lookup
        ON live_routes(prefix_id, origin_asn_id, peer_asn)
        WHERE withdrawn_at IS NULL;

    -- Index for withdrawal updates by prefix and peer
    CREATE INDEX IF NOT EXISTS idx_live_routes_withdraw
        ON live_routes(prefix_id, peer_asn)
        WHERE withdrawn_at IS NULL;

    -- Additional indexes for prefixes table
    CREATE INDEX IF NOT EXISTS idx_prefixes_prefix ON prefixes(prefix);

    -- Indexes for bgp_peers table
    CREATE INDEX IF NOT EXISTS idx_bgp_peers_asn ON bgp_peers(peer_asn);
    CREATE INDEX IF NOT EXISTS idx_bgp_peers_last_seen ON bgp_peers(last_seen);
    `
)

// Database manages the SQLite database connection and operations.
type Database struct {
    db     *sql.DB
    logger *slog.Logger
}

// Config holds database configuration
type Config struct {
    Path string
}

// getDefaultDatabasePath returns the appropriate database path for the OS
func getDefaultDatabasePath() string {
    const dbFilename = "db.sqlite"

    switch runtime.GOOS {
    case "darwin":
        // macOS: ~/Library/Application Support/berlin.sneak.app.routewatch/db.sqlite
        home, err := os.UserHomeDir()
        if err != nil {
            return dbFilename
        }
        appSupport := filepath.Join(home, "Library", "Application Support", "berlin.sneak.app.routewatch")
        if err := os.MkdirAll(appSupport, dirPermissions); err != nil {
            return dbFilename
        }

        return filepath.Join(appSupport, dbFilename)
    default:
        // Linux and others: /var/lib/routewatch/db.sqlite
        dbDir := "/var/lib/routewatch"
        if err := os.MkdirAll(dbDir, dirPermissions); err != nil {
            // Fall back to user's home directory if can't create system directory
            home, err := os.UserHomeDir()
            if err != nil {
                return dbFilename
            }
            userDir := filepath.Join(home, ".local", "share", "routewatch")
            if err := os.MkdirAll(userDir, dirPermissions); err != nil {
                return dbFilename
            }

            return filepath.Join(userDir, dbFilename)
        }

        return filepath.Join(dbDir, dbFilename)
    }
}

// NewConfig provides default database configuration
func NewConfig() Config {
    return Config{
        Path: getDefaultDatabasePath(),
    }
}

// New creates a new database connection and initializes the schema.
func New(logger *slog.Logger) (*Database, error) {
    config := NewConfig()

    return NewWithConfig(config, logger)
}

// NewWithConfig creates a new database connection with custom configuration
func NewWithConfig(config Config, logger *slog.Logger) (*Database, error) {
    // Log database path
    logger.Info("Opening database", "path", config.Path)

    // Ensure directory exists
    dir := filepath.Dir(config.Path)
    if err := os.MkdirAll(dir, dirPermissions); err != nil {
        return nil, fmt.Errorf("failed to create database directory: %w", err)
    }

    // Add connection parameters for modernc.org/sqlite
    dsn := fmt.Sprintf("file:%s?_busy_timeout=5000&_journal_mode=WAL", config.Path)
    db, err := sql.Open("sqlite", dsn)
    if err != nil {
        return nil, fmt.Errorf("failed to open database: %w", err)
    }

    if err := db.Ping(); err != nil {
        return nil, fmt.Errorf("failed to ping database: %w", err)
    }

    // Set connection pool parameters
    db.SetMaxOpenConns(1) // Force serialization since SQLite doesn't handle true concurrency well
    db.SetMaxIdleConns(1)
    db.SetConnMaxLifetime(0)

    database := &Database{db: db, logger: logger}

    if err := database.Initialize(); err != nil {
        return nil, fmt.Errorf("failed to initialize database: %w", err)
    }

    return database, nil
}

// Initialize creates the database schema if it doesn't exist.
func (d *Database) Initialize() error {
    // Set SQLite pragmas for better performance
    pragmas := []string{
        "PRAGMA journal_mode=WAL",    // Already set in connection string
        "PRAGMA synchronous=NORMAL",  // Faster than FULL, still safe
        "PRAGMA cache_size=-524288",  // 512MB cache
        "PRAGMA temp_store=MEMORY",   // Use memory for temp tables
        "PRAGMA mmap_size=268435456", // 256MB memory-mapped I/O
        "PRAGMA optimize",            // Run optimizer
    }

    for _, pragma := range pragmas {
        if err := d.exec(pragma); err != nil {
            d.logger.Warn("Failed to set pragma", "pragma", pragma, "error", err)
        }
    }

    err := d.exec(dbSchema)

    return err
}

// Close closes the database connection.
func (d *Database) Close() error {
    return d.db.Close()
}

// beginTx starts a new transaction with logging
func (d *Database) beginTx() (*loggingTx, error) {
    tx, err := d.db.Begin()
    if err != nil {
        return nil, err
    }

    return &loggingTx{Tx: tx, logger: d.logger}, nil
}

// GetOrCreateASN retrieves an existing ASN or creates a new one if it doesn't exist.
func (d *Database) GetOrCreateASN(number int, timestamp time.Time) (*ASN, error) {
    tx, err := d.beginTx()
    if err != nil {
        return nil, err
    }
    defer func() {
        if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
            d.logger.Error("Failed to rollback transaction", "error", err)
        }
    }()

    var asn ASN
    var idStr string
    err = tx.QueryRow("SELECT id, number, first_seen, last_seen FROM asns WHERE number = ?", number).
        Scan(&idStr, &asn.Number, &asn.FirstSeen, &asn.LastSeen)

    if err == nil {
        // ASN exists, update last_seen
        asn.ID, _ = uuid.Parse(idStr)
        _, err = tx.Exec("UPDATE asns SET last_seen = ? WHERE id = ?", timestamp, asn.ID.String())
        if err != nil {
            return nil, err
        }
        asn.LastSeen = timestamp

        if err = tx.Commit(); err != nil {
            d.logger.Error("Failed to commit transaction for ASN update", "asn", number, "error", err)

            return nil, err
        }

        return &asn, nil
    }

    if err != sql.ErrNoRows {
        return nil, err
    }

    // ASN doesn't exist, create it
    asn = ASN{
        ID:        generateUUID(),
        Number:    number,
        FirstSeen: timestamp,
        LastSeen:  timestamp,
    }
    _, err = tx.Exec("INSERT INTO asns (id, number, first_seen, last_seen) VALUES (?, ?, ?, ?)",
        asn.ID.String(), asn.Number, asn.FirstSeen, asn.LastSeen)
    if err != nil {
        return nil, err
    }

    if err = tx.Commit(); err != nil {
        d.logger.Error("Failed to commit transaction for ASN creation", "asn", number, "error", err)

        return nil, err
    }

    return &asn, nil
}

// GetOrCreatePrefix retrieves an existing prefix or creates a new one if it doesn't exist.
func (d *Database) GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error) {
    tx, err := d.beginTx()
    if err != nil {
        return nil, err
    }
    defer func() {
        if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
            d.logger.Error("Failed to rollback transaction", "error", err)
        }
    }()

    var p Prefix
    var idStr string
    err = tx.QueryRow("SELECT id, prefix, ip_version, first_seen, last_seen FROM prefixes WHERE prefix = ?", prefix).
        Scan(&idStr, &p.Prefix, &p.IPVersion, &p.FirstSeen, &p.LastSeen)

    if err == nil {
        // Prefix exists, update last_seen
        p.ID, _ = uuid.Parse(idStr)
        _, err = tx.Exec("UPDATE prefixes SET last_seen = ? WHERE id = ?", timestamp, p.ID.String())
        if err != nil {
            return nil, err
        }
        p.LastSeen = timestamp

        if err = tx.Commit(); err != nil {
            d.logger.Error("Failed to commit transaction for prefix update", "prefix", prefix, "error", err)

            return nil, err
        }

        return &p, nil
    }

    if err != sql.ErrNoRows {
        return nil, err
    }

    // Prefix doesn't exist, create it
    p = Prefix{
        ID:        generateUUID(),
        Prefix:    prefix,
        IPVersion: detectIPVersion(prefix),
        FirstSeen: timestamp,
        LastSeen:  timestamp,
    }
    _, err = tx.Exec("INSERT INTO prefixes (id, prefix, ip_version, first_seen, last_seen) VALUES (?, ?, ?, ?, ?)",
        p.ID.String(), p.Prefix, p.IPVersion, p.FirstSeen, p.LastSeen)
    if err != nil {
        return nil, err
    }

    if err = tx.Commit(); err != nil {
        d.logger.Error("Failed to commit transaction for prefix creation", "prefix", prefix, "error", err)

        return nil, err
    }

    return &p, nil
}

// RecordAnnouncement inserts a new BGP announcement or withdrawal into the database.
func (d *Database) RecordAnnouncement(announcement *Announcement) error {
    err := d.exec(`
        INSERT INTO announcements (id, prefix_id, asn_id, origin_asn_id, path, next_hop, timestamp, is_withdrawal)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
        announcement.ID.String(), announcement.PrefixID.String(),
        announcement.ASNID.String(), announcement.OriginASNID.String(),
        announcement.Path, announcement.NextHop, announcement.Timestamp, announcement.IsWithdrawal)

    return err
}

// RecordPeering records a peering relationship between two ASNs.
func (d *Database) RecordPeering(fromASNID, toASNID string, timestamp time.Time) error {
    tx, err := d.beginTx()
    if err != nil {
        return err
    }
    defer func() {
        if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
            d.logger.Error("Failed to rollback transaction", "error", err)
        }
    }()

    var exists bool
    err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM asn_peerings WHERE from_asn_id = ? AND to_asn_id = ?)",
        fromASNID, toASNID).Scan(&exists)
    if err != nil {
        return err
    }

    if exists {
        _, err = tx.Exec("UPDATE asn_peerings SET last_seen = ? WHERE from_asn_id = ? AND to_asn_id = ?",
            timestamp, fromASNID, toASNID)
    } else {
        _, err = tx.Exec(`
            INSERT INTO asn_peerings (id, from_asn_id, to_asn_id, first_seen, last_seen)
            VALUES (?, ?, ?, ?, ?)`,
            generateUUID().String(), fromASNID, toASNID, timestamp, timestamp)
    }
    if err != nil {
        return err
    }

    if err = tx.Commit(); err != nil {
        d.logger.Error("Failed to commit transaction for peering",
            "from_asn_id", fromASNID,
            "to_asn_id", toASNID,
            "error", err,
        )

        return err
    }

    return nil
}

// UpdateLiveRoute updates the live routing table for an announcement
func (d *Database) UpdateLiveRoute(
    prefixID, originASNID uuid.UUID,
    peerASN int,
    nextHop string,
    timestamp time.Time,
) error {
    // Check if route already exists
    var routeID sql.NullString
    err := d.queryRow(`
        SELECT id FROM live_routes
        WHERE prefix_id = ? AND origin_asn_id = ? AND peer_asn = ? AND withdrawn_at IS NULL`,
        prefixID.String(), originASNID.String(), peerASN).Scan(&routeID)

    if err != nil && err != sql.ErrNoRows {
        return err
    }

    if routeID.Valid {
        // Route exists and is active, update it
        err = d.exec(`
            UPDATE live_routes
            SET next_hop = ?, announced_at = ?
            WHERE id = ?`,
            nextHop, timestamp, routeID.String)
    } else {
        // Either new route or re-announcement of withdrawn route
        err = d.exec(`
            INSERT OR REPLACE INTO live_routes
            (id, prefix_id, origin_asn_id, peer_asn, next_hop, announced_at, withdrawn_at)
            VALUES (?, ?, ?, ?, ?, ?, NULL)`,
            generateUUID().String(), prefixID.String(), originASNID.String(),
            peerASN, nextHop, timestamp)
    }

    return err
}

// WithdrawLiveRoute marks a route as withdrawn in the live routing table
func (d *Database) WithdrawLiveRoute(prefixID uuid.UUID, peerASN int, timestamp time.Time) error {
    err := d.exec(`
        UPDATE live_routes
        SET withdrawn_at = ?
        WHERE prefix_id = ? AND peer_asn = ? AND withdrawn_at IS NULL`,
        timestamp, prefixID.String(), peerASN)

    return err
}

// GetActiveLiveRoutes returns all currently active routes (not withdrawn)
func (d *Database) GetActiveLiveRoutes() ([]LiveRoute, error) {
    rows, err := d.query(`
        SELECT id, prefix_id, origin_asn_id, peer_asn, next_hop, announced_at
        FROM live_routes
        WHERE withdrawn_at IS NULL
        ORDER BY announced_at DESC`)
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = rows.Close()
    }()

    var routes []LiveRoute
    for rows.Next() {
        var route LiveRoute
        var idStr, prefixIDStr, originASNIDStr string
        err := rows.Scan(&idStr, &prefixIDStr, &originASNIDStr,
            &route.PeerASN, &route.NextHop, &route.AnnouncedAt)
        if err != nil {
            return nil, err
        }

        route.ID, _ = uuid.Parse(idStr)
        route.PrefixID, _ = uuid.Parse(prefixIDStr)
        route.OriginASNID, _ = uuid.Parse(originASNIDStr)

        routes = append(routes, route)
    }

    return routes, rows.Err()
}

// UpdatePeer updates or creates a BGP peer record
func (d *Database) UpdatePeer(peerIP string, peerASN int, messageType string, timestamp time.Time) error {
    tx, err := d.beginTx()
    if err != nil {
        return err
    }
    defer func() {
        if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
            d.logger.Error("Failed to rollback transaction", "error", err)
        }
    }()

    var exists bool
    err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM bgp_peers WHERE peer_ip = ?)", peerIP).Scan(&exists)
    if err != nil {
        return err
    }

    if exists {
        _, err = tx.Exec(
            "UPDATE bgp_peers SET peer_asn = ?, last_seen = ?, last_message_type = ? WHERE peer_ip = ?",
            peerASN, timestamp, messageType, peerIP,
        )
    } else {
        _, err = tx.Exec(
            "INSERT INTO bgp_peers (id, peer_ip, peer_asn, first_seen, last_seen, last_message_type) VALUES (?, ?, ?, ?, ?, ?)",
            generateUUID().String(), peerIP, peerASN, timestamp, timestamp, messageType,
        )
    }
    if err != nil {
        return err
    }

    if err = tx.Commit(); err != nil {
        d.logger.Error("Failed to commit transaction for peer update",
            "peer_ip", peerIP,
            "peer_asn", peerASN,
            "error", err,
        )

        return err
    }

    return nil
}

// GetStats returns database statistics
func (d *Database) GetStats() (Stats, error) {
    var stats Stats

    // Count ASNs
    d.logger.Info("Counting ASNs")
    err := d.queryRow("SELECT COUNT(*) FROM asns").Scan(&stats.ASNs)
    if err != nil {
        return stats, err
    }

    // Count prefixes
    d.logger.Info("Counting prefixes")
    err = d.queryRow("SELECT COUNT(*) FROM prefixes").Scan(&stats.Prefixes)
    if err != nil {
        return stats, err
    }

    // Count IPv4 and IPv6 prefixes
    d.logger.Info("Counting IPv4 prefixes")
    const ipVersionV4 = 4
    err = d.queryRow("SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", ipVersionV4).Scan(&stats.IPv4Prefixes)
    if err != nil {
        return stats, err
    }

    d.logger.Info("Counting IPv6 prefixes")
    const ipVersionV6 = 6
    err = d.queryRow("SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", ipVersionV6).Scan(&stats.IPv6Prefixes)
    if err != nil {
        return stats, err
    }

    // Count peerings
    d.logger.Info("Counting peerings")
    err = d.queryRow("SELECT COUNT(*) FROM asn_peerings").Scan(&stats.Peerings)
    if err != nil {
        return stats, err
    }

    // Count live routes
    d.logger.Info("Counting live routes")
    err = d.queryRow("SELECT COUNT(*) FROM live_routes WHERE withdrawn_at IS NULL").Scan(&stats.LiveRoutes)
    if err != nil {
        return stats, err
    }

    d.logger.Info("Stats collection complete")

    return stats, nil
}