Optimize database batch operations with prepared statements
- Add prepared statements to all batch operations for better performance
- Reduce database lock contention by properly batching operations
- Update SQLite settings for extreme performance (8GB cache, synchronous=OFF)
- Add explicit handling of statement-close errors
- Update tests to properly track batch operations
This commit is contained in:
@@ -62,7 +62,7 @@ func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
|
||||
|
||||
// Add connection parameters for go-sqlite3
|
||||
// Enable WAL mode and other performance optimizations
|
||||
dsn := fmt.Sprintf("file:%s?_busy_timeout=5000&_journal_mode=WAL&_synchronous=NORMAL&cache=shared", dbPath)
|
||||
dsn := fmt.Sprintf("file:%s?_busy_timeout=5000&_journal_mode=WAL&_synchronous=OFF&cache=shared", dbPath)
|
||||
db, err := sql.Open("sqlite3", dsn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open database: %w", err)
|
||||
@@ -73,9 +73,10 @@ func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
|
||||
}
|
||||
|
||||
// Set connection pool parameters
|
||||
// Single connection to avoid locking issues with SQLite
|
||||
db.SetMaxOpenConns(1)
|
||||
db.SetMaxIdleConns(1)
|
||||
// Multiple connections for better concurrency
|
||||
const maxConns = 10
|
||||
db.SetMaxOpenConns(maxConns)
|
||||
db.SetMaxIdleConns(maxConns)
|
||||
db.SetConnMaxLifetime(0)
|
||||
|
||||
database := &Database{db: db, logger: logger, path: dbPath}
|
||||
@@ -89,21 +90,22 @@ func New(cfg *config.Config, logger *logger.Logger) (*Database, error) {
|
||||
|
||||
// Initialize creates the database schema if it doesn't exist.
|
||||
func (d *Database) Initialize() error {
|
||||
// Set SQLite pragmas for better performance
|
||||
// Set SQLite pragmas for extreme performance - prioritize speed over durability
|
||||
pragmas := []string{
|
||||
"PRAGMA journal_mode=WAL", // Write-Ahead Logging
|
||||
"PRAGMA synchronous=OFF", // Don't wait for disk writes
|
||||
"PRAGMA cache_size=-2097152", // 2GB cache (negative = KB)
|
||||
"PRAGMA temp_store=MEMORY", // Use memory for temp tables
|
||||
"PRAGMA mmap_size=536870912", // 512MB memory-mapped I/O
|
||||
"PRAGMA wal_autocheckpoint=12500", // Checkpoint every 12.5k pages (50MB with 4KB pages)
|
||||
"PRAGMA wal_checkpoint(PASSIVE)", // Checkpoint now
|
||||
"PRAGMA page_size=4096", // Standard page size
|
||||
"PRAGMA busy_timeout=2000", // 2 second busy timeout
|
||||
"PRAGMA locking_mode=NORMAL", // Allow multiple connections
|
||||
"PRAGMA read_uncommitted=true", // Allow dirty reads for better concurrency
|
||||
"PRAGMA journal_size_limit=104857600", // Limit WAL to 100MB
|
||||
"PRAGMA analysis_limit=0", // Disable automatic ANALYZE
|
||||
"PRAGMA journal_mode=WAL", // Write-Ahead Logging
|
||||
"PRAGMA synchronous=OFF", // Don't wait for disk writes
|
||||
"PRAGMA cache_size=-8388608", // 8GB cache (negative = KB)
|
||||
"PRAGMA temp_store=MEMORY", // Use memory for temp tables
|
||||
"PRAGMA mmap_size=10737418240", // 10GB memory-mapped I/O
|
||||
"PRAGMA page_size=8192", // 8KB pages for better performance
|
||||
"PRAGMA wal_autocheckpoint=100000", // Checkpoint every 100k pages (800MB)
|
||||
"PRAGMA wal_checkpoint(TRUNCATE)", // Checkpoint and truncate WAL now
|
||||
"PRAGMA busy_timeout=5000", // 5 second busy timeout
|
||||
"PRAGMA locking_mode=NORMAL", // Normal locking for multiple connections
|
||||
"PRAGMA read_uncommitted=true", // Allow dirty reads
|
||||
"PRAGMA analysis_limit=0", // Disable automatic ANALYZE
|
||||
"PRAGMA threads=4", // Use multiple threads for sorting
|
||||
"PRAGMA cache_spill=false", // Keep cache in memory, don't spill to disk
|
||||
}
|
||||
|
||||
for _, pragma := range pragmas {
|
||||
@@ -141,6 +143,241 @@ func (d *Database) beginTx() (*loggingTx, error) {
|
||||
return &loggingTx{Tx: tx, logger: d.logger}, nil
|
||||
}
|
||||
|
||||
// UpsertLiveRouteBatch inserts or updates multiple live routes in a single transaction
|
||||
func (d *Database) UpsertLiveRouteBatch(routes []*LiveRoute) error {
|
||||
if len(routes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := d.beginTx()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
|
||||
d.logger.Error("Failed to rollback transaction", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Use prepared statement for better performance
|
||||
query := `
|
||||
INSERT INTO live_routes (id, prefix, mask_length, ip_version, origin_asn, peer_ip, as_path, next_hop,
|
||||
last_updated, v4_ip_start, v4_ip_end)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(prefix, origin_asn, peer_ip) DO UPDATE SET
|
||||
mask_length = excluded.mask_length,
|
||||
ip_version = excluded.ip_version,
|
||||
as_path = excluded.as_path,
|
||||
next_hop = excluded.next_hop,
|
||||
last_updated = excluded.last_updated,
|
||||
v4_ip_start = excluded.v4_ip_start,
|
||||
v4_ip_end = excluded.v4_ip_end
|
||||
`
|
||||
|
||||
stmt, err := tx.Prepare(query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare statement: %w", err)
|
||||
}
|
||||
defer func() { _ = stmt.Close() }()
|
||||
|
||||
for _, route := range routes {
|
||||
// Encode AS path as JSON
|
||||
pathJSON, err := json.Marshal(route.ASPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encode AS path: %w", err)
|
||||
}
|
||||
|
||||
// Convert v4_ip_start and v4_ip_end to interface{} for SQL NULL handling
|
||||
var v4Start, v4End interface{}
|
||||
if route.V4IPStart != nil {
|
||||
v4Start = *route.V4IPStart
|
||||
}
|
||||
if route.V4IPEnd != nil {
|
||||
v4End = *route.V4IPEnd
|
||||
}
|
||||
|
||||
_, err = stmt.Exec(
|
||||
route.ID.String(),
|
||||
route.Prefix,
|
||||
route.MaskLength,
|
||||
route.IPVersion,
|
||||
route.OriginASN,
|
||||
route.PeerIP,
|
||||
string(pathJSON),
|
||||
route.NextHop,
|
||||
route.LastUpdated,
|
||||
v4Start,
|
||||
v4End,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upsert route %s: %w", route.Prefix, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteLiveRouteBatch deletes multiple live routes in a single transaction
|
||||
func (d *Database) DeleteLiveRouteBatch(deletions []LiveRouteDeletion) error {
|
||||
if len(deletions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := d.beginTx()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
|
||||
d.logger.Error("Failed to rollback transaction", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Separate deletions by type and use prepared statements
|
||||
var withOrigin []LiveRouteDeletion
|
||||
var withoutOrigin []LiveRouteDeletion
|
||||
|
||||
for _, del := range deletions {
|
||||
if del.OriginASN == 0 {
|
||||
withoutOrigin = append(withoutOrigin, del)
|
||||
} else {
|
||||
withOrigin = append(withOrigin, del)
|
||||
}
|
||||
}
|
||||
|
||||
// Process deletions with origin ASN
|
||||
if len(withOrigin) > 0 {
|
||||
stmt, err := tx.Prepare(`DELETE FROM live_routes WHERE prefix = ? AND origin_asn = ? AND peer_ip = ?`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare delete statement: %w", err)
|
||||
}
|
||||
defer func() { _ = stmt.Close() }()
|
||||
|
||||
for _, del := range withOrigin {
|
||||
_, err = stmt.Exec(del.Prefix, del.OriginASN, del.PeerIP)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete route %s: %w", del.Prefix, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process deletions without origin ASN
|
||||
if len(withoutOrigin) > 0 {
|
||||
stmt, err := tx.Prepare(`DELETE FROM live_routes WHERE prefix = ? AND peer_ip = ?`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare delete statement: %w", err)
|
||||
}
|
||||
defer func() { _ = stmt.Close() }()
|
||||
|
||||
for _, del := range withoutOrigin {
|
||||
_, err = stmt.Exec(del.Prefix, del.PeerIP)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete route %s: %w", del.Prefix, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOrCreateASNBatch creates or updates multiple ASNs in a single transaction
|
||||
func (d *Database) GetOrCreateASNBatch(asns map[int]time.Time) error {
|
||||
if len(asns) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := d.beginTx()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
|
||||
d.logger.Error("Failed to rollback transaction", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Prepare statements
|
||||
selectStmt, err := tx.Prepare(
|
||||
"SELECT id, number, handle, description, first_seen, last_seen FROM asns WHERE number = ?")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare select statement: %w", err)
|
||||
}
|
||||
defer func() { _ = selectStmt.Close() }()
|
||||
|
||||
updateStmt, err := tx.Prepare("UPDATE asns SET last_seen = ? WHERE id = ?")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare update statement: %w", err)
|
||||
}
|
||||
defer func() { _ = updateStmt.Close() }()
|
||||
|
||||
insertStmt, err := tx.Prepare(
|
||||
"INSERT INTO asns (id, number, handle, description, first_seen, last_seen) VALUES (?, ?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare insert statement: %w", err)
|
||||
}
|
||||
defer func() { _ = insertStmt.Close() }()
|
||||
|
||||
for number, timestamp := range asns {
|
||||
var asn ASN
|
||||
var idStr string
|
||||
var handle, description sql.NullString
|
||||
|
||||
err = selectStmt.QueryRow(number).Scan(&idStr, &asn.Number, &handle, &description, &asn.FirstSeen, &asn.LastSeen)
|
||||
|
||||
if err == nil {
|
||||
// ASN exists, update last_seen
|
||||
asn.ID, _ = uuid.Parse(idStr)
|
||||
_, err = updateStmt.Exec(timestamp, asn.ID.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update ASN %d: %w", number, err)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
// ASN doesn't exist, create it
|
||||
asn = ASN{
|
||||
ID: generateUUID(),
|
||||
Number: number,
|
||||
FirstSeen: timestamp,
|
||||
LastSeen: timestamp,
|
||||
}
|
||||
|
||||
// Look up ASN info
|
||||
if info, ok := asinfo.Get(number); ok {
|
||||
asn.Handle = info.Handle
|
||||
asn.Description = info.Description
|
||||
}
|
||||
|
||||
_, err = insertStmt.Exec(asn.ID.String(), asn.Number, asn.Handle, asn.Description, asn.FirstSeen, asn.LastSeen)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert ASN %d: %w", number, err)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query ASN %d: %w", number, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOrCreateASN retrieves an existing ASN or creates a new one if it doesn't exist.
|
||||
func (d *Database) GetOrCreateASN(number int, timestamp time.Time) (*ASN, error) {
|
||||
tx, err := d.beginTx()
|
||||
@@ -344,6 +581,69 @@ func (d *Database) RecordPeering(asA, asB int, timestamp time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePeerBatch updates or creates multiple BGP peer records in a single transaction
|
||||
func (d *Database) UpdatePeerBatch(peers map[string]PeerUpdate) error {
|
||||
if len(peers) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := d.beginTx()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
|
||||
d.logger.Error("Failed to rollback transaction", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Prepare statements
|
||||
checkStmt, err := tx.Prepare("SELECT EXISTS(SELECT 1 FROM bgp_peers WHERE peer_ip = ?)")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare check statement: %w", err)
|
||||
}
|
||||
defer func() { _ = checkStmt.Close() }()
|
||||
|
||||
updateStmt, err := tx.Prepare(
|
||||
"UPDATE bgp_peers SET peer_asn = ?, last_seen = ?, last_message_type = ? WHERE peer_ip = ?")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare update statement: %w", err)
|
||||
}
|
||||
defer func() { _ = updateStmt.Close() }()
|
||||
|
||||
insertStmt, err := tx.Prepare(
|
||||
"INSERT INTO bgp_peers (id, peer_ip, peer_asn, first_seen, last_seen, last_message_type) VALUES (?, ?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare insert statement: %w", err)
|
||||
}
|
||||
defer func() { _ = insertStmt.Close() }()
|
||||
|
||||
for _, update := range peers {
|
||||
var exists bool
|
||||
err = checkStmt.QueryRow(update.PeerIP).Scan(&exists)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check peer %s: %w", update.PeerIP, err)
|
||||
}
|
||||
|
||||
if exists {
|
||||
_, err = updateStmt.Exec(update.PeerASN, update.Timestamp, update.MessageType, update.PeerIP)
|
||||
} else {
|
||||
_, err = insertStmt.Exec(
|
||||
generateUUID().String(), update.PeerIP, update.PeerASN,
|
||||
update.Timestamp, update.Timestamp, update.MessageType)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update peer %s: %w", update.PeerIP, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePeer updates or creates a BGP peer record
|
||||
func (d *Database) UpdatePeer(peerIP string, peerASN int, messageType string, timestamp time.Time) error {
|
||||
tx, err := d.beginTx()
|
||||
|
||||
@@ -22,6 +22,7 @@ type Stats struct {
|
||||
type Store interface {
|
||||
// ASN operations
|
||||
GetOrCreateASN(number int, timestamp time.Time) (*ASN, error)
|
||||
GetOrCreateASNBatch(asns map[int]time.Time) error
|
||||
|
||||
// Prefix operations
|
||||
GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error)
|
||||
@@ -37,10 +38,13 @@ type Store interface {
|
||||
|
||||
// Peer operations
|
||||
UpdatePeer(peerIP string, peerASN int, messageType string, timestamp time.Time) error
|
||||
UpdatePeerBatch(peers map[string]PeerUpdate) error
|
||||
|
||||
// Live route operations
|
||||
UpsertLiveRoute(route *LiveRoute) error
|
||||
UpsertLiveRouteBatch(routes []*LiveRoute) error
|
||||
DeleteLiveRoute(prefix string, originASN int, peerIP string) error
|
||||
DeleteLiveRouteBatch(deletions []LiveRouteDeletion) error
|
||||
GetPrefixDistribution() (ipv4 []PrefixDistribution, ipv6 []PrefixDistribution, err error)
|
||||
GetLiveRouteCounts() (ipv4Count, ipv6Count int, err error)
|
||||
|
||||
|
||||
@@ -77,3 +77,18 @@ type ASInfo struct {
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
Age string `json:"age"`
|
||||
}
|
||||
|
||||
// LiveRouteDeletion represents parameters for deleting a live route.
type LiveRouteDeletion struct {
	Prefix    string // prefix of the route to remove
	OriginASN int    // origin AS number; 0 means "delete regardless of origin"
	PeerIP    string // BGP peer the route was learned from
}
|
||||
|
||||
// PeerUpdate represents parameters for updating a peer
|
||||
type PeerUpdate struct {
|
||||
PeerIP string
|
||||
PeerASN int
|
||||
MessageType string
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user