Compare commits


No commits in common. "main" and "fix-min-time-calculation" have entirely different histories.

38 changed files with 178,959 additions and 4,777 deletions

.gitignore vendored

@@ -6,9 +6,6 @@
*.dylib
/bin/
.DS_Store
log.txt
# Test binary, built with `go test -c`
*.test


@@ -1,65 +0,0 @@
# Build stage
FROM golang:1.24-bookworm AS builder
# Install build dependencies (zstd for archive, gcc for CGO/sqlite3)
RUN apt-get update && apt-get install -y --no-install-recommends \
zstd \
gcc \
libc6-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /src
# Copy everything
COPY . .
# Vendor dependencies (must be after copying source)
RUN go mod download && go mod vendor
# Build the binary with CGO enabled (required for sqlite3)
RUN CGO_ENABLED=1 GOOS=linux go build -o /routewatch ./cmd/routewatch
# Create source archive with vendored dependencies
RUN tar --zstd -cf /routewatch-source.tar.zst \
--exclude='.git' \
--exclude='*.tar.zst' \
.
# Runtime stage
FROM debian:bookworm-slim
# Install runtime dependencies
# - ca-certificates: for HTTPS connections
# - curl: for health checks
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
&& rm -rf /var/lib/apt/lists/*
# Create non-root user
RUN useradd -r -u 1000 -m routewatch
RUN mkdir -p /var/lib/berlin.sneak.app.routewatch && chown routewatch:routewatch /var/lib/berlin.sneak.app.routewatch
RUN mkdir /app
WORKDIR /app
# Copy binary and source archive from builder
COPY --from=builder /routewatch /app/routewatch
COPY --from=builder /routewatch-source.tar.zst /app/source/routewatch-source.tar.zst
# Set ownership
RUN chown -R routewatch:routewatch /app
ENV XDG_DATA_HOME=/var/lib
# Expose HTTP port
EXPOSE 8080
COPY ./entrypoint.sh /entrypoint.sh
# Health check using the health endpoint
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD curl -sf http://localhost:8080/.well-known/healthcheck.json || exit 1
ENTRYPOINT ["/bin/bash", "/entrypoint.sh" ]


@@ -1,11 +1,5 @@
export DEBUG = routewatch
# Git revision for version embedding
GIT_REVISION := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")
GIT_REVISION_SHORT := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown")
VERSION_PKG := git.eeqj.de/sneak/routewatch/internal/version
LDFLAGS := -X $(VERSION_PKG).GitRevision=$(GIT_REVISION) -X $(VERSION_PKG).GitRevisionShort=$(GIT_REVISION_SHORT)
.PHONY: test fmt lint build clean run asupdate
all: test
@@ -21,7 +15,7 @@ lint:
golangci-lint run
build:
CGO_ENABLED=1 go build -ldflags "$(LDFLAGS)" -o bin/routewatch cmd/routewatch/main.go
CGO_ENABLED=1 go build -o bin/routewatch cmd/routewatch/main.go
clean:
rm -rf bin/

README.md

@@ -1,194 +0,0 @@
# RouteWatch
RouteWatch is a real-time BGP routing table monitor that streams BGP UPDATE messages from the RIPE RIS Live service, maintains a live routing table in SQLite, and provides HTTP APIs for querying routing information.
## Features
- Real-time streaming of BGP updates from RIPE RIS Live
- Maintains live IPv4 and IPv6 routing tables
- Tracks AS peering relationships
- HTTP API for IP-to-AS lookups, prefix details, and AS information
- Automatic reconnection with exponential backoff
- Batched database writes for high performance
- Backpressure handling to prevent memory exhaustion
## Installation
```bash
go build -o routewatch ./cmd/routewatch
```
## Usage
```bash
# Run the daemon (listens on port 8080 by default)
./routewatch
# Set custom port
PORT=3000 ./routewatch
# Enable debug logging
DEBUG=routewatch ./routewatch
```
## HTTP Endpoints
### Web Interface
- `GET /` - Redirects to /status
- `GET /status` - HTML status dashboard
- `GET /status.json` - JSON statistics
- `GET /as/{asn}` - AS detail page (HTML)
- `GET /prefix/{prefix}` - Prefix detail page (HTML)
- `GET /prefixlength/{length}` - IPv4 prefixes by mask length
- `GET /prefixlength6/{length}` - IPv6 prefixes by mask length
- `GET /ip/{ip}` - Redirects to prefix containing the IP
### API v1
- `GET /api/v1/stats` - Detailed statistics with handler metrics
- `GET /api/v1/ip/{ip}` - Look up AS information for an IP address
- `GET /api/v1/as/{asn}` - Get prefixes announced by an AS
- `GET /api/v1/prefix/{prefix}` - Get routes for a specific prefix
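For example, a minimal Go client for the IP lookup endpoint could look like the sketch below. The field names are assumptions based on the `ASInfo` structure used by the lookup code, not a frozen API contract, so treat them as illustrative.

```go
// Hypothetical client for GET /api/v1/ip/{ip}; field names are assumed
// to mirror the ASInfo struct (asn, handle, description) and may differ.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type ipLookup struct {
	ASN         int    `json:"asn"`
	Handle      string `json:"handle"`
	Description string `json:"description"`
}

func main() {
	resp, err := http.Get("http://localhost:8080/api/v1/ip/8.8.8.8")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var info ipLookup
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("AS%d %s (%s)\n", info.ASN, info.Handle, info.Description)
}
```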
## Code Structure
```
routewatch/
├── cmd/
│ ├── routewatch/ # Main daemon entry point
│ ├── asinfo-gen/ # Utility to generate AS info data
│ └── streamdumper/ # Debug utility for raw stream output
├── internal/
│ ├── routewatch/ # Core application logic
│ ├── server/ # HTTP server and handlers
│ ├── database/ # SQLite storage layer
│ ├── streamer/ # RIPE RIS Live client
│ ├── ristypes/ # BGP message data structures
│ ├── logger/ # Structured logging wrapper
│ ├── metrics/ # Performance metrics tracking
│ ├── config/ # Configuration management
│ └── templates/ # HTML templates
└── pkg/
└── asinfo/ # AS information lookup (public API)
```
## Architecture Overview
### Component Relationships
```
┌─────────────────────────────────────────────────────────────────┐
│ RouteWatch │
│ (internal/routewatch/app.go - main orchestrator) │
├─────────────────────────────────────────────────────────────────┤
│ │
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
│ │ Streamer │───▶│ Handlers │───▶│ Database │ │
│ │ │ │ │ │ │ │
│ │ RIS Live │ │ - ASHandler │ │ SQLite with │ │
│ │ WebSocket │ │ - PeerHandler│ │ WAL mode │ │
│ │ client │ │ - PrefixHdlr │ │ │ │
│ │ │ │ - PeeringHdlr│ │ Tables: │ │
│ └──────────────┘ └──────────────┘ │ - asns │ │
│ │ - prefixes │ │
│ ┌──────────────┐ ┌──────────────┐ │ - live_routes│ │
│ │ Server │───▶│ Handlers │───▶│ - peerings │ │
│ │ │ │ │ │ - bgp_peers │ │
│ │ Chi router │ │ Status, API │ └──────────────┘ │
│ │ port 8080 │ │ AS, Prefix │ │
│ └──────────────┘ └──────────────┘ │
│ │
└─────────────────────────────────────────────────────────────────┘
```
### Execution Flow
1. **Startup** (`cmd/routewatch/main.go` → `internal/routewatch/cli.go`)
- Uber fx dependency injection initializes all components
- Signal handlers registered for graceful shutdown
2. **Initialization** (`internal/routewatch/app.go`)
- Database created with SQLite schema (WAL mode, 3GB cache)
- Message handlers registered with the streamer
- HTTP server started on configured port
3. **Message Processing Pipeline**
```
RIS Live Stream → JSON Parser → Message Dispatcher → Handler Queues → Batch Writers → SQLite
```
- Streamer connects to `ris-live.ripe.net` via HTTP
- Parses BGP UPDATE messages from JSON stream
- Dispatches to registered handlers based on message type
- Each handler has its own queue with backpressure handling
- Handlers batch writes for efficiency (25K-30K ops, 1-2s timeout)
4. **Handler Details**
- **ASHandler**: Tracks all ASNs seen in AS paths
- **PeerHandler**: Records BGP peer information
- **PrefixHandler**: Maintains live routing table (upserts on announcement, deletes on withdrawal)
- **PeeringHandler**: Extracts AS peering relationships from AS paths
5. **HTTP Request Flow**
```
Request → Chi Router → Middleware (timeout, logging) → Handler → Database Query → Response
```
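The batching and backpressure behaviour from step 3 can be sketched roughly as below. This is a simplified illustration, not the project's handler code: the `batchWriter` type and its thresholds are invented for the example, and the real handlers in `internal/routewatch` add per-handler queues, metrics, and graceful shutdown.

```go
// Simplified sketch of the batch-and-flush pattern with probabilistic
// backpressure. Names and details are illustrative, not the real code.
package sketch

import (
	"math/rand"
	"time"
)

type batchWriter struct {
	queue chan string    // incoming items, e.g. prefixes to upsert
	flush func([]string) // batched database write
}

// enqueue drops messages probabilistically once the queue is more than
// half full, so a slow database cannot exhaust memory.
func (w *batchWriter) enqueue(item string) {
	if occ, half := len(w.queue), cap(w.queue)/2; occ > half {
		if rand.Float64() < float64(occ-half)/float64(half) {
			return // dropped under backpressure
		}
	}
	select {
	case w.queue <- item:
	default: // queue completely full: drop
	}
}

// run accumulates items and flushes either when the batch is large
// enough (e.g. 25K-30K ops) or when the timeout (e.g. 1-2s) elapses.
func (w *batchWriter) run(batchSize int, timeout time.Duration) {
	batch := make([]string, 0, batchSize)
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	for {
		select {
		case item := <-w.queue:
			batch = append(batch, item)
			if len(batch) >= batchSize {
				w.flush(batch)
				batch = batch[:0]
			}
		case <-ticker.C:
			if len(batch) > 0 {
				w.flush(batch)
				batch = batch[:0]
			}
		}
	}
}
```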
### Key Design Patterns
- **Batched Writes**: All database operations are batched for performance
- **Backpressure**: Probabilistic message dropping when queues exceed 50% capacity
- **Graceful Shutdown**: 60-second timeout, flushes all pending batches
- **Reconnection**: Exponential backoff (5s-320s) with reset after 30s of stable connection
- **IPv4 Optimization**: IP ranges stored as uint32 for O(1) lookups
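To make the IPv4 optimization concrete: each announced prefix can be converted once into a numeric start/end pair, so containment checks and the range columns in the schema below reduce to integer comparisons. A minimal sketch (not the project's actual helper) follows.

```go
// Sketch: convert an IPv4 CIDR prefix into the uint32 range used for
// fast lookups (the v4_ip_start/v4_ip_end columns). Illustrative only.
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func v4Range(cidr string) (start, end uint32, err error) {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return 0, 0, err
	}
	start = binary.BigEndian.Uint32(ipnet.IP.To4())
	ones, bits := ipnet.Mask.Size()
	end = start | (1<<uint(bits-ones) - 1) // set all host bits
	return start, end, nil
}

func main() {
	start, end, _ := v4Range("8.8.8.0/24")
	fmt.Println(start, end) // 134744064 134744319
	ip := binary.BigEndian.Uint32(net.ParseIP("8.8.8.8").To4())
	fmt.Println(ip >= start && ip <= end) // true
}
```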
### Database Schema
```sql
-- Core tables
asns(id, number, handle, description, first_seen, last_seen)
prefixes_v4(id, prefix, mask_length, first_seen, last_seen)
prefixes_v6(id, prefix, mask_length, first_seen, last_seen)
-- Live routing tables (one per IP version)
live_routes_v4(id, prefix, mask_length, origin_asn, peer_ip, as_path,
next_hop, last_updated, v4_ip_start, v4_ip_end)
live_routes_v6(id, prefix, mask_length, origin_asn, peer_ip, as_path,
next_hop, last_updated)
-- Relationship tracking
peerings(id, as_a, as_b, first_seen, last_seen)
bgp_peers(id, peer_ip, peer_asn, last_message_type, last_seen)
```
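With that schema, an IP-to-AS lookup on the IPv4 table becomes an indexed range scan over `v4_ip_start`/`v4_ip_end`. The sketch below assumes the column names listed above and a `database/sql` handle; the project's real queries live in `internal/database` and may differ.

```go
// Sketch: find the most specific IPv4 route covering an address.
// Column names come from the schema above; query text is illustrative.
package sketch

import "database/sql"

const lookupQuery = `
SELECT prefix, mask_length, origin_asn, as_path
FROM live_routes_v4
WHERE v4_ip_start <= ? AND v4_ip_end >= ?
ORDER BY mask_length DESC
LIMIT 1`

func lookupIPv4(db *sql.DB, ip uint32) (prefix string, maskLen, originASN int, asPath string, err error) {
	err = db.QueryRow(lookupQuery, ip, ip).Scan(&prefix, &maskLen, &originASN, &asPath)
	return prefix, maskLen, originASN, asPath, err
}
```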
## Configuration
Configuration is handled via environment variables and OS-specific paths:
| Variable | Default | Description |
|----------|---------|-------------|
| `PORT` | `8080` | HTTP server port |
| `DEBUG` | (empty) | Set to `routewatch` for debug logging |
State directory (database location):
- macOS: `~/Library/Application Support/routewatch/`
- Linux: `/var/lib/routewatch/` or `~/.local/share/routewatch/`
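A hedged sketch of reading this configuration (the real logic lives in `internal/config`; this just mirrors the table above):

```go
// Illustrative only: reads PORT and DEBUG as documented above.
package sketch

import (
	"os"
	"strings"
)

type config struct {
	Port  string // HTTP listen port, defaults to 8080
	Debug bool   // true when DEBUG contains "routewatch"
}

func loadConfig() config {
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	return config{
		Port:  port,
		Debug: strings.Contains(os.Getenv("DEBUG"), "routewatch"),
	}
}
```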
## Development
```bash
# Run tests
make test
# Format code
make fmt
# Run linter
make lint
# Build
make
```
## License
See LICENSE file.


@@ -1,7 +0,0 @@
#!/bin/bash
cd /var/lib/berlin.sneak.app.routewatch
chown -R routewatch:routewatch .
chmod 700 .
exec runuser -u routewatch -- /app/routewatch

go.mod

@@ -3,18 +3,18 @@ module git.eeqj.de/sneak/routewatch
go 1.24.4
require (
github.com/dustin/go-humanize v1.0.1
github.com/go-chi/chi/v5 v5.2.2
github.com/google/uuid v1.6.0
github.com/mattn/go-sqlite3 v1.14.29
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
go.uber.org/fx v1.24.0
golang.org/x/term v0.33.0
)
require (
github.com/dustin/go-humanize v1.0.1 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
)

File diff suppressed because it is too large


@ -1,5 +1,3 @@
// Package database provides SQLite storage for BGP routing data including ASNs,
// prefixes, announcements, peerings, and live route tables.
package database
import (
@@ -7,8 +5,7 @@ import (
"time"
)
// Stats contains database statistics including counts for ASNs, prefixes,
// peerings, peers, and live routes, as well as file size and prefix distribution data.
// Stats contains database statistics
type Stats struct {
ASNs int
Prefixes int
@@ -18,15 +15,11 @@ type Stats struct {
Peers int
FileSizeBytes int64
LiveRoutes int
OldestRoute *time.Time
NewestRoute *time.Time
IPv4PrefixDistribution []PrefixDistribution
IPv6PrefixDistribution []PrefixDistribution
}
// Store defines the interface for database operations. It provides methods for
// managing ASNs, prefixes, announcements, peerings, BGP peers, and live routes.
// Implementations must be safe for concurrent use.
// Store defines the interface for database operations
type Store interface {
// ASN operations
GetOrCreateASN(number int, timestamp time.Time) (*ASN, error)
@@ -34,7 +27,6 @@ type Store interface {
// Prefix operations
GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error)
UpdatePrefixesBatch(prefixes map[string]time.Time) error
// Announcement operations
RecordAnnouncement(announcement *Announcement) error
@@ -63,19 +55,10 @@ type Store interface {
// IP lookup operations
GetASInfoForIP(ip string) (*ASInfo, error)
GetASInfoForIPContext(ctx context.Context, ip string) (*ASInfo, error)
GetIPInfo(ip string) (*IPInfo, error)
GetIPInfoContext(ctx context.Context, ip string) (*IPInfo, error)
// ASN WHOIS operations
GetNextStaleASN(ctx context.Context, staleThreshold time.Duration) (int, error)
UpdateASNWHOIS(ctx context.Context, update *ASNWHOISUpdate) error
GetWHOISStats(ctx context.Context, staleThreshold time.Duration) (*WHOISStats, error)
// AS and prefix detail operations
GetASDetails(asn int) (*ASN, []LiveRoute, error)
GetASDetailsContext(ctx context.Context, asn int) (*ASN, []LiveRoute, error)
GetASPeers(asn int) ([]ASPeer, error)
GetASPeersContext(ctx context.Context, asn int) ([]ASPeer, error)
GetPrefixDetails(prefix string) ([]LiveRoute, error)
GetPrefixDetailsContext(ctx context.Context, prefix string) ([]LiveRoute, error)
GetRandomPrefixesByLength(maskLength, ipVersion, limit int) ([]LiveRoute, error)
@@ -83,12 +66,6 @@ type Store interface {
// Lifecycle
Close() error
// Maintenance operations
Vacuum(ctx context.Context) error
Analyze(ctx context.Context) error
Checkpoint(ctx context.Context) error
Ping(ctx context.Context) error
}
// Ensure Database implements Store


@@ -1,4 +1,3 @@
// Package database provides SQLite storage for BGP routing data.
package database
import (
@@ -7,34 +6,17 @@ import (
"github.com/google/uuid"
)
// ASN represents an Autonomous System Number with its metadata including
// handle, description, WHOIS data, and first/last seen timestamps.
// ASN represents an Autonomous System Number
type ASN struct {
ASN int `json:"asn"`
ID uuid.UUID `json:"id"`
Number int `json:"number"`
Handle string `json:"handle"`
Description string `json:"description"`
// WHOIS parsed fields
ASName string `json:"as_name,omitempty"`
OrgName string `json:"org_name,omitempty"`
OrgID string `json:"org_id,omitempty"`
Address string `json:"address,omitempty"`
CountryCode string `json:"country_code,omitempty"`
AbuseEmail string `json:"abuse_email,omitempty"`
AbusePhone string `json:"abuse_phone,omitempty"`
TechEmail string `json:"tech_email,omitempty"`
TechPhone string `json:"tech_phone,omitempty"`
RIR string `json:"rir,omitempty"` // ARIN, RIPE, APNIC, LACNIC, AFRINIC
RIRRegDate *time.Time `json:"rir_registration_date,omitempty"`
RIRLastMod *time.Time `json:"rir_last_modified,omitempty"`
WHOISRaw string `json:"whois_raw,omitempty"`
// Timestamps
FirstSeen time.Time `json:"first_seen"`
LastSeen time.Time `json:"last_seen"`
WHOISUpdatedAt *time.Time `json:"whois_updated_at,omitempty"`
}
// Prefix represents an IP prefix (CIDR block) with its IP version (4 or 6)
// and first/last seen timestamps.
// Prefix represents an IP prefix (CIDR block)
type Prefix struct {
ID uuid.UUID `json:"id"`
Prefix string `json:"prefix"`
@@ -43,25 +25,23 @@ type Prefix struct {
LastSeen time.Time `json:"last_seen"`
}
// Announcement represents a BGP announcement or withdrawal event,
// containing the prefix, AS path, origin ASN, peer ASN, next hop, and timestamp.
// Announcement represents a BGP announcement
type Announcement struct {
ID uuid.UUID `json:"id"`
PrefixID uuid.UUID `json:"prefix_id"`
PeerASN int `json:"peer_asn"`
OriginASN int `json:"origin_asn"`
ASNID uuid.UUID `json:"asn_id"`
OriginASNID uuid.UUID `json:"origin_asn_id"`
Path string `json:"path"` // JSON-encoded AS path
NextHop string `json:"next_hop"`
Timestamp time.Time `json:"timestamp"`
IsWithdrawal bool `json:"is_withdrawal"`
}
// ASNPeering represents a peering relationship between two ASNs,
// stored with the lower ASN as ASA and the higher as ASB.
// ASNPeering represents a peering relationship between two ASNs
type ASNPeering struct {
ID uuid.UUID `json:"id"`
ASA int `json:"as_a"`
ASB int `json:"as_b"`
FromASNID uuid.UUID `json:"from_asn_id"`
ToASNID uuid.UUID `json:"to_asn_id"`
FirstSeen time.Time `json:"first_seen"`
LastSeen time.Time `json:"last_seen"`
}
@@ -88,7 +68,7 @@ type PrefixDistribution struct {
Count int `json:"count"`
}
// ASInfo represents AS information for an IP lookup (legacy format)
// ASInfo represents AS information for an IP lookup
type ASInfo struct {
ASN int `json:"asn"`
Handle string `json:"handle"`
@@ -98,38 +78,11 @@ type ASInfo struct {
Age string `json:"age"`
}
// IPInfo represents comprehensive IP information for the /ip endpoint
type IPInfo struct {
IP string `json:"ip"`
PTR []string `json:"ptr,omitempty"`
Netblock string `json:"netblock"`
MaskLength int `json:"mask_length"`
IPVersion int `json:"ip_version"`
NumPeers int `json:"num_peers"`
// AS information
ASN int `json:"asn"`
ASName string `json:"as_name,omitempty"`
Handle string `json:"handle,omitempty"`
Description string `json:"description,omitempty"`
OrgName string `json:"org_name,omitempty"`
OrgID string `json:"org_id,omitempty"`
Address string `json:"address,omitempty"`
CountryCode string `json:"country_code,omitempty"`
AbuseEmail string `json:"abuse_email,omitempty"`
RIR string `json:"rir,omitempty"`
// Timestamps
FirstSeen time.Time `json:"first_seen"`
LastSeen time.Time `json:"last_seen"`
// Indicates if WHOIS data needs refresh (not serialized)
NeedsWHOISRefresh bool `json:"-"`
}
// LiveRouteDeletion represents parameters for deleting a live route
type LiveRouteDeletion struct {
Prefix string
OriginASN int
PeerIP string
IPVersion int
}
// PeerUpdate represents parameters for updating a peer
@@ -139,21 +92,3 @@ type PeerUpdate struct {
MessageType string
Timestamp time.Time
}
// ASNWHOISUpdate contains WHOIS data for updating an ASN record.
type ASNWHOISUpdate struct {
ASN int
ASName string
OrgName string
OrgID string
Address string
CountryCode string
AbuseEmail string
AbusePhone string
TechEmail string
TechPhone string
RIR string
RIRRegDate *time.Time
RIRLastMod *time.Time
WHOISRaw string
}


@@ -1,44 +1,16 @@
-- IMPORTANT: This is the ONLY place where schema changes should be made.
-- We do NOT support migrations. All schema changes MUST be in this file.
-- DO NOT make schema changes anywhere else in the codebase.
CREATE TABLE IF NOT EXISTS asns (
asn INTEGER PRIMARY KEY,
id TEXT PRIMARY KEY,
number INTEGER UNIQUE NOT NULL,
handle TEXT,
description TEXT,
-- WHOIS parsed fields
as_name TEXT,
org_name TEXT,
org_id TEXT,
address TEXT, -- full address (may be multi-line)
country_code TEXT,
abuse_email TEXT,
abuse_phone TEXT,
tech_email TEXT,
tech_phone TEXT,
rir TEXT, -- ARIN, RIPE, APNIC, LACNIC, AFRINIC
rir_registration_date DATETIME,
rir_last_modified DATETIME,
-- Raw WHOIS response
whois_raw TEXT, -- complete WHOIS response text
-- Timestamps
first_seen DATETIME NOT NULL,
last_seen DATETIME NOT NULL,
whois_updated_at DATETIME -- when we last fetched WHOIS data
);
-- IPv4 prefixes table
CREATE TABLE IF NOT EXISTS prefixes_v4 (
id TEXT PRIMARY KEY,
prefix TEXT UNIQUE NOT NULL,
first_seen DATETIME NOT NULL,
last_seen DATETIME NOT NULL
);
-- IPv6 prefixes table
CREATE TABLE IF NOT EXISTS prefixes_v6 (
CREATE TABLE IF NOT EXISTS prefixes (
id TEXT PRIMARY KEY,
prefix TEXT UNIQUE NOT NULL,
ip_version INTEGER NOT NULL, -- 4 for IPv4, 6 for IPv6
first_seen DATETIME NOT NULL,
last_seen DATETIME NOT NULL
);
@@ -46,14 +18,15 @@ CREATE TABLE IF NOT EXISTS prefixes_v6 (
CREATE TABLE IF NOT EXISTS announcements (
id TEXT PRIMARY KEY,
prefix_id TEXT NOT NULL,
peer_asn INTEGER NOT NULL,
origin_asn INTEGER NOT NULL,
asn_id TEXT NOT NULL,
origin_asn_id TEXT NOT NULL,
path TEXT NOT NULL,
next_hop TEXT,
timestamp DATETIME NOT NULL,
is_withdrawal BOOLEAN NOT NULL DEFAULT 0,
FOREIGN KEY (peer_asn) REFERENCES asns(asn),
FOREIGN KEY (origin_asn) REFERENCES asns(asn)
FOREIGN KEY (prefix_id) REFERENCES prefixes(id),
FOREIGN KEY (asn_id) REFERENCES asns(id),
FOREIGN KEY (origin_asn_id) REFERENCES asns(id)
);
CREATE TABLE IF NOT EXISTS peerings (
@@ -75,72 +48,49 @@ CREATE TABLE IF NOT EXISTS bgp_peers (
last_message_type TEXT
);
-- Indexes for prefixes_v4 table
CREATE INDEX IF NOT EXISTS idx_prefixes_v4_prefix ON prefixes_v4(prefix);
-- Indexes for prefixes_v6 table
CREATE INDEX IF NOT EXISTS idx_prefixes_v6_prefix ON prefixes_v6(prefix);
CREATE INDEX IF NOT EXISTS idx_prefixes_ip_version ON prefixes(ip_version);
CREATE INDEX IF NOT EXISTS idx_prefixes_version_prefix ON prefixes(ip_version, prefix);
CREATE INDEX IF NOT EXISTS idx_announcements_timestamp ON announcements(timestamp);
CREATE INDEX IF NOT EXISTS idx_announcements_prefix_id ON announcements(prefix_id);
CREATE INDEX IF NOT EXISTS idx_announcements_peer_asn ON announcements(peer_asn);
CREATE INDEX IF NOT EXISTS idx_announcements_origin_asn ON announcements(origin_asn);
CREATE INDEX IF NOT EXISTS idx_announcements_asn_id ON announcements(asn_id);
CREATE INDEX IF NOT EXISTS idx_peerings_as_a ON peerings(as_a);
CREATE INDEX IF NOT EXISTS idx_peerings_as_b ON peerings(as_b);
CREATE INDEX IF NOT EXISTS idx_peerings_lookup ON peerings(as_a, as_b);
-- Additional indexes for prefixes table
CREATE INDEX IF NOT EXISTS idx_prefixes_prefix ON prefixes(prefix);
-- Indexes for asns table
CREATE INDEX IF NOT EXISTS idx_asns_asn ON asns(asn);
CREATE INDEX IF NOT EXISTS idx_asns_whois_updated_at ON asns(whois_updated_at);
CREATE INDEX IF NOT EXISTS idx_asns_number ON asns(number);
-- Indexes for bgp_peers table
CREATE INDEX IF NOT EXISTS idx_bgp_peers_asn ON bgp_peers(peer_asn);
CREATE INDEX IF NOT EXISTS idx_bgp_peers_last_seen ON bgp_peers(last_seen);
CREATE INDEX IF NOT EXISTS idx_bgp_peers_ip ON bgp_peers(peer_ip);
-- IPv4 routing table maintained by PrefixHandler
CREATE TABLE IF NOT EXISTS live_routes_v4 (
-- Live routing table maintained by PrefixHandler
CREATE TABLE IF NOT EXISTS live_routes (
id TEXT PRIMARY KEY,
prefix TEXT NOT NULL,
mask_length INTEGER NOT NULL, -- CIDR mask length (0-32)
mask_length INTEGER NOT NULL, -- CIDR mask length (0-32 for IPv4, 0-128 for IPv6)
ip_version INTEGER NOT NULL, -- 4 or 6
origin_asn INTEGER NOT NULL,
peer_ip TEXT NOT NULL,
as_path TEXT NOT NULL, -- JSON array
next_hop TEXT NOT NULL,
last_updated DATETIME NOT NULL,
-- IPv4 range columns for fast lookups
ip_start INTEGER NOT NULL, -- Start of IPv4 range as 32-bit unsigned int
ip_end INTEGER NOT NULL, -- End of IPv4 range as 32-bit unsigned int
-- IPv4 range columns for fast lookups (NULL for IPv6)
v4_ip_start INTEGER, -- Start of IPv4 range as 32-bit unsigned int
v4_ip_end INTEGER, -- End of IPv4 range as 32-bit unsigned int
UNIQUE(prefix, origin_asn, peer_ip)
);
-- IPv6 routing table maintained by PrefixHandler
CREATE TABLE IF NOT EXISTS live_routes_v6 (
id TEXT PRIMARY KEY,
prefix TEXT NOT NULL,
mask_length INTEGER NOT NULL, -- CIDR mask length (0-128)
origin_asn INTEGER NOT NULL,
peer_ip TEXT NOT NULL,
as_path TEXT NOT NULL, -- JSON array
next_hop TEXT NOT NULL,
last_updated DATETIME NOT NULL,
-- Note: IPv6 doesn't use integer range columns
UNIQUE(prefix, origin_asn, peer_ip)
);
-- Indexes for live_routes_v4 table
CREATE INDEX IF NOT EXISTS idx_live_routes_v4_prefix ON live_routes_v4(prefix);
CREATE INDEX IF NOT EXISTS idx_live_routes_v4_mask_length ON live_routes_v4(mask_length);
CREATE INDEX IF NOT EXISTS idx_live_routes_v4_origin_asn ON live_routes_v4(origin_asn);
CREATE INDEX IF NOT EXISTS idx_live_routes_v4_last_updated ON live_routes_v4(last_updated);
-- Indexes for live_routes table
CREATE INDEX IF NOT EXISTS idx_live_routes_prefix ON live_routes(prefix);
CREATE INDEX IF NOT EXISTS idx_live_routes_mask_length ON live_routes(mask_length);
CREATE INDEX IF NOT EXISTS idx_live_routes_ip_version_mask ON live_routes(ip_version, mask_length);
CREATE INDEX IF NOT EXISTS idx_live_routes_last_updated ON live_routes(last_updated);
-- Indexes for IPv4 range queries
CREATE INDEX IF NOT EXISTS idx_live_routes_v4_ip_range ON live_routes_v4(ip_start, ip_end);
-- Index to optimize prefix distribution queries
CREATE INDEX IF NOT EXISTS idx_live_routes_v4_mask_prefix ON live_routes_v4(mask_length, prefix);
-- Indexes for live_routes_v6 table
CREATE INDEX IF NOT EXISTS idx_live_routes_v6_prefix ON live_routes_v6(prefix);
CREATE INDEX IF NOT EXISTS idx_live_routes_v6_mask_length ON live_routes_v6(mask_length);
CREATE INDEX IF NOT EXISTS idx_live_routes_v6_origin_asn ON live_routes_v6(origin_asn);
CREATE INDEX IF NOT EXISTS idx_live_routes_v6_last_updated ON live_routes_v6(last_updated);
-- Index to optimize prefix distribution queries
CREATE INDEX IF NOT EXISTS idx_live_routes_v6_mask_prefix ON live_routes_v6(mask_length, prefix);
CREATE INDEX IF NOT EXISTS idx_live_routes_ipv4_range ON live_routes(v4_ip_start, v4_ip_end) WHERE ip_version = 4;
-- Index to optimize COUNT(DISTINCT prefix) queries
CREATE INDEX IF NOT EXISTS idx_live_routes_ip_mask_prefix ON live_routes(ip_version, mask_length, prefix);


@@ -8,7 +8,7 @@ import (
"git.eeqj.de/sneak/routewatch/internal/logger"
)
const slowQueryThreshold = 25 * time.Millisecond
const slowQueryThreshold = 50 * time.Millisecond
// logSlowQuery logs queries that take longer than slowQueryThreshold
func logSlowQuery(logger *logger.Logger, query string, start time.Time) {


@@ -1,8 +1,4 @@
// Package logger provides a structured logger with source location tracking.
// It wraps the standard library's log/slog package and automatically enriches
// log messages with the file name, line number, and function name of the caller.
// The output format is automatically selected based on the runtime environment:
// human-readable text for terminals, JSON for non-terminal output.
// Package logger provides a structured logger with source location tracking
package logger
import (
@@ -16,25 +12,17 @@ import (
"golang.org/x/term"
)
// Logger wraps slog.Logger to add automatic source location information
// to all log messages. It embeds slog.Logger and provides the same logging
// methods (Debug, Info, Warn, Error) but enriches each message with the
// file name, line number, and function name of the caller.
// Logger wraps slog.Logger to add source location information
type Logger struct {
*slog.Logger
}
// AsSlog returns the underlying slog.Logger for use with APIs that require
// a standard slog.Logger instance rather than the custom Logger type.
// AsSlog returns the underlying slog.Logger
func (l *Logger) AsSlog() *slog.Logger {
return l.Logger
}
// New creates a new Logger with an appropriate handler based on the runtime
// environment. If stdout is a terminal, it uses a human-readable text format;
// otherwise, it outputs JSON for structured log aggregation. The log level
// defaults to Info, but can be set to Debug by including "routewatch" in the
// DEBUG environment variable.
// New creates a new logger with appropriate handler based on environment
func New() *Logger {
level := slog.LevelInfo
if debug := os.Getenv("DEBUG"); strings.Contains(debug, "routewatch") {
@@ -57,10 +45,7 @@ func New() *Logger {
return &Logger{Logger: slog.New(handler)}
}
// sourceSkipLevel defines the number of call stack frames to skip when
// determining the caller's source location. This accounts for the logger
// method itself and the getSourceAttrs helper function.
const sourceSkipLevel = 2
const sourceSkipLevel = 2 // Skip levels for source location tracking
// getSourceAttrs returns attributes for the calling source location
func getSourceAttrs() []slog.Attr {
@@ -90,10 +75,7 @@ func getSourceAttrs() []slog.Attr {
return attrs
}
// Debug logs a message at debug level with automatic source location tracking.
// Additional structured attributes can be passed as key-value pairs in args.
// Debug messages are only output when the DEBUG environment variable contains
// "routewatch".
// Debug logs at debug level with source location
func (l *Logger) Debug(msg string, args ...any) {
sourceAttrs := getSourceAttrs()
allArgs := make([]any, 0, len(args)+len(sourceAttrs)*2)
@@ -109,8 +91,7 @@ func (l *Logger) Debug(msg string, args ...any) {
l.Logger.Debug(msg, allArgs...)
}
// Info logs a message at info level with automatic source location tracking.
// Additional structured attributes can be passed as key-value pairs in args.
// Info logs at info level with source location
func (l *Logger) Info(msg string, args ...any) {
sourceAttrs := getSourceAttrs()
allArgs := make([]any, 0, len(args)+len(sourceAttrs)*2)
@@ -126,8 +107,7 @@ func (l *Logger) Info(msg string, args ...any) {
l.Logger.Info(msg, allArgs...)
}
// Warn logs a message at warn level with automatic source location tracking.
// Additional structured attributes can be passed as key-value pairs in args.
// Warn logs at warn level with source location
func (l *Logger) Warn(msg string, args ...any) {
sourceAttrs := getSourceAttrs()
allArgs := make([]any, 0, len(args)+len(sourceAttrs)*2)
@@ -143,8 +123,7 @@ func (l *Logger) Warn(msg string, args ...any) {
l.Logger.Warn(msg, allArgs...)
}
// Error logs a message at error level with automatic source location tracking.
// Additional structured attributes can be passed as key-value pairs in args.
// Error logs at error level with source location
func (l *Logger) Error(msg string, args ...any) {
sourceAttrs := getSourceAttrs()
allArgs := make([]any, 0, len(args)+len(sourceAttrs)*2)
@@ -160,16 +139,12 @@ func (l *Logger) Error(msg string, args ...any) {
l.Logger.Error(msg, allArgs...)
}
// With returns a new Logger with additional structured attributes that will
// be included in all subsequent log messages. The args parameter accepts
// key-value pairs in the same format as the logging methods.
// With returns a new logger with additional attributes
func (l *Logger) With(args ...any) *Logger {
return &Logger{Logger: l.Logger.With(args...)}
}
// WithGroup returns a new Logger that adds the specified group name as a
// prefix to all attribute keys in subsequent log messages. This is useful
// for organizing related attributes under a common namespace.
// WithGroup returns a new logger with a group prefix
func (l *Logger) WithGroup(name string) *Logger {
return &Logger{Logger: l.Logger.WithGroup(name)}
}


@@ -15,29 +15,16 @@ type Tracker struct {
registry metrics.Registry
connectedSince time.Time
isConnected atomic.Bool
reconnectCount atomic.Uint64
// Stream metrics (decompressed data)
// Stream metrics
messageCounter metrics.Counter
byteCounter metrics.Counter
messageRate metrics.Meter
byteRate metrics.Meter
// Wire bytes metrics (actual bytes on the wire, before decompression)
wireByteCounter metrics.Counter
wireByteRate metrics.Meter
// Route update metrics
ipv4UpdateRate metrics.Meter
ipv6UpdateRate metrics.Meter
// Announcement/withdrawal metrics
announcementCounter metrics.Counter
withdrawalCounter metrics.Counter
churnRate metrics.Meter // combined announcements + withdrawals per second
// BGP peer tracking
bgpPeerCount atomic.Int32
}
// New creates a new metrics tracker
@@ -50,33 +37,19 @@ func New() *Tracker {
byteCounter: metrics.NewCounter(),
messageRate: metrics.NewMeter(),
byteRate: metrics.NewMeter(),
wireByteCounter: metrics.NewCounter(),
wireByteRate: metrics.NewMeter(),
ipv4UpdateRate: metrics.NewMeter(),
ipv6UpdateRate: metrics.NewMeter(),
announcementCounter: metrics.NewCounter(),
withdrawalCounter: metrics.NewCounter(),
churnRate: metrics.NewMeter(),
}
}
// SetConnected updates the connection status
func (t *Tracker) SetConnected(connected bool) {
wasConnected := t.isConnected.Swap(connected)
t.isConnected.Store(connected)
if connected {
t.mu.Lock()
t.connectedSince = time.Now()
t.mu.Unlock()
// Increment reconnect count (but not for the initial connection)
if wasConnected || t.reconnectCount.Load() > 0 {
t.reconnectCount.Add(1)
}
}
}
// GetReconnectCount returns the number of reconnections since startup
func (t *Tracker) GetReconnectCount() uint64 {
return t.reconnectCount.Load()
}
// IsConnected returns the current connection status
@@ -84,7 +57,7 @@ func (t *Tracker) IsConnected() bool {
return t.isConnected.Load()
}
// RecordMessage records a received message and its decompressed size
// RecordMessage records a received message and its size
func (t *Tracker) RecordMessage(bytes int64) {
t.messageCounter.Inc(1)
t.byteCounter.Inc(bytes)
@@ -92,12 +65,6 @@ func (t *Tracker) RecordMessage(bytes int64) {
t.byteRate.Mark(bytes)
}
// RecordWireBytes records actual bytes received on the wire (before decompression)
func (t *Tracker) RecordWireBytes(bytes int64) {
t.wireByteCounter.Inc(bytes)
t.wireByteRate.Mark(bytes)
}
// GetStreamMetrics returns current streaming metrics
func (t *Tracker) GetStreamMetrics() StreamMetrics {
t.mu.RLock()
@@ -109,29 +76,22 @@ func (t *Tracker) GetStreamMetrics() StreamMetrics {
// Safely convert counters to uint64
msgCount := t.messageCounter.Count()
byteCount := t.byteCounter.Count()
wireByteCount := t.wireByteCounter.Count()
var totalMessages, totalBytes, totalWireBytes uint64
var totalMessages, totalBytes uint64
if msgCount >= 0 {
totalMessages = uint64(msgCount)
}
if byteCount >= 0 {
totalBytes = uint64(byteCount)
}
if wireByteCount >= 0 {
totalWireBytes = uint64(wireByteCount)
}
return StreamMetrics{
TotalMessages: totalMessages,
TotalBytes: totalBytes,
TotalWireBytes: totalWireBytes,
ConnectedSince: connectedSince,
Connected: t.isConnected.Load(),
MessagesPerSec: t.messageRate.Rate1(),
BitsPerSec: t.byteRate.Rate1() * bitsPerByte,
WireBitsPerSec: t.wireByteRate.Rate1() * bitsPerByte,
ReconnectCount: t.reconnectCount.Load(),
}
}
@@ -145,56 +105,6 @@ func (t *Tracker) RecordIPv6Update() {
t.ipv6UpdateRate.Mark(1)
}
// RecordAnnouncement records a route announcement
func (t *Tracker) RecordAnnouncement() {
t.announcementCounter.Inc(1)
t.churnRate.Mark(1)
}
// RecordWithdrawal records a route withdrawal
func (t *Tracker) RecordWithdrawal() {
t.withdrawalCounter.Inc(1)
t.churnRate.Mark(1)
}
// SetBGPPeerCount updates the current BGP peer count
func (t *Tracker) SetBGPPeerCount(count int) {
// BGP peer count is always small (< 1000), so int32 is safe
if count > 0 && count < 1<<31 {
t.bgpPeerCount.Store(int32(count)) //nolint:gosec // count is validated
}
}
// GetBGPPeerCount returns the current BGP peer count
func (t *Tracker) GetBGPPeerCount() int {
return int(t.bgpPeerCount.Load())
}
// GetAnnouncementCount returns the total announcement count
func (t *Tracker) GetAnnouncementCount() uint64 {
count := t.announcementCounter.Count()
if count < 0 {
return 0
}
return uint64(count)
}
// GetWithdrawalCount returns the total withdrawal count
func (t *Tracker) GetWithdrawalCount() uint64 {
count := t.withdrawalCounter.Count()
if count < 0 {
return 0
}
return uint64(count)
}
// GetChurnRate returns the route churn rate per second
func (t *Tracker) GetChurnRate() float64 {
return t.churnRate.Rate1()
}
// GetRouteMetrics returns current route update metrics
func (t *Tracker) GetRouteMetrics() RouteMetrics {
return RouteMetrics{
@@ -205,30 +115,16 @@ func (t *Tracker) GetRouteMetrics() RouteMetrics {
// StreamMetrics contains streaming statistics
type StreamMetrics struct {
// TotalMessages is the total number of messages received since startup
TotalMessages uint64
// TotalBytes is the total number of decompressed bytes received since startup
TotalBytes uint64
// TotalWireBytes is the total number of bytes received on the wire (before decompression)
TotalWireBytes uint64
// ConnectedSince is the time when the current connection was established
ConnectedSince time.Time
// Connected indicates whether the stream is currently connected
Connected bool
// MessagesPerSec is the rate of messages received per second (1-minute average)
MessagesPerSec float64
// BitsPerSec is the rate of decompressed bits received per second (1-minute average)
BitsPerSec float64
// WireBitsPerSec is the rate of bits received on the wire per second (1-minute average)
WireBitsPerSec float64
// ReconnectCount is the number of reconnections since startup
ReconnectCount uint64
}
// RouteMetrics contains route update statistics
type RouteMetrics struct {
// IPv4UpdatesPerSec is the rate of IPv4 route updates per second (1-minute average)
IPv4UpdatesPerSec float64
// IPv6UpdatesPerSec is the rate of IPv6 route updates per second (1-minute average)
IPv6UpdatesPerSec float64
}


@@ -6,14 +6,10 @@ import (
"time"
)
// ASPath represents a BGP AS path as a slice of AS numbers.
// It handles JSON unmarshaling of both simple arrays and nested AS sets,
// flattening any nested structures into a single sequence of AS numbers.
// ASPath represents an AS path that may contain nested AS sets
type ASPath []int
// UnmarshalJSON implements the json.Unmarshaler interface for ASPath.
// It handles both simple integer arrays [1, 2, 3] and nested AS sets
// like [1, [2, 3], 4], flattening them into a single slice of integers.
// UnmarshalJSON implements custom JSON unmarshaling to flatten nested arrays
func (p *ASPath) UnmarshalJSON(data []byte) error {
// First try to unmarshal as a simple array of integers
var simple []int
@@ -50,18 +46,13 @@ func (p *ASPath) UnmarshalJSON(data []byte) error {
return nil
}
// RISLiveMessage represents the outer wrapper message from the RIPE RIS Live stream.
// Each message contains a Type field indicating the message type and a Data field
// containing the actual BGP message payload.
// RISLiveMessage represents the outer wrapper from the RIS Live stream
type RISLiveMessage struct {
Type string `json:"type"`
Data RISMessage `json:"data"`
}
// RISMessage represents a BGP update message from the RIPE RIS Live stream.
// It contains metadata about the BGP session (peer, ASN, host) along with
// the actual BGP update data including AS path, communities, announcements,
// and withdrawals.
// RISMessage represents a message from the RIS Live stream
type RISMessage struct {
Type string `json:"type"`
Timestamp float64 `json:"timestamp"`
@@ -83,9 +74,7 @@ type RISMessage struct {
Raw string `json:"raw,omitempty"`
}
// RISAnnouncement represents a BGP route announcement within a RIS message.
// It contains the next hop IP address and the list of prefixes being announced
// via that next hop.
// RISAnnouncement represents announcement data within a RIS message
type RISAnnouncement struct {
NextHop string `json:"next_hop"`
Prefixes []string `json:"prefixes"`
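As the comments above describe, `ASPath` flattens nested AS sets while unmarshaling. A hedged usage sketch, assuming it runs inside this module (the package is internal and cannot be imported from outside):

```go
// Illustrative test-style usage of ASPath's flattening UnmarshalJSON.
package ristypes_test

import (
	"encoding/json"
	"testing"

	"git.eeqj.de/sneak/routewatch/internal/ristypes"
)

func TestASPathFlattening(t *testing.T) {
	var p ristypes.ASPath
	// [64512, 64513] is an AS set nested inside the path.
	if err := json.Unmarshal([]byte(`[3356, [64512, 64513], 15169]`), &p); err != nil {
		t.Fatal(err)
	}
	if len(p) != 4 {
		t.Fatalf("expected 4 ASNs after flattening, got %v", p)
	}
}
```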


@@ -43,8 +43,6 @@ type RouteWatch struct {
peerHandler *PeerHandler
prefixHandler *PrefixHandler
peeringHandler *PeeringHandler
asnFetcher *ASNFetcher
dbMaintainer *DBMaintainer
}
// New creates a new RouteWatch instance
@@ -111,15 +109,6 @@ func (rw *RouteWatch) Run(ctx context.Context) error {
return err
}
// Start ASN WHOIS fetcher for background updates
rw.asnFetcher = NewASNFetcher(rw.db, rw.logger.Logger)
rw.asnFetcher.Start()
rw.server.SetASNFetcher(rw.asnFetcher)
// Start database maintenance goroutine
rw.dbMaintainer = NewDBMaintainer(rw.db, rw.logger.Logger)
rw.dbMaintainer.Start()
// Wait for context cancellation
<-ctx.Done()
@@ -155,16 +144,6 @@ func (rw *RouteWatch) Shutdown() {
rw.peeringHandler.Stop()
}
// Stop ASN WHOIS fetcher
if rw.asnFetcher != nil {
rw.asnFetcher.Stop()
}
// Stop database maintainer
if rw.dbMaintainer != nil {
rw.dbMaintainer.Stop()
}
// Stop services
rw.streamer.Stop()


@@ -61,7 +61,8 @@ func (m *mockStore) GetOrCreateASN(number int, timestamp time.Time) (*database.A
}
asn := &database.ASN{
ASN: number,
ID: uuid.New(),
Number: number,
FirstSeen: timestamp,
LastSeen: timestamp,
}
@@ -71,37 +72,6 @@ func (m *mockStore) GetOrCreateASN(number int, timestamp time.Time) (*database.A
return asn, nil
}
// UpdatePrefixesBatch mock implementation
func (m *mockStore) UpdatePrefixesBatch(prefixes map[string]time.Time) error {
m.mu.Lock()
defer m.mu.Unlock()
for prefix, timestamp := range prefixes {
if p, exists := m.Prefixes[prefix]; exists {
p.LastSeen = timestamp
} else {
const (
ipVersionV4 = 4
ipVersionV6 = 6
)
ipVersion := ipVersionV4
if strings.Contains(prefix, ":") {
ipVersion = ipVersionV6
}
m.Prefixes[prefix] = &database.Prefix{
ID: uuid.New(),
Prefix: prefix,
IPVersion: ipVersion,
FirstSeen: timestamp,
LastSeen: timestamp,
}
}
}
return nil
}
// GetOrCreatePrefix mock implementation
func (m *mockStore) GetOrCreatePrefix(prefix string, timestamp time.Time) (*database.Prefix, error) {
m.mu.Lock()
@@ -291,63 +261,6 @@ func (m *mockStore) GetRandomPrefixesByLengthContext(ctx context.Context, maskLe
return m.GetRandomPrefixesByLength(maskLength, ipVersion, limit)
}
// GetASPeers mock implementation
func (m *mockStore) GetASPeers(asn int) ([]database.ASPeer, error) {
// Return empty peers for now
return []database.ASPeer{}, nil
}
// GetASPeersContext mock implementation with context support
func (m *mockStore) GetASPeersContext(ctx context.Context, asn int) ([]database.ASPeer, error) {
return m.GetASPeers(asn)
}
// GetIPInfo mock implementation
func (m *mockStore) GetIPInfo(ip string) (*database.IPInfo, error) {
return m.GetIPInfoContext(context.Background(), ip)
}
// GetIPInfoContext mock implementation with context support
func (m *mockStore) GetIPInfoContext(ctx context.Context, ip string) (*database.IPInfo, error) {
now := time.Now()
return &database.IPInfo{
IP: ip,
Netblock: "8.8.8.0/24",
MaskLength: 24,
IPVersion: 4,
NumPeers: 3,
ASN: 15169,
Handle: "GOOGLE",
Description: "Google LLC",
CountryCode: "US",
FirstSeen: now.Add(-24 * time.Hour),
LastSeen: now,
}, nil
}
// GetNextStaleASN mock implementation
func (m *mockStore) GetNextStaleASN(ctx context.Context, staleThreshold time.Duration) (int, error) {
return 0, database.ErrNoStaleASN
}
// UpdateASNWHOIS mock implementation
func (m *mockStore) UpdateASNWHOIS(ctx context.Context, update *database.ASNWHOISUpdate) error {
return nil
}
// GetWHOISStats mock implementation
func (m *mockStore) GetWHOISStats(ctx context.Context, staleThreshold time.Duration) (*database.WHOISStats, error) {
m.mu.Lock()
defer m.mu.Unlock()
return &database.WHOISStats{
TotalASNs: len(m.ASNs),
FreshASNs: 0,
StaleASNs: 0,
NeverFetched: len(m.ASNs),
}, nil
}
// UpsertLiveRouteBatch mock implementation
func (m *mockStore) UpsertLiveRouteBatch(routes []*database.LiveRoute) error {
m.mu.Lock()
@@ -389,7 +302,8 @@ func (m *mockStore) GetOrCreateASNBatch(asns map[int]time.Time) error {
for number, timestamp := range asns {
if _, exists := m.ASNs[number]; !exists {
m.ASNs[number] = &database.ASN{
ASN: number,
ID: uuid.New(),
Number: number,
FirstSeen: timestamp,
LastSeen: timestamp,
}
@@ -405,26 +319,6 @@ func (m *mockStore) UpdatePeerBatch(peers map[string]database.PeerUpdate) error
return nil
}
// Vacuum mock implementation
func (m *mockStore) Vacuum(ctx context.Context) error {
return nil
}
// Analyze mock implementation
func (m *mockStore) Analyze(ctx context.Context) error {
return nil
}
// Checkpoint mock implementation
func (m *mockStore) Checkpoint(ctx context.Context) error {
return nil
}
// Ping mock implementation
func (m *mockStore) Ping(ctx context.Context) error {
return nil
}
func TestRouteWatchLiveFeed(t *testing.T) {
// Create mock database


@@ -22,10 +22,7 @@ const (
asnBatchTimeout = 2 * time.Second
)
// ASHandler processes Autonomous System Number (ASN) information extracted from
// BGP UPDATE messages. It uses batched database operations to efficiently store
// ASN data, collecting operations into batches that are flushed either when the
// batch reaches a size threshold or after a timeout period.
// ASHandler handles ASN information from BGP messages using batched operations
type ASHandler struct {
db database.Store
logger *logger.Logger
@@ -43,11 +40,7 @@ type asnOp struct {
timestamp time.Time
}
// NewASHandler creates and returns a new ASHandler instance. It initializes
// the batching system and starts a background goroutine that periodically
// flushes accumulated ASN operations to the database. The caller must call
// Stop when finished to ensure all pending operations are flushed and the
// background goroutine is terminated.
// NewASHandler creates a new batched ASN handler
func NewASHandler(db database.Store, logger *logger.Logger) *ASHandler {
h := &ASHandler{
db: db,
@@ -64,27 +57,19 @@ func NewASHandler(db database.Store, logger *logger.Logger) *ASHandler {
return h
}
// WantsMessage reports whether this handler should process messages of the
// given type. ASHandler only processes "UPDATE" messages, as these contain
// the AS path information needed to track autonomous systems.
// WantsMessage returns true if this handler wants to process messages of the given type
func (h *ASHandler) WantsMessage(messageType string) bool {
// We only care about UPDATE messages for the database
return messageType == "UPDATE"
}
// QueueCapacity returns the recommended message queue size for this handler.
// ASHandler uses a large queue capacity to accommodate high-volume BGP streams,
// as the batching mechanism allows efficient processing of accumulated messages.
// QueueCapacity returns the desired queue capacity for this handler
func (h *ASHandler) QueueCapacity() int {
// Batching allows us to use a larger queue
return asHandlerQueueSize
}
// HandleMessage processes a RIS Live BGP message by extracting all ASNs from
// the AS path and queuing them for batch insertion into the database. The
// origin ASN (last element in the path) and all transit ASNs are recorded
// with their associated timestamps. The batch is automatically flushed when
// it reaches the configured size threshold.
// HandleMessage processes a RIS message and queues database operations
func (h *ASHandler) HandleMessage(msg *ristypes.RISMessage) {
// Use the pre-parsed timestamp
timestamp := msg.ParsedTimestamp
@@ -171,11 +156,7 @@ func (h *ASHandler) flushBatchLocked() {
h.lastFlush = time.Now()
}
// Stop gracefully shuts down the ASHandler by signaling the background flush
// goroutine to terminate and waiting for it to complete. Any pending ASN
// operations in the current batch are flushed to the database before Stop
// returns. This method should be called during application shutdown to ensure
// no data is lost.
// Stop gracefully stops the handler and flushes remaining batches
func (h *ASHandler) Stop() {
close(h.stopCh)
h.wg.Wait()
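As the removed comment notes, the origin ASN is the last element of the (flattened) AS path and the remaining elements are transit ASNs. A tiny illustrative helper, not the handler's actual code:

```go
// Sketch: split a flattened AS path into origin and transit ASNs.
package sketch

func splitASPath(path []int) (origin int, transit []int) {
	if len(path) == 0 {
		return 0, nil
	}
	return path[len(path)-1], path[:len(path)-1]
}
```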


@@ -1,325 +0,0 @@
// Package routewatch contains the ASN WHOIS fetcher for background updates.
package routewatch
import (
"context"
"log/slog"
"sync"
"time"
"git.eeqj.de/sneak/routewatch/internal/database"
"git.eeqj.de/sneak/routewatch/internal/server"
"git.eeqj.de/sneak/routewatch/internal/whois"
)
// ASN fetcher configuration constants.
const (
// baseInterval is the starting interval between fetch attempts.
baseInterval = 15 * time.Second
// minInterval is the minimum interval after successes (rate limit).
minInterval = 1 * time.Second
// maxInterval is the maximum interval after failures (backoff cap).
maxInterval = 5 * time.Minute
// backoffMultiplier is how much to multiply interval on failure.
backoffMultiplier = 2
// whoisStaleThreshold is how old WHOIS data can be before refresh.
whoisStaleThreshold = 30 * 24 * time.Hour // 30 days
// immediateQueueSize is the buffer size for immediate fetch requests.
immediateQueueSize = 100
// statsWindow is how long to keep stats for.
statsWindow = time.Hour
)
// ASNFetcher handles background WHOIS lookups for ASNs.
type ASNFetcher struct {
db database.Store
whoisClient *whois.Client
logger *slog.Logger
immediateQueue chan int
stopCh chan struct{}
wg sync.WaitGroup
// fetchMu ensures only one fetch runs at a time
fetchMu sync.Mutex
// interval tracking with mutex protection
intervalMu sync.Mutex
currentInterval time.Duration
consecutiveFails int
// hourly stats tracking
statsMu sync.Mutex
successTimes []time.Time
errorTimes []time.Time
}
// NewASNFetcher creates a new ASN fetcher.
func NewASNFetcher(db database.Store, logger *slog.Logger) *ASNFetcher {
return &ASNFetcher{
db: db,
whoisClient: whois.NewClient(),
logger: logger.With("component", "asn_fetcher"),
immediateQueue: make(chan int, immediateQueueSize),
stopCh: make(chan struct{}),
currentInterval: baseInterval,
successTimes: make([]time.Time, 0),
errorTimes: make([]time.Time, 0),
}
}
// Start begins the background ASN fetcher goroutine.
func (f *ASNFetcher) Start() {
f.wg.Add(1)
go f.run()
f.logger.Info("ASN fetcher started",
"base_interval", baseInterval,
"min_interval", minInterval,
"max_interval", maxInterval,
)
}
// Stop gracefully shuts down the fetcher.
func (f *ASNFetcher) Stop() {
close(f.stopCh)
f.wg.Wait()
f.logger.Info("ASN fetcher stopped")
}
// QueueImmediate queues an ASN for immediate WHOIS lookup.
// Non-blocking - if queue is full, the request is dropped.
func (f *ASNFetcher) QueueImmediate(asn int) {
select {
case f.immediateQueue <- asn:
f.logger.Debug("Queued immediate WHOIS lookup", "asn", asn)
default:
f.logger.Debug("Immediate queue full, dropping request", "asn", asn)
}
}
// GetStats returns statistics about fetcher activity.
func (f *ASNFetcher) GetStats() server.ASNFetcherStats {
f.statsMu.Lock()
defer f.statsMu.Unlock()
f.intervalMu.Lock()
interval := f.currentInterval
fails := f.consecutiveFails
f.intervalMu.Unlock()
// Prune old entries and count
cutoff := time.Now().Add(-statsWindow)
f.successTimes = pruneOldTimes(f.successTimes, cutoff)
f.errorTimes = pruneOldTimes(f.errorTimes, cutoff)
return server.ASNFetcherStats{
SuccessesLastHour: len(f.successTimes),
ErrorsLastHour: len(f.errorTimes),
CurrentInterval: interval,
ConsecutiveFails: fails,
}
}
// pruneOldTimes removes times older than cutoff and returns the pruned slice.
func pruneOldTimes(times []time.Time, cutoff time.Time) []time.Time {
result := make([]time.Time, 0, len(times))
for _, t := range times {
if t.After(cutoff) {
result = append(result, t)
}
}
return result
}
// getInterval returns the current fetch interval.
func (f *ASNFetcher) getInterval() time.Duration {
f.intervalMu.Lock()
defer f.intervalMu.Unlock()
return f.currentInterval
}
// recordSuccess decreases the interval on successful fetch.
func (f *ASNFetcher) recordSuccess() {
f.intervalMu.Lock()
f.consecutiveFails = 0
// Decrease interval by half, but not below minimum
newInterval := f.currentInterval / backoffMultiplier
if newInterval < minInterval {
newInterval = minInterval
}
if newInterval != f.currentInterval {
f.logger.Debug("Decreased fetch interval",
"old_interval", f.currentInterval,
"new_interval", newInterval,
)
f.currentInterval = newInterval
}
f.intervalMu.Unlock()
// Record success time for stats
f.statsMu.Lock()
f.successTimes = append(f.successTimes, time.Now())
f.statsMu.Unlock()
}
// recordFailure increases the interval on failed fetch using exponential backoff.
func (f *ASNFetcher) recordFailure() {
f.intervalMu.Lock()
f.consecutiveFails++
// Exponential backoff: multiply by 2, capped at max
newInterval := f.currentInterval * backoffMultiplier
if newInterval > maxInterval {
newInterval = maxInterval
}
if newInterval != f.currentInterval {
f.logger.Debug("Increased fetch interval due to failure",
"old_interval", f.currentInterval,
"new_interval", newInterval,
"consecutive_failures", f.consecutiveFails,
)
f.currentInterval = newInterval
}
f.intervalMu.Unlock()
// Record error time for stats
f.statsMu.Lock()
f.errorTimes = append(f.errorTimes, time.Now())
f.statsMu.Unlock()
}
// run is the main background loop.
func (f *ASNFetcher) run() {
defer f.wg.Done()
timer := time.NewTimer(f.getInterval())
defer timer.Stop()
for {
select {
case <-f.stopCh:
return
case asn := <-f.immediateQueue:
// Process immediate request (respects lock)
f.tryFetch(asn)
// Reset timer after immediate fetch
timer.Reset(f.getInterval())
case <-timer.C:
// Background fetch of stale/missing ASN
f.fetchNextStale()
// Reset timer with potentially updated interval
timer.Reset(f.getInterval())
}
}
}
// tryFetch attempts to fetch and update an ASN, respecting the fetch lock.
// Returns true if fetch was successful.
func (f *ASNFetcher) tryFetch(asn int) bool {
// Try to acquire lock, skip if another fetch is running
if !f.fetchMu.TryLock() {
f.logger.Debug("Skipping fetch, another fetch in progress", "asn", asn)
return false
}
defer f.fetchMu.Unlock()
return f.fetchAndUpdate(asn)
}
// fetchNextStale finds and fetches the next ASN needing WHOIS data.
func (f *ASNFetcher) fetchNextStale() {
// Try to acquire lock, skip if another fetch is running
if !f.fetchMu.TryLock() {
f.logger.Debug("Skipping stale fetch, another fetch in progress")
return
}
defer f.fetchMu.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
asn, err := f.db.GetNextStaleASN(ctx, whoisStaleThreshold)
if err != nil {
if err != database.ErrNoStaleASN {
f.logger.Error("Failed to get stale ASN", "error", err)
f.recordFailure()
}
// No stale ASN is not a failure, just nothing to do
return
}
f.fetchAndUpdate(asn)
}
// fetchAndUpdate performs a WHOIS lookup and updates the database.
// Returns true if successful.
func (f *ASNFetcher) fetchAndUpdate(asn int) bool {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
f.logger.Info("Fetching WHOIS data", "asn", asn)
info, err := f.whoisClient.LookupASN(ctx, asn)
if err != nil {
f.logger.Error("WHOIS lookup failed", "asn", asn, "error", err)
f.recordFailure()
return false
}
// Update database with WHOIS data
err = f.db.UpdateASNWHOIS(ctx, &database.ASNWHOISUpdate{
ASN: asn,
ASName: info.ASName,
OrgName: info.OrgName,
OrgID: info.OrgID,
Address: info.Address,
CountryCode: info.CountryCode,
AbuseEmail: info.AbuseEmail,
AbusePhone: info.AbusePhone,
TechEmail: info.TechEmail,
TechPhone: info.TechPhone,
RIR: info.RIR,
RIRRegDate: info.RegDate,
RIRLastMod: info.LastMod,
WHOISRaw: info.RawResponse,
})
if err != nil {
f.logger.Error("Failed to update ASN WHOIS data", "asn", asn, "error", err)
f.recordFailure()
return false
}
f.recordSuccess()
f.logger.Info("Updated ASN WHOIS data",
"asn", asn,
"org_name", info.OrgName,
"country", info.CountryCode,
"rir", info.RIR,
"next_interval", f.getInterval(),
)
return true
}
// GetStaleThreshold returns the WHOIS stale threshold duration.
func GetStaleThreshold() time.Duration {
return whoisStaleThreshold
}


@@ -53,11 +53,7 @@ func logDebugStats(logger *logger.Logger) {
}
}
// CLIEntry is the main entry point for the routewatch command-line interface.
// It initializes the application using the fx dependency injection framework,
// sets up signal handling for graceful shutdown, and starts the RouteWatch service.
// This function blocks until the application receives a shutdown signal or encounters
// a fatal error.
// CLIEntry is the main entry point for the CLI
func CLIEntry() {
app := fx.New(
getModule(),


@@ -1,189 +0,0 @@
// Package routewatch contains the database maintainer for background maintenance tasks.
package routewatch
import (
"context"
"log/slog"
"sync"
"time"
"git.eeqj.de/sneak/routewatch/internal/database"
)
// Database maintenance configuration constants.
const (
// checkpointInterval is how often to run WAL checkpoint.
// Frequent checkpoints keep the WAL small, improving read performance.
// Under heavy write load, we need aggressive checkpointing.
checkpointInterval = 5 * time.Second
// vacuumInterval is how often to run incremental vacuum.
// Since incremental vacuum only frees ~1000 pages (~4MB) per run,
// we run it frequently to keep up with deletions.
vacuumInterval = 10 * time.Minute
// analyzeInterval is how often to run ANALYZE.
analyzeInterval = 1 * time.Hour
// checkpointTimeout is the max time for WAL checkpoint.
checkpointTimeout = 10 * time.Second
// vacuumTimeout is the max time for incremental vacuum (should be quick).
vacuumTimeout = 30 * time.Second
// analyzeTimeout is the max time for ANALYZE.
analyzeTimeout = 5 * time.Minute
)
// DBMaintainer handles background database maintenance tasks.
type DBMaintainer struct {
db database.Store
logger *slog.Logger
stopCh chan struct{}
wg sync.WaitGroup
// Stats tracking
statsMu sync.Mutex
lastCheckpoint time.Time
lastVacuum time.Time
lastAnalyze time.Time
checkpointCount int
vacuumCount int
analyzeCount int
lastCheckpointError error
lastVacuumError error
lastAnalyzeError error
}
// NewDBMaintainer creates a new database maintainer.
func NewDBMaintainer(db database.Store, logger *slog.Logger) *DBMaintainer {
return &DBMaintainer{
db: db,
logger: logger.With("component", "db_maintainer"),
stopCh: make(chan struct{}),
}
}
// Start begins the background maintenance goroutine.
func (m *DBMaintainer) Start() {
m.wg.Add(1)
go m.run()
m.logger.Info("Database maintainer started",
"checkpoint_interval", checkpointInterval,
"vacuum_interval", vacuumInterval,
"analyze_interval", analyzeInterval,
)
}
// Stop gracefully shuts down the maintainer.
func (m *DBMaintainer) Stop() {
close(m.stopCh)
m.wg.Wait()
m.logger.Info("Database maintainer stopped")
}
// run is the main background loop.
func (m *DBMaintainer) run() {
defer m.wg.Done()
// Use different timers for each task
checkpointTimer := time.NewTimer(checkpointInterval)
vacuumTimer := time.NewTimer(vacuumInterval)
analyzeTimer := time.NewTimer(analyzeInterval)
defer checkpointTimer.Stop()
defer vacuumTimer.Stop()
defer analyzeTimer.Stop()
for {
select {
case <-m.stopCh:
return
case <-checkpointTimer.C:
m.runCheckpoint()
checkpointTimer.Reset(checkpointInterval)
case <-vacuumTimer.C:
m.runVacuum()
vacuumTimer.Reset(vacuumInterval)
case <-analyzeTimer.C:
m.runAnalyze()
analyzeTimer.Reset(analyzeInterval)
}
}
}
// runCheckpoint performs a WAL checkpoint to keep the WAL file small.
func (m *DBMaintainer) runCheckpoint() {
ctx, cancel := context.WithTimeout(context.Background(), checkpointTimeout)
defer cancel()
startTime := time.Now()
err := m.db.Checkpoint(ctx)
m.statsMu.Lock()
m.lastCheckpoint = time.Now()
m.lastCheckpointError = err
if err == nil {
m.checkpointCount++
}
m.statsMu.Unlock()
if err != nil {
m.logger.Error("WAL checkpoint failed", "error", err, "duration", time.Since(startTime))
} else {
m.logger.Debug("WAL checkpoint completed", "duration", time.Since(startTime))
}
}
// runVacuum performs an incremental vacuum operation on the database.
func (m *DBMaintainer) runVacuum() {
ctx, cancel := context.WithTimeout(context.Background(), vacuumTimeout)
defer cancel()
m.logger.Debug("Running incremental vacuum")
startTime := time.Now()
err := m.db.Vacuum(ctx)
m.statsMu.Lock()
m.lastVacuum = time.Now()
m.lastVacuumError = err
if err == nil {
m.vacuumCount++
}
m.statsMu.Unlock()
if err != nil {
m.logger.Error("Incremental vacuum failed", "error", err, "duration", time.Since(startTime))
} else {
m.logger.Debug("Incremental vacuum completed", "duration", time.Since(startTime))
}
}
// runAnalyze performs an ANALYZE operation on the database.
func (m *DBMaintainer) runAnalyze() {
ctx, cancel := context.WithTimeout(context.Background(), analyzeTimeout)
defer cancel()
m.logger.Info("Starting database ANALYZE")
startTime := time.Now()
err := m.db.Analyze(ctx)
m.statsMu.Lock()
m.lastAnalyze = time.Now()
m.lastAnalyzeError = err
if err == nil {
m.analyzeCount++
}
m.statsMu.Unlock()
if err != nil {
m.logger.Error("ANALYZE failed", "error", err, "duration", time.Since(startTime))
} else {
m.logger.Info("ANALYZE completed", "duration", time.Since(startTime))
}
}
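// Illustrative wiring sketch (an assumption; the daemon code that owns the
// maintainer is not part of this compare view): start maintenance at boot and
// stop it on shutdown. db may be any database.Store implementation.
func exampleRunMaintenance(db database.Store, logger *slog.Logger, shutdown <-chan struct{}) {
	m := NewDBMaintainer(db, logger)
	m.Start()
	defer m.Stop()
	<-shutdown
}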

View File

@ -5,20 +5,14 @@ import (
"git.eeqj.de/sneak/routewatch/internal/ristypes"
)
// SimpleHandler is a basic implementation of streamer.MessageHandler that
// filters messages by type and delegates processing to a callback function.
// It provides a simple way to handle specific RIS message types without
// implementing the full MessageHandler interface from scratch.
// SimpleHandler is a basic implementation of streamer.MessageHandler
type SimpleHandler struct {
logger *logger.Logger
messageTypes []string
callback func(*ristypes.RISMessage)
}
// NewSimpleHandler creates a new SimpleHandler that accepts specific message types.
// The messageTypes parameter specifies which RIS message types this handler will process.
// If messageTypes is empty, the handler will accept all message types.
// The callback function is invoked for each message that passes the type filter.
// NewSimpleHandler creates a handler that accepts specific message types
func NewSimpleHandler(
logger *logger.Logger,
messageTypes []string,
@ -31,9 +25,7 @@ func NewSimpleHandler(
}
}
// WantsMessage returns true if this handler wants to process messages of the given type.
// It checks whether messageType is in the handler's configured list of accepted types.
// If no specific types were configured (empty messageTypes slice), it returns true for all types.
// WantsMessage returns true if this handler wants to process messages of the given type
func (h *SimpleHandler) WantsMessage(messageType string) bool {
// If no specific types are set, accept all messages
if len(h.messageTypes) == 0 {
@ -49,8 +41,7 @@ func (h *SimpleHandler) WantsMessage(messageType string) bool {
return false
}
// HandleMessage processes a RIS message by invoking the configured callback function.
// If no callback was provided during construction, the message is silently ignored.
// HandleMessage processes a RIS message
func (h *SimpleHandler) HandleMessage(msg *ristypes.RISMessage) {
if h.callback != nil {
h.callback(msg)
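// Illustrative usage sketch (an assumption; the registration site is not shown
// in this hunk): a SimpleHandler that only receives UPDATE messages and logs
// the AS-path length of each one, registered on an existing *streamer.Streamer s
// with a *logger.Logger log.
//
//	h := NewSimpleHandler(log, []string{"UPDATE"}, func(msg *ristypes.RISMessage) {
//		log.Debug("UPDATE received", "path_len", len(msg.Path))
//	})
//	s.RegisterHandler(h)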

View File

@ -1,8 +1,5 @@
package routewatch
// peerhandler.go provides batched peer tracking functionality for BGP route monitoring.
// It tracks BGP peers from all incoming RIS messages and maintains peer state in the database.
import (
"strconv"
"sync"
@ -24,10 +21,7 @@ const (
peerBatchTimeout = 2 * time.Second
)
// PeerHandler tracks BGP peers from all message types using batched operations.
// It maintains a queue of peer updates and periodically flushes them to the database
// in batches to improve performance. The handler deduplicates peer updates within
// each batch, keeping only the most recent update for each peer IP address.
// PeerHandler tracks BGP peers from all message types using batched operations
type PeerHandler struct {
db database.Store
logger *logger.Logger
@ -47,10 +41,7 @@ type peerUpdate struct {
timestamp time.Time
}
// NewPeerHandler creates a new PeerHandler with the given database store and logger.
// It initializes the peer batch buffer and starts a background goroutine that
// periodically flushes accumulated peer updates to the database. The handler
// should be stopped by calling Stop when it is no longer needed.
// NewPeerHandler creates a new batched peer tracking handler
func NewPeerHandler(db database.Store, logger *logger.Logger) *PeerHandler {
h := &PeerHandler{
db: db,
@ -67,25 +58,18 @@ func NewPeerHandler(db database.Store, logger *logger.Logger) *PeerHandler {
return h
}
// WantsMessage returns true for all message types since peer information
// is extracted from every RIS message regardless of type. This satisfies
// the MessageHandler interface.
// WantsMessage returns true for all message types since we track peers from all messages
func (h *PeerHandler) WantsMessage(_ string) bool {
return true
}
// QueueCapacity returns the desired queue capacity for this handler.
// The PeerHandler uses a large queue capacity because batching allows
// for efficient processing of many updates at once.
// QueueCapacity returns the desired queue capacity for this handler
func (h *PeerHandler) QueueCapacity() int {
// Batching allows us to use a larger queue
return peerHandlerQueueSize
}
// HandleMessage processes a RIS message to track peer information.
// It extracts the peer IP address and ASN from the message and adds
// the update to an internal batch. When the batch reaches peerBatchSize
// or the batch timeout expires, the batch is flushed to the database.
// HandleMessage processes a message to track peer information
func (h *PeerHandler) HandleMessage(msg *ristypes.RISMessage) {
// Parse peer ASN from string
peerASN := 0

View File

@ -11,36 +11,23 @@ import (
)
const (
// peeringHandlerQueueSize defines the buffer capacity for the peering
// handler's message queue. This should be large enough to handle bursts
// of BGP UPDATE messages without blocking.
// peeringHandlerQueueSize is the queue capacity for peering operations
peeringHandlerQueueSize = 100000
// minPathLengthForPeering specifies the minimum number of ASNs required
// in a BGP AS path to extract peering relationships. A path with fewer
// than 2 ASNs cannot contain any peering information.
// minPathLengthForPeering is the minimum AS path length to extract peerings
minPathLengthForPeering = 2
// pathExpirationTime determines how long AS paths are kept in memory
// before being eligible for pruning. Paths older than this are removed
// to prevent unbounded memory growth.
// pathExpirationTime is how long to keep AS paths in memory
pathExpirationTime = 30 * time.Minute
// peeringProcessInterval controls how frequently the handler processes
// accumulated AS paths and extracts peering relationships to store
// in the database.
peeringProcessInterval = 30 * time.Second
// peeringProcessInterval is how often to process AS paths into peerings
peeringProcessInterval = 2 * time.Minute
// pathPruneInterval determines how often the handler checks for and
// removes expired AS paths from memory.
// pathPruneInterval is how often to prune old AS paths
pathPruneInterval = 5 * time.Minute
)
// PeeringHandler processes BGP UPDATE messages to extract and track
// AS peering relationships. It accumulates AS paths in memory and
// periodically processes them to extract unique peering pairs, which
// are then stored in the database. The handler implements the Handler
// interface for integration with the message processing pipeline.
// PeeringHandler handles AS peering relationships from BGP path data
type PeeringHandler struct {
db database.Store
logger *logger.Logger
@ -52,11 +39,7 @@ type PeeringHandler struct {
stopCh chan struct{}
}
// NewPeeringHandler creates and initializes a new PeeringHandler with the
// provided database store and logger. It starts two background goroutines:
// one for periodic processing of accumulated AS paths into peering records,
// and one for pruning expired paths from memory. The handler begins
// processing immediately upon creation.
// NewPeeringHandler creates a new batched peering handler
func NewPeeringHandler(db database.Store, logger *logger.Logger) *PeeringHandler {
h := &PeeringHandler{
db: db,
@ -72,25 +55,18 @@ func NewPeeringHandler(db database.Store, logger *logger.Logger) *PeeringHandler
return h
}
// WantsMessage reports whether the handler should receive messages of the
// given type. PeeringHandler only processes UPDATE messages, as these contain
// the AS path information needed to extract peering relationships.
// WantsMessage returns true if this handler wants to process messages of the given type
func (h *PeeringHandler) WantsMessage(messageType string) bool {
// We only care about UPDATE messages that have AS paths
return messageType == "UPDATE"
}
// QueueCapacity returns the buffer size for the handler's message queue.
// This value is used by the message dispatcher to allocate the channel
// buffer when registering the handler.
// QueueCapacity returns the desired queue capacity for this handler
func (h *PeeringHandler) QueueCapacity() int {
return peeringHandlerQueueSize
}
// HandleMessage processes a BGP UPDATE message by storing its AS path
// in memory for later batch processing. Messages with AS paths shorter
// than minPathLengthForPeering are ignored as they cannot contain valid
// peering information.
// HandleMessage processes a message to extract AS paths
func (h *PeeringHandler) HandleMessage(msg *ristypes.RISMessage) {
// Skip if no AS path or only one AS
if len(msg.Path) < minPathLengthForPeering {
@ -165,9 +141,7 @@ func (h *PeeringHandler) prunePaths() {
}
}
// ProcessPeeringsNow triggers immediate processing of all accumulated AS
// paths into peering records. This bypasses the normal periodic processing
// schedule and is primarily intended for testing purposes.
// ProcessPeeringsNow forces immediate processing of peerings (for testing)
func (h *PeeringHandler) ProcessPeeringsNow() {
h.processPeerings()
}
@ -248,10 +222,7 @@ func (h *PeeringHandler) processPeerings() {
)
}
// Stop gracefully shuts down the handler by signaling the background
// goroutines to stop and performing a final synchronous processing of
// any remaining AS paths. This ensures no peering data is lost during
// shutdown.
// Stop gracefully stops the handler and processes remaining peerings
func (h *PeeringHandler) Stop() {
close(h.stopCh)
// Process any remaining peerings synchronously

View File

@ -19,7 +19,7 @@ const (
prefixHandlerQueueSize = 100000
// prefixBatchSize is the number of prefix updates to batch together
prefixBatchSize = 25000
prefixBatchSize = 20000
// prefixBatchTimeout is the maximum time to wait before flushing a batch
// DO NOT reduce this timeout - larger batches are more efficient
@ -113,10 +113,6 @@ func (h *PrefixHandler) HandleMessage(msg *ristypes.RISMessage) {
timestamp: timestamp,
path: msg.Path,
})
// Record announcement in metrics
if h.metrics != nil {
h.metrics.RecordAnnouncement()
}
}
}
@ -130,10 +126,6 @@ func (h *PrefixHandler) HandleMessage(msg *ristypes.RISMessage) {
timestamp: timestamp,
path: msg.Path,
})
// Record withdrawal in metrics
if h.metrics != nil {
h.metrics.RecordWithdrawal()
}
}
// Check if we need to flush
@ -190,15 +182,9 @@ func (h *PrefixHandler) flushBatchLocked() {
var routesToUpsert []*database.LiveRoute
var routesToDelete []database.LiveRouteDeletion
// Collect unique prefixes to update
prefixesToUpdate := make(map[string]time.Time)
// Skip the prefix table updates entirely - just update live_routes
// The prefix table is not critical for routing lookups
for _, update := range prefixMap {
// Track prefix for both announcements and withdrawals
if _, exists := prefixesToUpdate[update.prefix]; !exists || update.timestamp.After(prefixesToUpdate[update.prefix]) {
prefixesToUpdate[update.prefix] = update.timestamp
}
if update.messageType == "announcement" && update.originASN > 0 {
// Create live route for batch upsert
route := h.createLiveRoute(update)
@ -206,20 +192,11 @@ func (h *PrefixHandler) flushBatchLocked() {
routesToUpsert = append(routesToUpsert, route)
}
} else if update.messageType == "withdrawal" {
// Parse CIDR to get IP version
_, ipVersion, err := parseCIDR(update.prefix)
if err != nil {
h.logger.Error("Failed to parse CIDR for withdrawal", "prefix", update.prefix, "error", err)
continue
}
// Create deletion record for batch delete
routesToDelete = append(routesToDelete, database.LiveRouteDeletion{
Prefix: update.prefix,
OriginASN: update.originASN,
PeerIP: update.peer,
IPVersion: ipVersion,
})
}
}
@ -242,13 +219,6 @@ func (h *PrefixHandler) flushBatchLocked() {
}
}
// Update prefix tables
if len(prefixesToUpdate) > 0 {
if err := h.db.UpdatePrefixesBatch(prefixesToUpdate); err != nil {
h.logger.Error("Failed to update prefix batch", "error", err, "count", len(prefixesToUpdate))
}
}
elapsed := time.Since(startTime)
h.logger.Debug("Flushed prefix batch",
"batch_size", batchSize,

View File

@ -7,10 +7,10 @@ import (
"errors"
"net"
"net/http"
"net/url"
"runtime"
"sort"
"strconv"
"strings"
"time"
"git.eeqj.de/sneak/routewatch/internal/database"
@ -20,87 +20,14 @@ import (
"github.com/go-chi/chi/v5"
)
const (
// statsContextTimeout is the timeout for stats API operations.
statsContextTimeout = 4 * time.Second
// healthCheckTimeout is the timeout for health check operations.
healthCheckTimeout = 2 * time.Second
)
// HealthCheckResponse represents the health check response.
type HealthCheckResponse struct {
Status string `json:"status"`
Timestamp string `json:"timestamp"`
Checks map[string]string `json:"checks"`
}
// handleHealthCheck returns a handler that performs health checks.
// Returns 200 if healthy, 503 if any check fails.
// Uses lightweight checks to avoid timeout issues under load.
func (s *Server) handleHealthCheck() http.HandlerFunc {
// handleRoot returns a handler that redirects to /status
func (s *Server) handleRoot() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), healthCheckTimeout)
defer cancel()
checks := make(map[string]string)
healthy := true
// Check database connectivity with lightweight ping
err := s.db.Ping(ctx)
if err != nil {
checks["database"] = "error: " + err.Error()
healthy = false
} else {
checks["database"] = "ok"
}
// Check streamer connection
metrics := s.streamer.GetMetrics()
if metrics.Connected {
checks["ris_live"] = "ok"
} else {
checks["ris_live"] = "disconnected"
healthy = false
}
// Build response
status := "ok"
if !healthy {
status = "error"
}
response := HealthCheckResponse{
Status: status,
Timestamp: time.Now().UTC().Format(time.RFC3339),
Checks: checks,
}
if !healthy {
w.WriteHeader(http.StatusServiceUnavailable)
}
if err := writeJSONSuccess(w, response); err != nil {
s.logger.Error("Failed to encode health check response", "error", err)
}
http.Redirect(w, r, "/status", http.StatusSeeOther)
}
}
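// For reference, a healthy body from the healthcheck handler above marshals to
// roughly the following before writeJSONSuccess wraps it in its status
// envelope (values are illustrative):
//
//	{"status":"ok","timestamp":"2024-01-01T00:00:00Z","checks":{"database":"ok","ris_live":"ok"}}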
// handleIndex returns a handler that serves the home page.
func (s *Server) handleIndex() http.HandlerFunc {
return func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
tmpl := templates.IndexTemplate()
if err := tmpl.Execute(w, nil); err != nil {
s.logger.Error("Failed to render index template", "error", err)
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
}
}
}
// writeJSONError writes a standardized JSON error response with the given
// status code and error message.
// writeJSONError writes a standardized JSON error response
func writeJSONError(w http.ResponseWriter, statusCode int, message string) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(statusCode)
@ -113,8 +40,7 @@ func writeJSONError(w http.ResponseWriter, statusCode int, message string) {
})
}
// writeJSONSuccess writes a standardized JSON success response containing
// the provided data wrapped in a status envelope.
// writeJSONSuccess writes a standardized JSON success response
func writeJSONSuccess(w http.ResponseWriter, data interface{}) error {
w.Header().Set("Content-Type", "application/json")
@ -124,31 +50,15 @@ func writeJSONSuccess(w http.ResponseWriter, data interface{}) error {
})
}
// WHOISStatsInfo contains WHOIS fetcher statistics for the status page.
type WHOISStatsInfo struct {
TotalASNs int `json:"total_asns"`
FreshASNs int `json:"fresh_asns"`
StaleASNs int `json:"stale_asns"`
NeverFetched int `json:"never_fetched"`
SuccessesLastHour int `json:"successes_last_hour"`
ErrorsLastHour int `json:"errors_last_hour"`
CurrentInterval string `json:"current_interval"`
ConsecutiveFails int `json:"consecutive_fails"`
FreshPercent float64 `json:"fresh_percent"`
}
// handleStatusJSON returns a handler that serves JSON statistics including
// uptime, message counts, database stats, and route information.
// handleStatusJSON returns a handler that serves JSON statistics
func (s *Server) handleStatusJSON() http.HandlerFunc {
// Stats represents the statistics response
type Stats struct {
Uptime string `json:"uptime"`
TotalMessages uint64 `json:"total_messages"`
TotalBytes uint64 `json:"total_bytes"`
TotalWireBytes uint64 `json:"total_wire_bytes"`
MessagesPerSec float64 `json:"messages_per_sec"`
MbitsPerSec float64 `json:"mbits_per_sec"`
WireMbitsPerSec float64 `json:"wire_mbits_per_sec"`
Connected bool `json:"connected"`
GoVersion string `json:"go_version"`
Goroutines int `json:"goroutines"`
@ -163,18 +73,15 @@ func (s *Server) handleStatusJSON() http.HandlerFunc {
LiveRoutes int `json:"live_routes"`
IPv4Routes int `json:"ipv4_routes"`
IPv6Routes int `json:"ipv6_routes"`
OldestRoute *time.Time `json:"oldest_route,omitempty"`
NewestRoute *time.Time `json:"newest_route,omitempty"`
IPv4UpdatesPerSec float64 `json:"ipv4_updates_per_sec"`
IPv6UpdatesPerSec float64 `json:"ipv6_updates_per_sec"`
IPv4PrefixDistribution []database.PrefixDistribution `json:"ipv4_prefix_distribution"`
IPv6PrefixDistribution []database.PrefixDistribution `json:"ipv6_prefix_distribution"`
WHOISStats *WHOISStatsInfo `json:"whois_stats,omitempty"`
}
return func(w http.ResponseWriter, r *http.Request) {
// Create a 4 second timeout context for this request
ctx, cancel := context.WithTimeout(r.Context(), statsContextTimeout)
// Create a 1 second timeout context for this request
ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second)
defer cancel()
metrics := s.streamer.GetMetrics()
@ -231,20 +138,12 @@ func (s *Server) handleStatusJSON() http.HandlerFunc {
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
// Get WHOIS stats if fetcher is available
var whoisStats *WHOISStatsInfo
if s.asnFetcher != nil {
whoisStats = s.getWHOISStats(ctx)
}
stats := Stats{
Uptime: uptime,
TotalMessages: metrics.TotalMessages,
TotalBytes: metrics.TotalBytes,
TotalWireBytes: metrics.TotalWireBytes,
MessagesPerSec: metrics.MessagesPerSec,
MbitsPerSec: metrics.BitsPerSec / bitsPerMegabit,
WireMbitsPerSec: metrics.WireBitsPerSec / bitsPerMegabit,
Connected: metrics.Connected,
GoVersion: runtime.Version(),
Goroutines: runtime.NumGoroutine(),
@ -259,13 +158,10 @@ func (s *Server) handleStatusJSON() http.HandlerFunc {
LiveRoutes: dbStats.LiveRoutes,
IPv4Routes: ipv4Routes,
IPv6Routes: ipv6Routes,
OldestRoute: dbStats.OldestRoute,
NewestRoute: dbStats.NewestRoute,
IPv4UpdatesPerSec: routeMetrics.IPv4UpdatesPerSec,
IPv6UpdatesPerSec: routeMetrics.IPv6UpdatesPerSec,
IPv4PrefixDistribution: dbStats.IPv4PrefixDistribution,
IPv6PrefixDistribution: dbStats.IPv6PrefixDistribution,
WHOISStats: whoisStats,
}
if err := writeJSONSuccess(w, stats); err != nil {
@ -274,53 +170,13 @@ func (s *Server) handleStatusJSON() http.HandlerFunc {
}
}
// getWHOISStats builds WHOIS statistics from database and fetcher.
func (s *Server) getWHOISStats(ctx context.Context) *WHOISStatsInfo {
// Get database WHOIS stats
dbStats, err := s.db.GetWHOISStats(ctx, whoisStaleThreshold)
if err != nil {
s.logger.Warn("Failed to get WHOIS stats", "error", err)
return nil
}
// Get fetcher stats
fetcherStats := s.asnFetcher.GetStats()
// Calculate fresh percentage
var freshPercent float64
if dbStats.TotalASNs > 0 {
freshPercent = float64(dbStats.FreshASNs) / float64(dbStats.TotalASNs) * percentMultiplier
}
return &WHOISStatsInfo{
TotalASNs: dbStats.TotalASNs,
FreshASNs: dbStats.FreshASNs,
StaleASNs: dbStats.StaleASNs,
NeverFetched: dbStats.NeverFetched,
SuccessesLastHour: fetcherStats.SuccessesLastHour,
ErrorsLastHour: fetcherStats.ErrorsLastHour,
CurrentInterval: fetcherStats.CurrentInterval.String(),
ConsecutiveFails: fetcherStats.ConsecutiveFails,
FreshPercent: freshPercent,
}
}
// whoisStaleThreshold matches the fetcher's threshold for consistency.
const whoisStaleThreshold = 30 * 24 * time.Hour
// percentMultiplier converts a ratio to a percentage.
const percentMultiplier = 100
// handleStats returns a handler that serves API v1 statistics including
// detailed handler queue statistics and performance metrics.
// handleStats returns a handler that serves API v1 statistics
func (s *Server) handleStats() http.HandlerFunc {
// HandlerStatsInfo represents handler statistics in the API response
type HandlerStatsInfo struct {
Name string `json:"name"`
QueueLength int `json:"queue_length"`
QueueCapacity int `json:"queue_capacity"`
QueueHighWaterMark int `json:"queue_high_water_mark"`
ProcessedCount uint64 `json:"processed_count"`
DroppedCount uint64 `json:"dropped_count"`
AvgProcessTimeMs float64 `json:"avg_process_time_ms"`
@ -328,40 +184,17 @@ func (s *Server) handleStats() http.HandlerFunc {
MaxProcessTimeMs float64 `json:"max_process_time_ms"`
}
// GCStats represents garbage collection statistics
type GCStats struct {
NumGC uint32 `json:"num_gc"`
TotalPauseMs uint64 `json:"total_pause_ms"`
LastPauseMs float64 `json:"last_pause_ms"`
HeapAllocBytes uint64 `json:"heap_alloc_bytes"`
HeapSysBytes uint64 `json:"heap_sys_bytes"`
}
// StreamStats represents stream statistics including announcements/withdrawals
type StreamStats struct {
Announcements uint64 `json:"announcements"`
Withdrawals uint64 `json:"withdrawals"`
RouteChurnPerSec float64 `json:"route_churn_per_sec"`
BGPPeerCount int `json:"bgp_peer_count"`
}
// StatsResponse represents the API statistics response
type StatsResponse struct {
Uptime string `json:"uptime"`
TotalMessages uint64 `json:"total_messages"`
TotalBytes uint64 `json:"total_bytes"`
TotalWireBytes uint64 `json:"total_wire_bytes"`
MessagesPerSec float64 `json:"messages_per_sec"`
MbitsPerSec float64 `json:"mbits_per_sec"`
WireMbitsPerSec float64 `json:"wire_mbits_per_sec"`
Connected bool `json:"connected"`
ConnectionDuration string `json:"connection_duration"`
ReconnectCount uint64 `json:"reconnect_count"`
GoVersion string `json:"go_version"`
Goroutines int `json:"goroutines"`
MemoryUsage string `json:"memory_usage"`
GC GCStats `json:"gc"`
Stream StreamStats `json:"stream"`
ASNs int `json:"asns"`
Prefixes int `json:"prefixes"`
IPv4Prefixes int `json:"ipv4_prefixes"`
@ -372,19 +205,16 @@ func (s *Server) handleStats() http.HandlerFunc {
LiveRoutes int `json:"live_routes"`
IPv4Routes int `json:"ipv4_routes"`
IPv6Routes int `json:"ipv6_routes"`
OldestRoute *time.Time `json:"oldest_route,omitempty"`
NewestRoute *time.Time `json:"newest_route,omitempty"`
IPv4UpdatesPerSec float64 `json:"ipv4_updates_per_sec"`
IPv6UpdatesPerSec float64 `json:"ipv6_updates_per_sec"`
HandlerStats []HandlerStatsInfo `json:"handler_stats"`
IPv4PrefixDistribution []database.PrefixDistribution `json:"ipv4_prefix_distribution"`
IPv6PrefixDistribution []database.PrefixDistribution `json:"ipv6_prefix_distribution"`
WHOISStats *WHOISStatsInfo `json:"whois_stats,omitempty"`
}
return func(w http.ResponseWriter, r *http.Request) {
// Create a 4 second timeout context for this request
ctx, cancel := context.WithTimeout(r.Context(), statsContextTimeout)
// Create a 1 second timeout context for this request
ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second)
defer cancel()
// Check if context is already cancelled
@ -421,7 +251,7 @@ func (s *Server) handleStats() http.HandlerFunc {
return
case err := <-errChan:
s.logger.Error("Failed to get database stats", "error", err)
writeJSONError(w, http.StatusInternalServerError, err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
case dbStats = <-statsChan:
@ -454,7 +284,6 @@ func (s *Server) handleStats() http.HandlerFunc {
Name: hs.Name,
QueueLength: hs.QueueLength,
QueueCapacity: hs.QueueCapacity,
QueueHighWaterMark: hs.QueueHighWaterMark,
ProcessedCount: hs.ProcessedCount,
DroppedCount: hs.DroppedCount,
AvgProcessTimeMs: float64(hs.AvgProcessTime.Microseconds()) / microsecondsPerMillisecond,
@ -467,64 +296,16 @@ func (s *Server) handleStats() http.HandlerFunc {
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
// Get WHOIS stats if fetcher is available
var whoisStats *WHOISStatsInfo
if s.asnFetcher != nil {
whoisStats = s.getWHOISStats(ctx)
}
// Calculate connection duration
connectionDuration := "disconnected"
if metrics.Connected && !metrics.ConnectedSince.IsZero() {
connectionDuration = time.Since(metrics.ConnectedSince).Truncate(time.Second).String()
}
// Get announcement/withdrawal stats from metrics tracker
metricsTracker := s.streamer.GetMetricsTracker()
announcements := metricsTracker.GetAnnouncementCount()
withdrawals := metricsTracker.GetWithdrawalCount()
churnRate := metricsTracker.GetChurnRate()
bgpPeerCount := metricsTracker.GetBGPPeerCount()
// Calculate last GC pause
const (
nanosecondsPerMillisecond = 1e6
gcPauseHistorySize = 256 // Size of runtime.MemStats.PauseNs circular buffer
)
var lastPauseMs float64
if memStats.NumGC > 0 {
// PauseNs is a circular buffer; get the most recent pause
lastPauseIdx := (memStats.NumGC + gcPauseHistorySize - 1) % gcPauseHistorySize
lastPauseMs = float64(memStats.PauseNs[lastPauseIdx]) / nanosecondsPerMillisecond
}
stats := StatsResponse{
Uptime: uptime,
TotalMessages: metrics.TotalMessages,
TotalBytes: metrics.TotalBytes,
TotalWireBytes: metrics.TotalWireBytes,
MessagesPerSec: metrics.MessagesPerSec,
MbitsPerSec: metrics.BitsPerSec / bitsPerMegabit,
WireMbitsPerSec: metrics.WireBitsPerSec / bitsPerMegabit,
Connected: metrics.Connected,
ConnectionDuration: connectionDuration,
ReconnectCount: metrics.ReconnectCount,
GoVersion: runtime.Version(),
Goroutines: runtime.NumGoroutine(),
MemoryUsage: humanize.Bytes(memStats.Alloc),
GC: GCStats{
NumGC: memStats.NumGC,
TotalPauseMs: memStats.PauseTotalNs / uint64(nanosecondsPerMillisecond),
LastPauseMs: lastPauseMs,
HeapAllocBytes: memStats.HeapAlloc,
HeapSysBytes: memStats.HeapSys,
},
Stream: StreamStats{
Announcements: announcements,
Withdrawals: withdrawals,
RouteChurnPerSec: churnRate,
BGPPeerCount: bgpPeerCount,
},
ASNs: dbStats.ASNs,
Prefixes: dbStats.Prefixes,
IPv4Prefixes: dbStats.IPv4Prefixes,
@ -535,14 +316,11 @@ func (s *Server) handleStats() http.HandlerFunc {
LiveRoutes: dbStats.LiveRoutes,
IPv4Routes: ipv4Routes,
IPv6Routes: ipv6Routes,
OldestRoute: dbStats.OldestRoute,
NewestRoute: dbStats.NewestRoute,
IPv4UpdatesPerSec: routeMetrics.IPv4UpdatesPerSec,
IPv6UpdatesPerSec: routeMetrics.IPv6UpdatesPerSec,
HandlerStats: handlerStatsInfo,
IPv4PrefixDistribution: dbStats.IPv4PrefixDistribution,
IPv6PrefixDistribution: dbStats.IPv6PrefixDistribution,
WHOISStats: whoisStats,
}
if err := writeJSONSuccess(w, stats); err != nil {
@ -551,8 +329,7 @@ func (s *Server) handleStats() http.HandlerFunc {
}
}
// handleStatusHTML returns a handler that serves the HTML status page,
// which displays real-time statistics fetched via JavaScript.
// handleStatusHTML returns a handler that serves the HTML status page
func (s *Server) handleStatusHTML() http.HandlerFunc {
return func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
@ -567,136 +344,33 @@ func (s *Server) handleStatusHTML() http.HandlerFunc {
// handleIPLookup returns a handler that looks up AS information for an IP address
func (s *Server) handleIPLookup() http.HandlerFunc {
return s.handleIPInfo()
}
// IPLookupResponse is the standard response for IP/hostname lookups.
type IPLookupResponse struct {
Query string `json:"query"`
Results []*database.IPInfo `json:"results"`
Errors []string `json:"errors,omitempty"`
}
// handleIPInfo returns a handler that provides comprehensive IP information.
// Used for /ip, /ip/{addr}, and /api/v1/ip/{ip} endpoints.
// Accepts IP addresses (single or comma-separated) and hostnames.
// Always returns the same response structure with PTR records for each IP.
func (s *Server) handleIPInfo() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// Get IP/hostname from URL param, falling back to client IP
target := chi.URLParam(r, "ip")
if target == "" {
target = chi.URLParam(r, "addr")
}
if target == "" {
// Use client IP (RealIP middleware has already processed this)
target = extractClientIP(r)
}
if target == "" {
writeJSONError(w, http.StatusBadRequest, "Could not determine IP address")
ip := chi.URLParam(r, "ip")
if ip == "" {
writeJSONError(w, http.StatusBadRequest, "IP parameter is required")
return
}
ctx := r.Context()
response := IPLookupResponse{
Query: target,
Results: make([]*database.IPInfo, 0),
}
// Collect all IPs to look up
var ipsToLookup []string
// Check if target contains commas (multiple IPs)
targets := strings.Split(target, ",")
for _, t := range targets {
t = strings.TrimSpace(t)
if t == "" {
continue
}
// Check if this target is an IP address
if parsedIP := net.ParseIP(t); parsedIP != nil {
ipsToLookup = append(ipsToLookup, t)
// Look up AS information for the IP
asInfo, err := s.db.GetASInfoForIPContext(r.Context(), ip)
if err != nil {
// Check if it's an invalid IP error
if errors.Is(err, database.ErrInvalidIP) {
writeJSONError(w, http.StatusBadRequest, err.Error())
} else {
// It's a hostname - resolve it
resolved, err := net.DefaultResolver.LookupHost(ctx, t)
if err != nil {
response.Errors = append(response.Errors, t+": "+err.Error())
continue
// All other errors (including ErrNoRoute) are 404
writeJSONError(w, http.StatusNotFound, err.Error())
}
ipsToLookup = append(ipsToLookup, resolved...)
}
}
if len(ipsToLookup) == 0 {
writeJSONError(w, http.StatusBadRequest, "No valid IPs or hostnames provided")
return
}
// Track ASNs that need WHOIS refresh
refreshASNs := make(map[int]bool)
// Look up each IP
for _, ip := range ipsToLookup {
ipInfo, err := s.db.GetIPInfoContext(ctx, ip)
if err != nil {
response.Errors = append(response.Errors, ip+": "+err.Error())
continue
}
// Do PTR lookup for this IP
ptrs, err := net.DefaultResolver.LookupAddr(ctx, ip)
if err == nil && len(ptrs) > 0 {
// Remove trailing dots from PTR records
for i, ptr := range ptrs {
ptrs[i] = strings.TrimSuffix(ptr, ".")
}
ipInfo.PTR = ptrs
}
response.Results = append(response.Results, ipInfo)
if ipInfo.NeedsWHOISRefresh {
refreshASNs[ipInfo.ASN] = true
// Return successful response
if err := writeJSONSuccess(w, asInfo); err != nil {
s.logger.Error("Failed to encode AS info", "error", err)
}
}
// Queue WHOIS refresh for stale ASNs (non-blocking)
if s.asnFetcher != nil {
for asn := range refreshASNs {
s.asnFetcher.QueueImmediate(asn)
}
}
// Return the response, including any per-IP errors; if nothing resolved, return 404 with the first error
if len(response.Results) == 0 && len(response.Errors) > 0 {
writeJSONError(w, http.StatusNotFound, "No routes found: "+response.Errors[0])
return
}
if err := writeJSONSuccess(w, response); err != nil {
s.logger.Error("Failed to encode IP lookup response", "error", err)
}
}
}
// extractClientIP extracts the client IP from the request.
// Works with chi's RealIP middleware which sets RemoteAddr.
func extractClientIP(r *http.Request) string {
// RemoteAddr is in the form "IP:port" or just "IP" for unix sockets
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
// Might be just an IP without port
return r.RemoteAddr
}
return host
}
// handleASDetailJSON returns AS details as JSON
@ -748,18 +422,20 @@ func (s *Server) handleASDetailJSON() http.HandlerFunc {
// handlePrefixDetailJSON returns prefix details as JSON
func (s *Server) handlePrefixDetailJSON() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// Get prefix and length from URL params
prefixParam := chi.URLParam(r, "prefix")
lenParam := chi.URLParam(r, "len")
if prefixParam == "" || lenParam == "" {
writeJSONError(w, http.StatusBadRequest, "Prefix and length parameters are required")
if prefixParam == "" {
writeJSONError(w, http.StatusBadRequest, "Prefix parameter is required")
return
}
// Combine prefix and length into CIDR notation
prefix := prefixParam + "/" + lenParam
// URL decode the prefix parameter
prefix, err := url.QueryUnescape(prefixParam)
if err != nil {
writeJSONError(w, http.StatusBadRequest, "Invalid prefix parameter")
return
}
routes, err := s.db.GetPrefixDetailsContext(r.Context(), prefix)
if err != nil {
@ -815,14 +491,6 @@ func (s *Server) handleASDetail() http.HandlerFunc {
return
}
// Get peers
peers, err := s.db.GetASPeersContext(r.Context(), asn)
if err != nil {
s.logger.Error("Failed to get AS peers", "error", err)
// Continue without peers rather than failing the whole request
peers = []database.ASPeer{}
}
// Group prefixes by IP version
const ipVersionV4 = 4
var ipv4Prefixes, ipv6Prefixes []database.LiveRoute
@ -879,8 +547,6 @@ func (s *Server) handleASDetail() http.HandlerFunc {
TotalCount int
IPv4Count int
IPv6Count int
Peers []database.ASPeer
PeerCount int
}{
ASN: asInfo,
IPv4Prefixes: ipv4Prefixes,
@ -888,8 +554,6 @@ func (s *Server) handleASDetail() http.HandlerFunc {
TotalCount: len(prefixes),
IPv4Count: len(ipv4Prefixes),
IPv6Count: len(ipv6Prefixes),
Peers: peers,
PeerCount: len(peers),
}
// Check if context is still valid before writing response
@ -912,18 +576,20 @@ func (s *Server) handleASDetail() http.HandlerFunc {
// handlePrefixDetail returns a handler that serves the prefix detail HTML page
func (s *Server) handlePrefixDetail() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// Get prefix and length from URL params
prefixParam := chi.URLParam(r, "prefix")
lenParam := chi.URLParam(r, "len")
if prefixParam == "" || lenParam == "" {
http.Error(w, "Prefix and length parameters are required", http.StatusBadRequest)
if prefixParam == "" {
http.Error(w, "Prefix parameter is required", http.StatusBadRequest)
return
}
// Combine prefix and length into CIDR notation
prefix := prefixParam + "/" + lenParam
// URL decode the prefix parameter
prefix, err := url.QueryUnescape(prefixParam)
if err != nil {
http.Error(w, "Invalid prefix parameter", http.StatusBadRequest)
return
}
routes, err := s.db.GetPrefixDetailsContext(r.Context(), prefix)
if err != nil {
@ -939,7 +605,7 @@ func (s *Server) handlePrefixDetail() http.HandlerFunc {
// Group by origin AS and collect unique AS info
type ASNInfo struct {
ASN int
Number int
Handle string
Description string
PeerCount int
@ -956,7 +622,7 @@ func (s *Server) handlePrefixDetail() http.HandlerFunc {
description = asInfo.Description
}
originMap[route.OriginASN] = &ASNInfo{
ASN: route.OriginASN,
Number: route.OriginASN,
Handle: handle,
Description: description,
PeerCount: 0,
@ -989,7 +655,7 @@ func (s *Server) handlePrefixDetail() http.HandlerFunc {
// Create enhanced routes with AS path handles
type ASPathEntry struct {
ASN int
Number int
Handle string
}
type EnhancedRoute struct {
@ -1008,7 +674,7 @@ func (s *Server) handlePrefixDetail() http.HandlerFunc {
for j, asn := range route.ASPath {
handle := asinfo.GetHandle(asn)
enhancedRoute.ASPathWithHandle[j] = ASPathEntry{
ASN: asn,
Number: asn,
Handle: handle,
}
}
@ -1052,7 +718,37 @@ func (s *Server) handlePrefixDetail() http.HandlerFunc {
}
}
// handlePrefixLength shows a random sample of IPv4 prefixes with the specified mask length
// handleIPRedirect looks up the prefix containing the IP and redirects to its detail page
func (s *Server) handleIPRedirect() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ip := chi.URLParam(r, "ip")
if ip == "" {
http.Error(w, "IP parameter is required", http.StatusBadRequest)
return
}
// Look up AS information for the IP (which includes the prefix)
asInfo, err := s.db.GetASInfoForIP(ip)
if err != nil {
if errors.Is(err, database.ErrInvalidIP) {
http.Error(w, "Invalid IP address", http.StatusBadRequest)
} else if errors.Is(err, database.ErrNoRoute) {
http.Error(w, "No route found for this IP", http.StatusNotFound)
} else {
s.logger.Error("Failed to look up IP", "error", err)
http.Error(w, "Internal server error", http.StatusInternalServerError)
}
return
}
// Redirect to the prefix detail page (URL encode the prefix)
http.Redirect(w, r, "/prefix/"+url.QueryEscape(asInfo.Prefix), http.StatusSeeOther)
}
}
// handlePrefixLength shows a random sample of prefixes with the specified mask length
func (s *Server) handlePrefixLength() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
lengthStr := chi.URLParam(r, "length")
@ -1069,107 +765,22 @@ func (s *Server) handlePrefixLength() http.HandlerFunc {
return
}
// Validate IPv4 mask length
const maxIPv4MaskLength = 32
if maskLength < 0 || maskLength > maxIPv4MaskLength {
http.Error(w, "Invalid IPv4 mask length", http.StatusBadRequest)
return
}
const ipVersion = 4
// Get random sample of prefixes
const maxPrefixes = 500
prefixes, err := s.db.GetRandomPrefixesByLengthContext(r.Context(), maskLength, ipVersion, maxPrefixes)
if err != nil {
s.logger.Error("Failed to get prefixes by length", "error", err)
http.Error(w, "Internal server error", http.StatusInternalServerError)
return
}
// Sort prefixes for display
sort.Slice(prefixes, func(i, j int) bool {
// First compare by IP version
if prefixes[i].IPVersion != prefixes[j].IPVersion {
return prefixes[i].IPVersion < prefixes[j].IPVersion
}
// Then by prefix
return prefixes[i].Prefix < prefixes[j].Prefix
})
// Create enhanced prefixes with AS descriptions
type EnhancedPrefix struct {
database.LiveRoute
OriginASDescription string
Age string
}
enhancedPrefixes := make([]EnhancedPrefix, len(prefixes))
for i, prefix := range prefixes {
enhancedPrefixes[i] = EnhancedPrefix{
LiveRoute: prefix,
Age: formatAge(prefix.LastUpdated),
}
// Get AS description
if asInfo, ok := asinfo.Get(prefix.OriginASN); ok {
enhancedPrefixes[i].OriginASDescription = asInfo.Description
}
}
// Render template
data := map[string]interface{}{
"MaskLength": maskLength,
"IPVersion": ipVersion,
"Prefixes": enhancedPrefixes,
"Count": len(prefixes),
}
// Check if context is still valid before writing response
select {
case <-r.Context().Done():
// Request was cancelled, don't write response
return
default:
}
tmpl := templates.PrefixLengthTemplate()
if err := tmpl.Execute(w, data); err != nil {
s.logger.Error("Failed to render prefix length template", "error", err)
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
}
}
}
// handlePrefixLength6 shows a random sample of IPv6 prefixes with the specified mask length
func (s *Server) handlePrefixLength6() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
lengthStr := chi.URLParam(r, "length")
if lengthStr == "" {
http.Error(w, "Length parameter is required", http.StatusBadRequest)
return
}
maskLength, err := strconv.Atoi(lengthStr)
if err != nil {
// Determine IP version based on mask length
const (
maxIPv4MaskLength = 32
maxIPv6MaskLength = 128
)
var ipVersion int
if maskLength <= maxIPv4MaskLength {
ipVersion = 4
} else if maskLength <= maxIPv6MaskLength {
ipVersion = 6
} else {
http.Error(w, "Invalid mask length", http.StatusBadRequest)
return
}
// Validate IPv6 mask length
const maxIPv6MaskLength = 128
if maskLength < 0 || maskLength > maxIPv6MaskLength {
http.Error(w, "Invalid IPv6 mask length", http.StatusBadRequest)
return
}
const ipVersion = 6
// Get random sample of prefixes
const maxPrefixes = 500
prefixes, err := s.db.GetRandomPrefixesByLengthContext(r.Context(), maskLength, ipVersion, maxPrefixes)

View File

@ -5,7 +5,6 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"sync"
"time"
@ -45,12 +44,7 @@ func (rw *responseWriter) Header() http.Header {
return rw.ResponseWriter.Header()
}
// JSONResponseMiddleware is an HTTP middleware that wraps all JSON responses
// with a @meta field containing execution metadata. The metadata includes the
// time zone (always UTC), API version, and request execution time in milliseconds.
//
// Endpoints "/" and "/status" are excluded from this processing and passed through
// unchanged. Non-JSON responses and empty responses are also passed through unchanged.
// JSONResponseMiddleware wraps all JSON responses with metadata
func JSONResponseMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Skip non-JSON endpoints
@ -161,14 +155,8 @@ func (tw *timeoutWriter) markWritten() {
tw.written = true
}
// TimeoutMiddleware creates an HTTP middleware that enforces a request timeout.
// If the handler does not complete within the specified duration, the middleware
// returns a JSON error response with HTTP status 408 (Request Timeout).
//
// The timeout parameter specifies the maximum duration allowed for request processing.
// The returned middleware handles panics from the wrapped handler by re-panicking
// after cleanup, and prevents concurrent writes to the response after timeout occurs.
func TimeoutMiddleware(timeout time.Duration, logger *slog.Logger) func(http.Handler) http.Handler {
// TimeoutMiddleware creates a timeout middleware that returns JSON errors
func TimeoutMiddleware(timeout time.Duration) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
@ -204,14 +192,6 @@ func TimeoutMiddleware(timeout time.Duration, logger *slog.Logger) func(http.Han
tw.markWritten() // Prevent the handler from writing after timeout
execTime := time.Since(startTime)
// Log the timeout as a warning
logger.Warn("Request timeout",
"method", r.Method,
"path", r.URL.Path,
"duration_ms", execTime.Milliseconds(),
"remote_addr", r.RemoteAddr,
)
// Write directly to the underlying writer since we've marked tw as written
// This is safe because markWritten() prevents the handler from writing
tw.mu.Lock()
@ -237,147 +217,3 @@ func TimeoutMiddleware(timeout time.Duration, logger *slog.Logger) func(http.Han
})
}
}
// JSONValidationMiddleware is an HTTP middleware that validates JSON API responses.
// It ensures that responses with Content-Type "application/json" contain valid JSON.
//
// If a response is not valid JSON or is empty when JSON is expected, the middleware
// returns a properly formatted JSON error response. For timeout errors (status 408),
// the error message will be "Request timeout". For other errors, it returns
// "Internal server error" with status 500 if the original status was 200.
//
// Non-JSON responses are passed through unchanged.
func JSONValidationMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Create a custom response writer to capture the response
rw := &responseWriter{
ResponseWriter: w,
body: &bytes.Buffer{},
statusCode: http.StatusOK,
}
// Serve the request
next.ServeHTTP(rw, r)
// Check if it's meant to be a JSON response
contentType := rw.Header().Get("Content-Type")
isJSON := contentType == "application/json" || contentType == ""
// If it's not JSON and has a body, pass it through unchanged
if !isJSON && rw.body.Len() > 0 {
w.WriteHeader(rw.statusCode)
_, _ = w.Write(rw.body.Bytes())
return
}
// For JSON responses, validate the JSON
if rw.body.Len() > 0 {
var testParse interface{}
if err := json.Unmarshal(rw.body.Bytes(), &testParse); err == nil {
// Valid JSON, write it out
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(rw.statusCode)
_, _ = w.Write(rw.body.Bytes())
return
}
}
// If we get here, there is either no body or the body is not valid JSON
// Write a proper error response
w.Header().Set("Content-Type", "application/json")
// Determine appropriate status code
statusCode := rw.statusCode
if statusCode == http.StatusOK {
statusCode = http.StatusInternalServerError
}
w.WriteHeader(statusCode)
errorMsg := "Internal server error"
if statusCode == http.StatusRequestTimeout {
errorMsg = "Request timeout"
}
response := map[string]interface{}{
"status": "error",
"error": map[string]interface{}{
"msg": errorMsg,
"code": statusCode,
},
}
_ = json.NewEncoder(w).Encode(response)
})
}
// statusWriter wraps http.ResponseWriter to capture the status code
type statusWriter struct {
http.ResponseWriter
statusCode int
written bool
}
func (sw *statusWriter) WriteHeader(statusCode int) {
if !sw.written {
sw.statusCode = statusCode
sw.written = true
}
sw.ResponseWriter.WriteHeader(statusCode)
}
func (sw *statusWriter) Write(b []byte) (int, error) {
if !sw.written {
sw.statusCode = http.StatusOK
sw.written = true
}
return sw.ResponseWriter.Write(b)
}
// RequestLoggerMiddleware creates a structured logging middleware using slog.
func RequestLoggerMiddleware(logger *slog.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
// Wrap response writer to capture status
sw := &statusWriter{ResponseWriter: w, statusCode: http.StatusOK}
// Log request start
logger.Debug("HTTP request started",
"method", r.Method,
"path", r.URL.Path,
"remote_addr", r.RemoteAddr,
"user_agent", r.UserAgent(),
)
// Serve the request
next.ServeHTTP(sw, r)
// Log request completion
duration := time.Since(start)
logLevel := slog.LevelInfo
// Slow query threshold (1 second)
const slowQueryThreshold = 1 * time.Second
if sw.statusCode >= http.StatusInternalServerError {
logLevel = slog.LevelError
} else if sw.statusCode >= http.StatusBadRequest {
logLevel = slog.LevelWarn
} else if duration >= slowQueryThreshold {
logLevel = slog.LevelWarn
}
logger.Log(r.Context(), logLevel, "HTTP request completed",
"method", r.Method,
"path", r.URL.Path,
"status", sw.statusCode,
"duration_ms", duration.Milliseconds(),
"remote_addr", r.RemoteAddr,
"slow", duration >= slowQueryThreshold,
)
})
}
}

View File

@ -14,38 +14,29 @@ func (s *Server) setupRoutes() {
// Middleware
r.Use(middleware.RequestID)
r.Use(middleware.RealIP)
r.Use(RequestLoggerMiddleware(s.logger.Logger)) // Structured request logging
r.Use(middleware.Logger)
r.Use(middleware.Recoverer)
const requestTimeout = 30 * time.Second // Increased from 8s for slow queries
r.Use(TimeoutMiddleware(requestTimeout, s.logger.Logger))
const requestTimeout = 2 * time.Second
r.Use(TimeoutMiddleware(requestTimeout))
r.Use(JSONResponseMiddleware)
// Routes
r.Get("/", s.handleIndex())
r.Get("/", s.handleRoot())
r.Get("/status", s.handleStatusHTML())
r.Get("/status.json", JSONValidationMiddleware(s.handleStatusJSON()).ServeHTTP)
r.Get("/.well-known/healthcheck.json", JSONValidationMiddleware(s.handleHealthCheck()).ServeHTTP)
r.Get("/status.json", s.handleStatusJSON())
// AS and prefix detail pages
r.Get("/as/{asn}", s.handleASDetail())
r.Get("/prefix/{prefix}/{len}", s.handlePrefixDetail())
r.Get("/prefix/{prefix}", s.handlePrefixDetail())
r.Get("/prefixlength/{length}", s.handlePrefixLength())
r.Get("/prefixlength6/{length}", s.handlePrefixLength6())
// IP info JSON endpoints (replaces old /ip redirect)
r.Route("/ip", func(r chi.Router) {
r.Use(JSONValidationMiddleware)
r.Get("/", s.handleIPInfo()) // Client IP
r.Get("/{addr}", s.handleIPInfo()) // Specified IP
})
r.Get("/ip/{ip}", s.handleIPRedirect())
// API routes
r.Route("/api/v1", func(r chi.Router) {
r.Use(JSONValidationMiddleware)
r.Get("/stats", s.handleStats())
r.Get("/ip/{ip}", s.handleIPLookup())
r.Get("/as/{asn}", s.handleASDetailJSON())
r.Get("/prefix/{prefix}/{len}", s.handlePrefixDetailJSON())
r.Get("/prefix/{prefix}", s.handlePrefixDetailJSON())
})
s.router = r

View File

@ -13,20 +13,6 @@ import (
"github.com/go-chi/chi/v5"
)
// ASNFetcherStats contains WHOIS fetcher statistics.
type ASNFetcherStats struct {
SuccessesLastHour int
ErrorsLastHour int
CurrentInterval time.Duration
ConsecutiveFails int
}
// ASNFetcher is an interface for queuing ASN WHOIS lookups.
type ASNFetcher interface {
QueueImmediate(asn int)
GetStats() ASNFetcherStats
}
// Server provides HTTP endpoints for status monitoring
type Server struct {
router *chi.Mux
@ -34,7 +20,6 @@ type Server struct {
streamer *streamer.Streamer
logger *logger.Logger
srv *http.Server
asnFetcher ASNFetcher
}
// New creates a new HTTP server
@ -57,27 +42,16 @@ func (s *Server) Start() error {
port = "8080"
}
const (
readHeaderTimeout = 40 * time.Second
readTimeout = 60 * time.Second
writeTimeout = 60 * time.Second
idleTimeout = 120 * time.Second
)
const readHeaderTimeout = 10 * time.Second
s.srv = &http.Server{
Addr: ":" + port,
Handler: s.router,
ReadHeaderTimeout: readHeaderTimeout,
ReadTimeout: readTimeout,
WriteTimeout: writeTimeout,
IdleTimeout: idleTimeout,
}
s.logger.Info("Starting HTTP server", "port", port, "addr", s.srv.Addr)
s.logger.Info("Starting HTTP server", "port", port)
// Start in goroutine but log when actually listening
go func() {
s.logger.Info("HTTP server listening", "addr", s.srv.Addr)
if err := s.srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
s.logger.Error("HTTP server error", "error", err)
}
@ -96,8 +70,3 @@ func (s *Server) Stop(ctx context.Context) error {
return s.srv.Shutdown(ctx)
}
// SetASNFetcher sets the ASN WHOIS fetcher for on-demand lookups.
func (s *Server) SetASNFetcher(fetcher ASNFetcher) {
s.asnFetcher = fetcher
}

View File

@ -4,13 +4,10 @@ package streamer
import (
"bufio"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"math"
"math/rand"
"net/http"
"sync"
"sync/atomic"
@ -21,26 +18,6 @@ import (
"git.eeqj.de/sneak/routewatch/internal/ristypes"
)
// countingReader wraps an io.Reader and counts bytes read
type countingReader struct {
reader io.Reader
count int64
}
// Read implements io.Reader and counts bytes
func (c *countingReader) Read(p []byte) (int, error) {
n, err := c.reader.Read(p)
atomic.AddInt64(&c.count, int64(n))
return n, err
}
// Count returns the total bytes read
func (c *countingReader) Count() int64 {
return atomic.LoadInt64(&c.count)
}
// Configuration constants for the RIS Live streamer.
const (
risLiveURL = "https://ris-live.ripe.net/v1/stream/?format=json&" +
"client=https%3A%2F%2Fgit.eeqj.de%2Fsneak%2Froutewatch"
@ -52,33 +29,22 @@ const (
bytesPerKB = 1024
bytesPerMB = 1024 * 1024
maxConcurrentHandlers = 800 // Maximum number of concurrent message handlers
// Backpressure constants
backpressureThreshold = 0.5 // Start dropping at 50% queue utilization
backpressureSlope = 2.0 // Slope for linear drop probability increase
)
// MessageHandler defines the interface for processing RIS messages.
// Implementations must specify which message types they want to receive,
// how to process messages, and their desired queue capacity.
// MessageHandler is an interface for handling RIS messages
type MessageHandler interface {
// WantsMessage returns true if this handler wants to process messages of the given type.
// WantsMessage returns true if this handler wants to process messages of the given type
WantsMessage(messageType string) bool
// HandleMessage processes a RIS message. This method is called from a dedicated
// goroutine for each handler, so implementations do not need to be goroutine-safe
// with respect to other handlers.
// HandleMessage processes a RIS message
HandleMessage(msg *ristypes.RISMessage)
// QueueCapacity returns the desired queue capacity for this handler.
// Handlers that process quickly can have larger queues to buffer bursts.
// When the queue fills up, messages will be dropped according to the
// backpressure algorithm.
// QueueCapacity returns the desired queue capacity for this handler
// Handlers that process quickly can have larger queues
QueueCapacity() int
}
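// A minimal sketch of an implementation (illustrative only; SimpleHandler and
// the batched handlers in this repository are the real examples): count UPDATE
// messages and ignore everything else.
type countingHandler struct{ n atomic.Uint64 }

func (c *countingHandler) WantsMessage(t string) bool           { return t == "UPDATE" }
func (c *countingHandler) HandleMessage(_ *ristypes.RISMessage) { c.n.Add(1) }
func (c *countingHandler) QueueCapacity() int                    { return 1000 }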
// RawMessageHandler is a function type for processing raw JSON lines from the stream.
// It receives the unmodified JSON line as a string before any parsing occurs.
// RawMessageHandler is a callback for handling raw JSON lines from the stream
type RawMessageHandler func(line string)
// handlerMetrics tracks performance metrics for a handler
@ -88,7 +54,6 @@ type handlerMetrics struct {
totalTime time.Duration // Total processing time (for average calculation)
minTime time.Duration // Minimum processing time
maxTime time.Duration // Maximum processing time
queueHighWaterMark int // Maximum queue length seen
mu sync.Mutex // Protects the metrics
}
@ -99,10 +64,7 @@ type handlerInfo struct {
metrics handlerMetrics
}
// Streamer manages a connection to the RIPE RIS Live streaming API for receiving
// real-time BGP UPDATE messages. It handles automatic reconnection with exponential
// backoff, dispatches messages to registered handlers via per-handler queues, and
// implements backpressure to prevent queue overflow during high traffic periods.
// Streamer handles streaming BGP updates from RIS Live
type Streamer struct {
logger *logger.Logger
client *http.Client
@ -113,35 +75,21 @@ type Streamer struct {
running bool
metrics *metrics.Tracker
totalDropped uint64 // Total dropped messages across all handlers
random *rand.Rand // Random number generator for backpressure drops
bgpPeers map[string]bool // Track active BGP peers by peer IP
bgpPeersMu sync.RWMutex // Protects bgpPeers map
}
// New creates a new Streamer instance configured to connect to the RIS Live API.
// The logger is used for structured logging of connection events and errors.
// The metrics tracker is used to record message counts, bytes received, and connection status.
// New creates a new RIS streamer
func New(logger *logger.Logger, metrics *metrics.Tracker) *Streamer {
return &Streamer{
logger: logger,
client: &http.Client{
Timeout: 0, // No timeout for streaming
Transport: &http.Transport{
// Disable automatic gzip decompression so we can measure wire bytes
DisableCompression: true,
},
},
handlers: make([]*handlerInfo, 0),
metrics: metrics,
//nolint:gosec // Non-cryptographic randomness is fine for backpressure
random: rand.New(rand.NewSource(time.Now().UnixNano())),
bgpPeers: make(map[string]bool),
}
}
// RegisterHandler adds a MessageHandler to receive parsed RIS messages.
// Each handler gets its own dedicated queue and worker goroutine for processing.
// If the streamer is already running, the handler's worker is started immediately.
// RegisterHandler adds a callback for message processing
func (s *Streamer) RegisterHandler(handler MessageHandler) {
s.mu.Lock()
defer s.mu.Unlock()
@ -163,19 +111,14 @@ func (s *Streamer) RegisterHandler(handler MessageHandler) {
}
}
// RegisterRawHandler sets a callback to receive raw JSON lines from the stream
// before they are parsed. Only one raw handler can be registered at a time;
// subsequent calls will replace the previous handler.
// RegisterRawHandler sets a callback for raw message lines
func (s *Streamer) RegisterRawHandler(handler RawMessageHandler) {
s.mu.Lock()
defer s.mu.Unlock()
s.rawHandler = handler
}
// Start begins streaming BGP updates from the RIS Live API in a background goroutine.
// It starts worker goroutines for each registered handler and manages automatic
// reconnection with exponential backoff on connection failures.
// Returns an error if the streamer is already running.
// Start begins streaming in a goroutine
func (s *Streamer) Start() error {
s.mu.Lock()
defer s.mu.Unlock()
@ -203,9 +146,7 @@ func (s *Streamer) Start() error {
return nil
}
// Stop halts the streaming connection and shuts down all handler workers.
// It cancels the streaming context, closes all handler queues, and updates
// the connection status in metrics. This method is safe to call multiple times.
// Stop halts the streaming
func (s *Streamer) Stop() {
s.mu.Lock()
if s.cancel != nil {
@ -245,8 +186,7 @@ func (s *Streamer) runHandlerWorker(info *handlerInfo) {
}
}
// IsRunning reports whether the streamer is currently connected and processing messages.
// This is safe to call concurrently from multiple goroutines.
// IsRunning returns whether the streamer is currently active
func (s *Streamer) IsRunning() bool {
s.mu.RLock()
defer s.mu.RUnlock()
@ -254,26 +194,21 @@ func (s *Streamer) IsRunning() bool {
return s.running
}
// GetMetrics returns the current streaming metrics including message counts,
// bytes received, and throughput rates. The returned struct is a snapshot
// of the current state and is safe to use without synchronization.
// GetMetrics returns current streaming metrics
func (s *Streamer) GetMetrics() metrics.StreamMetrics {
return s.metrics.GetStreamMetrics()
}
// GetMetricsTracker returns the underlying metrics.Tracker instance for direct access
// to metrics recording and retrieval functionality.
// GetMetricsTracker returns the metrics tracker instance
func (s *Streamer) GetMetricsTracker() *metrics.Tracker {
return s.metrics
}
// HandlerStats contains performance metrics for a single message handler.
// It includes queue utilization, message counts, and processing time statistics.
// HandlerStats represents metrics for a single handler
type HandlerStats struct {
Name string
QueueLength int
QueueCapacity int
QueueHighWaterMark int
ProcessedCount uint64
DroppedCount uint64
AvgProcessTime time.Duration
@ -281,9 +216,7 @@ type HandlerStats struct {
MaxProcessTime time.Duration
}
// GetHandlerStats returns a snapshot of performance statistics for all registered
// handlers. The returned slice contains one HandlerStats entry per handler with
// current queue depth, processed/dropped counts, and processing time statistics.
// GetHandlerStats returns current handler statistics
func (s *Streamer) GetHandlerStats() []HandlerStats {
s.mu.RLock()
defer s.mu.RUnlock()
@ -297,7 +230,6 @@ func (s *Streamer) GetHandlerStats() []HandlerStats {
Name: fmt.Sprintf("%T", info.handler),
QueueLength: len(info.queue),
QueueCapacity: cap(info.queue),
QueueHighWaterMark: info.metrics.queueHighWaterMark,
ProcessedCount: info.metrics.processedCount,
DroppedCount: info.metrics.droppedCount,
MinProcessTime: info.metrics.minTime,
@ -323,9 +255,7 @@ func (s *Streamer) GetHandlerStats() []HandlerStats {
return stats
}
// GetDroppedMessages returns the total number of messages dropped across all handlers
// due to queue overflow or backpressure. This counter is monotonically increasing
// and is safe to call concurrently.
// GetDroppedMessages returns the total number of dropped messages
func (s *Streamer) GetDroppedMessages() uint64 {
return atomic.LoadUint64(&s.totalDropped)
}
@ -344,18 +274,16 @@ func (s *Streamer) logMetrics() {
uptime,
"total_messages",
metrics.TotalMessages,
"wire_bytes",
metrics.TotalWireBytes,
"wire_mb",
fmt.Sprintf("%.2f", float64(metrics.TotalWireBytes)/bytesPerMB),
"wire_mbps",
fmt.Sprintf("%.2f", metrics.WireBitsPerSec/bitsPerMegabit),
"decompressed_bytes",
"total_bytes",
metrics.TotalBytes,
"decompressed_mb",
"total_mb",
fmt.Sprintf("%.2f", float64(metrics.TotalBytes)/bytesPerMB),
"messages_per_sec",
fmt.Sprintf("%.2f", metrics.MessagesPerSec),
"bits_per_sec",
fmt.Sprintf("%.0f", metrics.BitsPerSec),
"mbps",
fmt.Sprintf("%.2f", metrics.BitsPerSec/bitsPerMegabit),
"total_dropped",
totalDropped,
)
@ -468,9 +396,6 @@ func (s *Streamer) stream(ctx context.Context) error {
return fmt.Errorf("failed to create request: %w", err)
}
// Explicitly request gzip compression
req.Header.Set("Accept-Encoding", "gzip")
resp, err := s.client.Do(req)
if err != nil {
return fmt.Errorf("failed to connect to RIS Live: %w", err)
@ -485,28 +410,9 @@ func (s *Streamer) stream(ctx context.Context) error {
return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
// Wrap body with counting reader to track actual wire bytes
wireCounter := &countingReader{reader: resp.Body}
// Check if response is gzip-compressed and decompress if needed
var reader io.Reader = wireCounter
if resp.Header.Get("Content-Encoding") == "gzip" {
gzReader, err := gzip.NewReader(wireCounter)
if err != nil {
return fmt.Errorf("failed to create gzip reader: %w", err)
}
defer func() { _ = gzReader.Close() }()
reader = gzReader
s.logger.Info("Connected to RIS Live stream", "compression", "gzip")
} else {
s.logger.Info("Connected to RIS Live stream", "compression", "none")
}
s.logger.Info("Connected to RIS Live stream")
s.metrics.SetConnected(true)
// Track wire bytes for metrics updates
var lastWireBytes int64
// Start metrics logging goroutine
metricsTicker := time.NewTicker(metricsLogInterval)
defer metricsTicker.Stop()
@ -522,27 +428,7 @@ func (s *Streamer) stream(ctx context.Context) error {
}
}()
// Wire byte update ticker - update metrics with actual wire bytes periodically
wireUpdateTicker := time.NewTicker(time.Second)
defer wireUpdateTicker.Stop()
go func() {
for {
select {
case <-wireUpdateTicker.C:
currentBytes := wireCounter.Count()
delta := currentBytes - lastWireBytes
if delta > 0 {
s.metrics.RecordWireBytes(delta)
lastWireBytes = currentBytes
}
case <-ctx.Done():
return
}
}
}()
scanner := bufio.NewScanner(reader)
scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() {
select {
@ -558,7 +444,7 @@ func (s *Streamer) stream(ctx context.Context) error {
continue
}
// Update metrics with decompressed message size
// Update metrics with message size
s.updateMetrics(len(line))
// Call raw handler if registered
@ -611,32 +497,18 @@ func (s *Streamer) stream(ctx context.Context) error {
// BGP keepalive messages - silently process
continue
case "OPEN":
// BGP open messages - track peer as active
s.bgpPeersMu.Lock()
s.bgpPeers[msg.Peer] = true
peerCount := len(s.bgpPeers)
s.bgpPeersMu.Unlock()
s.metrics.SetBGPPeerCount(peerCount)
// BGP open messages
s.logger.Info("BGP session opened",
"peer", msg.Peer,
"peer_asn", msg.PeerASN,
"total_peers", peerCount,
)
continue
case "NOTIFICATION":
// BGP notification messages (session closed)
s.bgpPeersMu.Lock()
delete(s.bgpPeers, msg.Peer)
peerCount := len(s.bgpPeers)
s.bgpPeersMu.Unlock()
s.metrics.SetBGPPeerCount(peerCount)
// BGP notification messages (errors)
s.logger.Warn("BGP notification",
"peer", msg.Peer,
"peer_asn", msg.PeerASN,
"total_peers", peerCount,
)
continue
@ -644,45 +516,27 @@ func (s *Streamer) stream(ctx context.Context) error {
// Peer state changes - silently ignore
continue
default:
s.logger.Warn("Unknown message type, skipping",
s.logger.Error("Unknown message type",
"type", msg.Type,
"line", string(line),
)
continue
panic(fmt.Sprintf("Unknown RIS message type: %s", msg.Type))
}
// Dispatch to interested handlers
s.mu.RLock()
for _, info := range s.handlers {
if !info.handler.WantsMessage(msg.Type) {
continue
}
// Check if we should drop due to backpressure
if s.shouldDropForBackpressure(info) {
atomic.AddUint64(&info.metrics.droppedCount, 1)
atomic.AddUint64(&s.totalDropped, 1)
continue
}
// Try to queue the message
if info.handler.WantsMessage(msg.Type) {
select {
case info.queue <- &msg:
// Message queued successfully
// Update high water mark if needed
queueLen := len(info.queue)
info.metrics.mu.Lock()
if queueLen > info.metrics.queueHighWaterMark {
info.metrics.queueHighWaterMark = queueLen
}
info.metrics.mu.Unlock()
default:
// Queue is full, drop the message
atomic.AddUint64(&info.metrics.droppedCount, 1)
atomic.AddUint64(&s.totalDropped, 1)
}
}
}
s.mu.RUnlock()
}
@ -692,25 +546,3 @@ func (s *Streamer) stream(ctx context.Context) error {
return nil
}
// shouldDropForBackpressure determines if a message should be dropped based on queue utilization
func (s *Streamer) shouldDropForBackpressure(info *handlerInfo) bool {
// Calculate queue utilization
queueLen := len(info.queue)
queueCap := cap(info.queue)
utilization := float64(queueLen) / float64(queueCap)
// No drops below threshold
if utilization < backpressureThreshold {
return false
}
// Calculate drop probability (0.0 at threshold, 1.0 at 100% full)
dropProbability := (utilization - backpressureThreshold) * backpressureSlope
if dropProbability > 1.0 {
dropProbability = 1.0
}
// Random drop based on probability
return s.random.Float64() < dropProbability
}
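
The removed backpressure helper maps queue utilization to a drop probability that is 0.0 at the threshold and 1.0 when the queue is completely full. A standalone sketch of that curve follows, using assumed values for the two constants (the real ones are defined elsewhere in the package).

package main

import "fmt"

// Illustrative values, not the package's actual constants. The slope is
// chosen so the probability reaches 1.0 exactly at 100% utilization.
const (
	backpressureThreshold = 0.8
	backpressureSlope     = 1.0 / (1.0 - backpressureThreshold) // 5.0
)

func dropProbability(queueLen, queueCap int) float64 {
	utilization := float64(queueLen) / float64(queueCap)
	if utilization < backpressureThreshold {
		return 0
	}
	p := (utilization - backpressureThreshold) * backpressureSlope
	if p > 1.0 {
		p = 1.0
	}

	return p
}

func main() {
	for _, n := range []int{500, 800, 900, 950, 1000} {
		fmt.Printf("queue %4d/1000 -> drop probability %.2f\n", n, dropProbability(n, 1000))
	}
}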

View File

@ -3,78 +3,15 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>AS{{.ASN.ASN}} - {{.ASN.Handle}} - RouteWatch</title>
<title>AS{{.ASN.Number}} - {{.ASN.Handle}} - RouteWatch</title>
<style>
* {
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 0;
padding: 20px;
background: #f5f5f5;
color: #333;
}
/* Navbar styles */
.navbar {
background: #2c3e50;
padding: 0 20px;
display: flex;
align-items: center;
justify-content: space-between;
height: 56px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.navbar-brand {
display: flex;
align-items: center;
gap: 8px;
font-size: 18px;
font-weight: 600;
}
.navbar-brand a {
color: white;
text-decoration: none;
}
.navbar-brand a:hover {
color: #ecf0f1;
}
.navbar-brand .by {
font-weight: normal;
color: #95a5a6;
font-size: 14px;
}
.navbar-brand .author {
color: #3498db;
font-weight: normal;
}
.navbar-brand .author:hover {
text-decoration: underline;
}
.navbar-links {
display: flex;
gap: 20px;
}
.navbar-links a {
color: #ecf0f1;
text-decoration: none;
font-size: 14px;
padding: 8px 12px;
border-radius: 4px;
transition: background-color 0.2s;
}
.navbar-links a:hover {
background: rgba(255,255,255,0.1);
}
.navbar-links a.active {
background: rgba(255,255,255,0.15);
}
/* Main content */
.main-content {
padding: 20px;
}
.container {
max-width: 1200px;
margin: 0 auto;
@ -196,22 +133,10 @@
</style>
</head>
<body>
<nav class="navbar">
<div class="navbar-brand">
<a href="/">routewatch</a>
<span class="by">by</span>
<a href="https://sneak.berlin" class="author">@sneak</a>
</div>
<div class="navbar-links">
<a href="/">Home</a>
<a href="/status">Status</a>
</div>
</nav>
<main class="main-content">
<div class="container">
<a href="/status" class="nav-link">← Back to Status</a>
<h1>AS{{.ASN.ASN}}{{if .ASN.Handle}} - {{.ASN.Handle}}{{end}}</h1>
<h1>AS{{.ASN.Number}}{{if .ASN.Handle}} - {{.ASN.Handle}}{{end}}</h1>
{{if .ASN.Description}}
<p class="subtitle">{{.ASN.Description}}</p>
{{end}}
@ -229,10 +154,6 @@
<div class="info-label">IPv6 Prefixes</div>
<div class="info-value">{{.IPv6Count}}</div>
</div>
<div class="info-card">
<div class="info-label">Peer ASNs</div>
<div class="info-value">{{.PeerCount}}</div>
</div>
<div class="info-card">
<div class="info-label">First Seen</div>
<div class="info-value">{{.ASN.FirstSeen.Format "2006-01-02"}}</div>
@ -257,7 +178,7 @@
<tbody>
{{range .IPv4Prefixes}}
<tr>
<td><a href="{{.Prefix | prefixURL}}" class="prefix-link">{{.Prefix}}</a></td>
<td><a href="/prefix/{{.Prefix | urlEncode}}" class="prefix-link">{{.Prefix}}</a></td>
<td>/{{.MaskLength}}</td>
<td>{{.LastUpdated.Format "2006-01-02 15:04:05"}}</td>
<td class="age">{{.LastUpdated | timeSince}}</td>
@ -286,7 +207,7 @@
<tbody>
{{range .IPv6Prefixes}}
<tr>
<td><a href="{{.Prefix | prefixURL}}" class="prefix-link">{{.Prefix}}</a></td>
<td><a href="/prefix/{{.Prefix | urlEncode}}" class="prefix-link">{{.Prefix}}</a></td>
<td>/{{.MaskLength}}</td>
<td>{{.LastUpdated.Format "2006-01-02 15:04:05"}}</td>
<td class="age">{{.LastUpdated | timeSince}}</td>
@ -302,45 +223,6 @@
<p>No prefixes announced by this AS</p>
</div>
{{end}}
{{if .Peers}}
<div class="prefix-section">
<div class="prefix-header">
<h2>Peer ASNs</h2>
<span class="prefix-count">{{.PeerCount}}</span>
</div>
<table class="prefix-table">
<thead>
<tr>
<th>ASN</th>
<th>Handle</th>
<th>Description</th>
<th>First Seen</th>
<th>Last Seen</th>
</tr>
</thead>
<tbody>
{{range .Peers}}
<tr>
<td><a href="/as/{{.ASN}}" class="prefix-link">AS{{.ASN}}</a></td>
<td>{{if .Handle}}{{.Handle}}{{else}}-{{end}}</td>
<td>{{if .Description}}{{.Description}}{{else}}-{{end}}</td>
<td>{{.FirstSeen.Format "2006-01-02"}}</td>
<td>{{.LastSeen.Format "2006-01-02"}}</td>
</tr>
{{end}}
</tbody>
</table>
</div>
{{else}}
<div class="prefix-section">
<h2>Peer ASNs</h2>
<div class="empty-state">
<p>No peering relationships found for this AS</p>
</div>
</div>
{{end}}
</div>
</main>
</body>
</html>

View File

@ -1,447 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>RouteWatch - BGP Route Monitor</title>
<style>
* {
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 0;
background: #f5f5f5;
color: #333;
}
/* Navbar styles */
.navbar {
background: #2c3e50;
padding: 0 20px;
display: flex;
align-items: center;
justify-content: space-between;
height: 56px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.navbar-brand {
display: flex;
align-items: center;
gap: 8px;
font-size: 18px;
font-weight: 600;
}
.navbar-brand a {
color: white;
text-decoration: none;
}
.navbar-brand a:hover {
color: #ecf0f1;
}
.navbar-brand .by {
font-weight: normal;
color: #95a5a6;
font-size: 14px;
}
.navbar-brand .author {
color: #3498db;
font-weight: normal;
}
.navbar-brand .author:hover {
text-decoration: underline;
}
.navbar-links {
display: flex;
gap: 20px;
}
.navbar-links a {
color: #ecf0f1;
text-decoration: none;
font-size: 14px;
padding: 8px 12px;
border-radius: 4px;
transition: background-color 0.2s;
}
.navbar-links a:hover {
background: rgba(255,255,255,0.1);
}
.navbar-links a.active {
background: rgba(255,255,255,0.15);
}
/* Main content */
.main-content {
max-width: 1200px;
margin: 0 auto;
padding: 30px 20px;
}
/* Stats overview */
.stats-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 20px;
margin-bottom: 40px;
}
.stat-card {
background: white;
padding: 24px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
text-align: center;
}
.stat-value {
font-size: 32px;
font-weight: bold;
color: #2c3e50;
margin-bottom: 8px;
}
.stat-label {
font-size: 14px;
color: #7f8c8d;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.stat-card.connected .stat-value {
color: #27ae60;
}
.stat-card.disconnected .stat-value {
color: #e74c3c;
}
/* Search section */
.search-section {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(350px, 1fr));
gap: 24px;
margin-bottom: 40px;
}
.search-card {
background: white;
padding: 24px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.search-card h2 {
margin: 0 0 16px 0;
font-size: 18px;
color: #2c3e50;
}
.search-input-group {
display: flex;
gap: 10px;
}
.search-input-group input {
flex: 1;
padding: 12px 16px;
border: 1px solid #ddd;
border-radius: 6px;
font-size: 14px;
outline: none;
transition: border-color 0.2s;
}
.search-input-group input:focus {
border-color: #3498db;
}
.search-input-group button {
padding: 12px 24px;
background: #3498db;
color: white;
border: none;
border-radius: 6px;
font-size: 14px;
cursor: pointer;
transition: background-color 0.2s;
}
.search-input-group button:hover {
background: #2980b9;
}
.search-input-group button:disabled {
background: #bdc3c7;
cursor: not-allowed;
}
.search-hint {
font-size: 12px;
color: #95a5a6;
margin-top: 8px;
}
/* IP Lookup result */
.ip-result {
margin-top: 16px;
display: none;
}
.ip-result.visible {
display: block;
}
.ip-result-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 8px;
}
.ip-result-header h3 {
margin: 0;
font-size: 14px;
color: #2c3e50;
}
.ip-result-header button {
background: none;
border: none;
color: #e74c3c;
cursor: pointer;
font-size: 12px;
}
.ip-result pre {
background: #2c3e50;
color: #ecf0f1;
padding: 16px;
border-radius: 6px;
overflow-x: auto;
font-size: 12px;
line-height: 1.5;
margin: 0;
max-height: 400px;
overflow-y: auto;
}
.ip-result .error {
background: #fee;
color: #c00;
padding: 12px;
border-radius: 6px;
font-size: 14px;
}
.ip-result .loading {
color: #7f8c8d;
font-style: italic;
}
/* Footer */
.footer {
margin-top: 40px;
padding: 20px;
background: white;
border-radius: 8px;
box-shadow: 0 -2px 10px rgba(0,0,0,0.05);
text-align: center;
color: #7f8c8d;
font-size: 14px;
}
.footer a {
color: #3498db;
text-decoration: none;
}
.footer a:hover {
text-decoration: underline;
}
.footer .separator {
margin: 0 10px;
color: #ddd;
}
</style>
</head>
<body>
<nav class="navbar">
<div class="navbar-brand">
<a href="/">routewatch</a>
<span class="by">by</span>
<a href="https://sneak.berlin" class="author">@sneak</a>
</div>
<div class="navbar-links">
<a href="/" class="active">Home</a>
<a href="/status">Status</a>
</div>
</nav>
<main class="main-content">
<div class="stats-grid">
<div class="stat-card" id="status-card">
<div class="stat-value" id="stat-status">-</div>
<div class="stat-label">Status</div>
</div>
<div class="stat-card">
<div class="stat-value" id="stat-routes">-</div>
<div class="stat-label">Live Routes</div>
</div>
<div class="stat-card">
<div class="stat-value" id="stat-asns">-</div>
<div class="stat-label">Autonomous Systems</div>
</div>
<div class="stat-card">
<div class="stat-value" id="stat-prefixes">-</div>
<div class="stat-label">Prefixes</div>
</div>
<div class="stat-card">
<div class="stat-value" id="stat-peers">-</div>
<div class="stat-label">BGP Peers</div>
</div>
<div class="stat-card">
<div class="stat-value" id="stat-updates">-</div>
<div class="stat-label">Updates/sec</div>
</div>
</div>
<div class="search-section">
<div class="search-card">
<h2>AS Number Lookup</h2>
<form id="asn-form" class="search-input-group">
<input type="text" id="asn-input" placeholder="e.g., 15169 or AS15169" autocomplete="off">
<button type="submit">Lookup</button>
</form>
<p class="search-hint">Enter an AS number to view its announced prefixes and peers</p>
</div>
<div class="search-card">
<h2>AS Name Search</h2>
<form id="asname-form" class="search-input-group">
<input type="text" id="asname-input" placeholder="e.g., Google, Cloudflare" autocomplete="off">
<button type="submit">Search</button>
</form>
<p class="search-hint">Search for autonomous systems by organization name</p>
<div id="asname-results"></div>
</div>
<div class="search-card">
<h2>IP Address Lookup</h2>
<form id="ip-form" class="search-input-group">
<input type="text" id="ip-input" placeholder="e.g., 8.8.8.8 or 2001:4860:4860::8888" autocomplete="off">
<button type="submit">Lookup</button>
</form>
<p class="search-hint">Get routing information for any IP address</p>
<div id="ip-result" class="ip-result">
<div class="ip-result-header">
<h3>Result</h3>
<button type="button" id="ip-result-close">Clear</button>
</div>
<pre id="ip-result-content"></pre>
</div>
</div>
</div>
</main>
<footer class="footer">
<span><a href="{{appRepoURL}}">{{appName}}</a> by <a href="{{appAuthorURL}}">{{appAuthor}}</a></span>
<span class="separator">|</span>
<span>{{appLicense}}</span>
<span class="separator">|</span>
<span><a href="{{appGitCommitURL}}">{{appGitRevision}}</a></span>
</footer>
<script>
function formatNumber(num) {
if (num >= 1000000) {
return (num / 1000000).toFixed(1) + 'M';
} else if (num >= 1000) {
return (num / 1000).toFixed(1) + 'K';
}
return num.toLocaleString();
}
// Fetch and display stats
function updateStats() {
fetch('/api/v1/stats')
.then(response => response.json())
.then(response => {
if (response.status !== 'ok') return;
const data = response.data;
const statusCard = document.getElementById('status-card');
const statusEl = document.getElementById('stat-status');
statusEl.textContent = data.connected ? 'Connected' : 'Disconnected';
statusCard.className = 'stat-card ' + (data.connected ? 'connected' : 'disconnected');
document.getElementById('stat-routes').textContent = formatNumber(data.live_routes);
document.getElementById('stat-asns').textContent = formatNumber(data.asns);
document.getElementById('stat-prefixes').textContent = formatNumber(data.prefixes);
if (data.stream) {
document.getElementById('stat-peers').textContent = formatNumber(data.stream.bgp_peer_count);
}
const totalUpdates = data.ipv4_updates_per_sec + data.ipv6_updates_per_sec;
document.getElementById('stat-updates').textContent = totalUpdates.toFixed(1);
})
.catch(() => {
document.getElementById('stat-status').textContent = 'Error';
document.getElementById('status-card').className = 'stat-card disconnected';
});
}
// ASN lookup
document.getElementById('asn-form').addEventListener('submit', function(e) {
e.preventDefault();
let asn = document.getElementById('asn-input').value.trim();
// Remove 'AS' prefix if present
asn = asn.replace(/^AS/i, '');
if (asn && /^\d+$/.test(asn)) {
window.location.href = '/as/' + asn;
}
});
// AS name search
document.getElementById('asname-form').addEventListener('submit', function(e) {
e.preventDefault();
const query = document.getElementById('asname-input').value.trim();
if (!query) return;
const resultsDiv = document.getElementById('asname-results');
resultsDiv.innerHTML = '<p class="loading" style="color: #7f8c8d; margin-top: 12px;">Searching...</p>';
// Use a simple client-side search against the asinfo data
// For now, redirect to AS page if it looks like an ASN
if (/^\d+$/.test(query)) {
window.location.href = '/as/' + query;
return;
}
// Show a message that server-side search is coming
resultsDiv.innerHTML = '<p style="color: #7f8c8d; margin-top: 12px; font-size: 13px;">AS name search coming soon. For now, try an AS number.</p>';
});
// IP lookup
document.getElementById('ip-form').addEventListener('submit', function(e) {
e.preventDefault();
const ip = document.getElementById('ip-input').value.trim();
if (!ip) return;
const resultDiv = document.getElementById('ip-result');
const contentEl = document.getElementById('ip-result-content');
resultDiv.classList.add('visible');
contentEl.className = '';
contentEl.textContent = 'Loading...';
contentEl.classList.add('loading');
fetch('/ip/' + encodeURIComponent(ip))
.then(response => response.json())
.then(response => {
contentEl.classList.remove('loading');
if (response.status === 'error') {
contentEl.className = 'error';
contentEl.textContent = 'Error: ' + response.error.msg;
} else {
contentEl.className = '';
contentEl.textContent = JSON.stringify(response.data, null, 2);
}
})
.catch(error => {
contentEl.classList.remove('loading');
contentEl.className = 'error';
contentEl.textContent = 'Error: ' + error.message;
});
});
// Close IP result
document.getElementById('ip-result-close').addEventListener('click', function() {
document.getElementById('ip-result').classList.remove('visible');
document.getElementById('ip-input').value = '';
});
// Initial load and refresh stats every 5 seconds
updateStats();
setInterval(updateStats, 5000);
</script>
</body>
</html>

View File

@ -5,76 +5,13 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{{.Prefix}} - RouteWatch</title>
<style>
* {
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 0;
padding: 20px;
background: #f5f5f5;
color: #333;
}
/* Navbar styles */
.navbar {
background: #2c3e50;
padding: 0 20px;
display: flex;
align-items: center;
justify-content: space-between;
height: 56px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.navbar-brand {
display: flex;
align-items: center;
gap: 8px;
font-size: 18px;
font-weight: 600;
}
.navbar-brand a {
color: white;
text-decoration: none;
}
.navbar-brand a:hover {
color: #ecf0f1;
}
.navbar-brand .by {
font-weight: normal;
color: #95a5a6;
font-size: 14px;
}
.navbar-brand .author {
color: #3498db;
font-weight: normal;
}
.navbar-brand .author:hover {
text-decoration: underline;
}
.navbar-links {
display: flex;
gap: 20px;
}
.navbar-links a {
color: #ecf0f1;
text-decoration: none;
font-size: 14px;
padding: 8px 12px;
border-radius: 4px;
transition: background-color 0.2s;
}
.navbar-links a:hover {
background: rgba(255,255,255,0.1);
}
.navbar-links a.active {
background: rgba(255,255,255,0.15);
}
/* Main content */
.main-content {
padding: 20px;
}
.container {
width: 90%;
max-width: 1600px;
@ -243,20 +180,9 @@
</style>
</head>
<body>
<nav class="navbar">
<div class="navbar-brand">
<a href="/">routewatch</a>
<span class="by">by</span>
<a href="https://sneak.berlin" class="author">@sneak</a>
</div>
<div class="navbar-links">
<a href="/">Home</a>
<a href="/status">Status</a>
</div>
</nav>
<main class="main-content">
<div class="container">
<a href="/status" class="nav-link">← Back to Status</a>
<h1>{{.Prefix}}</h1>
<p class="subtitle">IPv{{.IPVersion}} Prefix{{if .MaskLength}} • /{{.MaskLength}}{{end}}</p>
@ -281,7 +207,7 @@
<div class="origin-list">
{{range .Origins}}
<div class="origin-item">
<a href="/as/{{.ASN}}" class="as-link">AS{{.ASN}}</a>
<a href="/as/{{.Number}}" class="as-link">AS{{.Number}}</a>
{{if .Handle}} ({{.Handle}}){{end}}
<span style="color: #7f8c8d; margin-left: 10px;">{{.PeerCount}} peer{{if ne .PeerCount 1}}s{{end}}</span>
</div>
@ -314,7 +240,7 @@
<a href="/as/{{.OriginASN}}" class="as-link">AS{{.OriginASN}}</a>
</td>
<td class="peer-ip">{{.PeerIP}}</td>
<td class="as-path">{{range $i, $as := .ASPathWithHandle}}{{if $i}} → {{end}}<a href="/as/{{$as.ASN}}" class="as-link">{{if $as.Handle}}{{$as.Handle}}{{else}}AS{{$as.ASN}}{{end}}</a>{{end}}</td>
<td class="as-path">{{range $i, $as := .ASPathWithHandle}}{{if $i}} → {{end}}<a href="/as/{{$as.Number}}" class="as-link">{{if $as.Handle}}{{$as.Handle}}{{else}}AS{{$as.Number}}{{end}}</a>{{end}}</td>
<td class="peer-ip">{{.NextHop}}</td>
<td>{{.LastUpdated.Format "2006-01-02 15:04:05"}}</td>
<td class="age">{{.LastUpdated | timeSince}}</td>
@ -329,6 +255,5 @@
</div>
{{end}}
</div>
</main>
</body>
</html>

View File

@ -5,76 +5,12 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Prefixes with /{{ .MaskLength }} - RouteWatch</title>
<style>
* {
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 0;
background: #f5f5f5;
}
/* Navbar styles */
.navbar {
background: #2c3e50;
padding: 0 20px;
display: flex;
align-items: center;
justify-content: space-between;
height: 56px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.navbar-brand {
display: flex;
align-items: center;
gap: 8px;
font-size: 18px;
font-weight: 600;
}
.navbar-brand a {
color: white;
text-decoration: none;
}
.navbar-brand a:hover {
color: #ecf0f1;
}
.navbar-brand .by {
font-weight: normal;
color: #95a5a6;
font-size: 14px;
}
.navbar-brand .author {
color: #3498db;
font-weight: normal;
}
.navbar-brand .author:hover {
text-decoration: underline;
}
.navbar-links {
display: flex;
gap: 20px;
}
.navbar-links a {
color: #ecf0f1;
text-decoration: none;
font-size: 14px;
padding: 8px 12px;
border-radius: 4px;
transition: background-color 0.2s;
}
.navbar-links a:hover {
background: rgba(255,255,255,0.1);
}
.navbar-links a.active {
background: rgba(255,255,255,0.15);
}
/* Main content */
.main-content {
max-width: 1200px;
margin: 0 auto;
padding: 20px;
background: #f5f5f5;
}
h1 {
color: #333;
@ -142,19 +78,7 @@
</style>
</head>
<body>
<nav class="navbar">
<div class="navbar-brand">
<a href="/">routewatch</a>
<span class="by">by</span>
<a href="https://sneak.berlin" class="author">@sneak</a>
</div>
<div class="navbar-links">
<a href="/">Home</a>
<a href="/status">Status</a>
</div>
</nav>
<main class="main-content">
<a href="/status" class="back-link">← Back to Status</a>
<h1>IPv{{ .IPVersion }} Prefixes with /{{ .MaskLength }}</h1>
<p class="subtitle">Showing {{ .Count }} randomly selected prefixes</p>
@ -169,7 +93,7 @@
<tbody>
{{ range .Prefixes }}
<tr>
<td><a href="{{ .Prefix | prefixURL }}" class="prefix-link">{{ .Prefix }}</a></td>
<td><a href="/prefix/{{ .Prefix | urlEncode }}" class="prefix-link">{{ .Prefix }}</a></td>
<td class="age">{{ .Age }}</td>
<td>
<a href="/as/{{ .OriginASN }}" class="as-link">
@ -180,6 +104,5 @@
{{ end }}
</tbody>
</table>
</main>
</body>
</html>

View File

@ -5,76 +5,12 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>RouteWatch Status</title>
<style>
* {
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 0;
background: #f5f5f5;
}
/* Navbar styles */
.navbar {
background: #2c3e50;
padding: 0 20px;
display: flex;
align-items: center;
justify-content: space-between;
height: 56px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.navbar-brand {
display: flex;
align-items: center;
gap: 8px;
font-size: 18px;
font-weight: 600;
}
.navbar-brand a {
color: white;
text-decoration: none;
}
.navbar-brand a:hover {
color: #ecf0f1;
}
.navbar-brand .by {
font-weight: normal;
color: #95a5a6;
font-size: 14px;
}
.navbar-brand .author {
color: #3498db;
font-weight: normal;
}
.navbar-brand .author:hover {
text-decoration: underline;
}
.navbar-links {
display: flex;
gap: 20px;
}
.navbar-links a {
color: #ecf0f1;
text-decoration: none;
font-size: 14px;
padding: 8px 12px;
border-radius: 4px;
transition: background-color 0.2s;
}
.navbar-links a:hover {
background: rgba(255,255,255,0.1);
}
.navbar-links a.active {
background: rgba(255,255,255,0.15);
}
/* Main content */
.main-content {
max-width: 1200px;
margin: 0 auto;
padding: 20px;
background: #f5f5f5;
}
h1 {
color: #333;
@ -136,43 +72,10 @@
border-radius: 4px;
margin-top: 20px;
}
.footer {
margin-top: 40px;
padding: 20px;
background: white;
border-radius: 8px;
box-shadow: 0 -2px 10px rgba(0,0,0,0.1);
text-align: center;
color: #666;
font-size: 14px;
}
.footer a {
color: #0066cc;
text-decoration: none;
}
.footer a:hover {
text-decoration: underline;
}
.footer .separator {
margin: 0 10px;
color: #ccc;
}
</style>
</head>
<body>
<nav class="navbar">
<div class="navbar-brand">
<a href="/">routewatch</a>
<span class="by">by</span>
<a href="https://sneak.berlin" class="author">@sneak</a>
</div>
<div class="navbar-links">
<a href="/">Home</a>
<a href="/status" class="active">Status</a>
</div>
</nav>
<main class="main-content">
<h1>RouteWatch Status</h1>
<div id="error" class="error" style="display: none;"></div>
<div class="status-grid">
<div class="status-card">
@ -201,18 +104,6 @@
<div class="status-card">
<h2>Stream Statistics</h2>
<div class="metric">
<span class="metric-label">Connection Duration</span>
<span class="metric-value" id="connection_duration">-</span>
</div>
<div class="metric">
<span class="metric-label">Reconnections</span>
<span class="metric-value" id="reconnect_count">-</span>
</div>
<div class="metric">
<span class="metric-label">BGP Peers</span>
<span class="metric-value" id="bgp_peer_count">-</span>
</div>
<div class="metric">
<span class="metric-label">Total Messages</span>
<span class="metric-value" id="total_messages">-</span>
@ -221,49 +112,13 @@
<span class="metric-label">Messages/sec</span>
<span class="metric-value" id="messages_per_sec">-</span>
</div>
<div class="metric">
<span class="metric-label">Announcements</span>
<span class="metric-value" id="announcements">-</span>
</div>
<div class="metric">
<span class="metric-label">Withdrawals</span>
<span class="metric-value" id="withdrawals">-</span>
</div>
<div class="metric">
<span class="metric-label">Route Churn/sec</span>
<span class="metric-value" id="route_churn_per_sec">-</span>
</div>
<div class="metric">
<span class="metric-label">Total Data</span>
<span class="metric-value" id="total_wire_bytes">-</span>
<span class="metric-value" id="total_bytes">-</span>
</div>
<div class="metric">
<span class="metric-label">Throughput</span>
<span class="metric-value" id="wire_mbits_per_sec">-</span>
</div>
</div>
<div class="status-card">
<h2>GC Statistics</h2>
<div class="metric">
<span class="metric-label">GC Runs</span>
<span class="metric-value" id="gc_num">-</span>
</div>
<div class="metric">
<span class="metric-label">Total Pause</span>
<span class="metric-value" id="gc_total_pause">-</span>
</div>
<div class="metric">
<span class="metric-label">Last Pause</span>
<span class="metric-value" id="gc_last_pause">-</span>
</div>
<div class="metric">
<span class="metric-label">Heap Alloc</span>
<span class="metric-value" id="gc_heap_alloc">-</span>
</div>
<div class="metric">
<span class="metric-label">Heap Sys</span>
<span class="metric-value" id="gc_heap_sys">-</span>
<span class="metric-value" id="mbits_per_sec">-</span>
</div>
</div>
@ -321,46 +176,6 @@
<span class="metric-label">IPv6 Updates/sec</span>
<span class="metric-value" id="ipv6_updates_per_sec">-</span>
</div>
<div class="metric">
<span class="metric-label">Oldest Route</span>
<span class="metric-value" id="oldest_route">-</span>
</div>
<div class="metric">
<span class="metric-label">Newest Route</span>
<span class="metric-value" id="newest_route">-</span>
</div>
</div>
<div class="status-card">
<h2>WHOIS Fetcher</h2>
<div class="metric">
<span class="metric-label">Fresh ASNs</span>
<span class="metric-value" id="whois_fresh">-</span>
</div>
<div class="metric">
<span class="metric-label">Stale ASNs</span>
<span class="metric-value" id="whois_stale">-</span>
</div>
<div class="metric">
<span class="metric-label">Never Fetched</span>
<span class="metric-value" id="whois_never">-</span>
</div>
<div class="metric">
<span class="metric-label">Fresh %</span>
<span class="metric-value" id="whois_percent">-</span>
</div>
<div class="metric">
<span class="metric-label">Successes (1h)</span>
<span class="metric-value" id="whois_successes">-</span>
</div>
<div class="metric">
<span class="metric-label">Errors (1h)</span>
<span class="metric-value" id="whois_errors">-</span>
</div>
<div class="metric">
<span class="metric-label">Current Interval</span>
<span class="metric-value" id="whois_interval">-</span>
</div>
</div>
</div>
@ -409,22 +224,6 @@
}
}
function formatRelativeTime(isoString) {
if (!isoString) return '-';
const date = new Date(isoString);
const now = new Date();
const diffMs = now - date;
const diffSec = Math.floor(diffMs / 1000);
const diffMin = Math.floor(diffSec / 60);
const diffHour = Math.floor(diffMin / 60);
const diffDay = Math.floor(diffHour / 24);
if (diffSec < 60) return diffSec + 's ago';
if (diffMin < 60) return diffMin + 'm ago';
if (diffHour < 24) return diffHour + 'h ' + (diffMin % 60) + 'm ago';
return diffDay + 'd ' + (diffHour % 24) + 'h ago';
}
function updatePrefixDistribution(elementId, distribution) {
const container = document.getElementById(elementId);
container.innerHTML = '';
@ -437,16 +236,12 @@
// Sort by mask length
distribution.sort((a, b) => a.mask_length - b.mask_length);
// Determine the URL path based on whether this is IPv4 or IPv6
const isIPv6 = elementId.includes('ipv6');
const urlPath = isIPv6 ? '/prefixlength6/' : '/prefixlength/';
distribution.forEach(item => {
const metric = document.createElement('div');
metric.className = 'metric';
metric.innerHTML = `
<span class="metric-label">/${item.mask_length}</span>
<a href="${urlPath}${item.mask_length}" class="metric-value metric-link">${formatNumber(item.count)}</a>
<a href="/prefixlength/${item.mask_length}" class="metric-value metric-link">${formatNumber(item.count)}</a>
`;
container.appendChild(metric);
});
@ -469,10 +264,6 @@
<span class="metric-label">Queue</span>
<span class="metric-value">${handler.queue_length}/${handler.queue_capacity}</span>
</div>
<div class="metric">
<span class="metric-label">High Water Mark</span>
<span class="metric-value">${handler.queue_high_water_mark}/${handler.queue_capacity} (${Math.round(handler.queue_high_water_mark * 100 / handler.queue_capacity)}%)</span>
</div>
<div class="metric">
<span class="metric-label">Processed</span>
<span class="metric-value">${formatNumber(handler.processed_count)}</span>
@ -495,60 +286,6 @@
});
}
function resetAllFields() {
// Reset all metric fields to '-'
document.getElementById('connected').textContent = '-';
document.getElementById('connected').className = 'metric-value';
document.getElementById('uptime').textContent = '-';
document.getElementById('go_version').textContent = '-';
document.getElementById('goroutines').textContent = '-';
document.getElementById('memory_usage').textContent = '-';
document.getElementById('connection_duration').textContent = '-';
document.getElementById('reconnect_count').textContent = '-';
document.getElementById('bgp_peer_count').textContent = '-';
document.getElementById('total_messages').textContent = '-';
document.getElementById('messages_per_sec').textContent = '-';
document.getElementById('announcements').textContent = '-';
document.getElementById('withdrawals').textContent = '-';
document.getElementById('route_churn_per_sec').textContent = '-';
document.getElementById('total_wire_bytes').textContent = '-';
document.getElementById('wire_mbits_per_sec').textContent = '-';
document.getElementById('gc_num').textContent = '-';
document.getElementById('gc_total_pause').textContent = '-';
document.getElementById('gc_last_pause').textContent = '-';
document.getElementById('gc_heap_alloc').textContent = '-';
document.getElementById('gc_heap_sys').textContent = '-';
document.getElementById('asns').textContent = '-';
document.getElementById('prefixes').textContent = '-';
document.getElementById('ipv4_prefixes').textContent = '-';
document.getElementById('ipv6_prefixes').textContent = '-';
document.getElementById('peerings').textContent = '-';
document.getElementById('peers').textContent = '-';
document.getElementById('database_size').textContent = '-';
document.getElementById('live_routes').textContent = '-';
document.getElementById('ipv4_routes').textContent = '-';
document.getElementById('ipv6_routes').textContent = '-';
document.getElementById('ipv4_updates_per_sec').textContent = '-';
document.getElementById('ipv6_updates_per_sec').textContent = '-';
document.getElementById('oldest_route').textContent = '-';
document.getElementById('newest_route').textContent = '-';
document.getElementById('whois_fresh').textContent = '-';
document.getElementById('whois_stale').textContent = '-';
document.getElementById('whois_never').textContent = '-';
document.getElementById('whois_percent').textContent = '-';
document.getElementById('whois_successes').textContent = '-';
document.getElementById('whois_errors').textContent = '-';
document.getElementById('whois_errors').className = 'metric-value';
document.getElementById('whois_interval').textContent = '-';
// Clear handler stats
document.getElementById('handler-stats-container').innerHTML = '';
// Clear prefix distributions
document.getElementById('ipv4-prefix-distribution').innerHTML = '<div class="metric"><span class="metric-label">No data</span></div>';
document.getElementById('ipv6-prefix-distribution').innerHTML = '<div class="metric"><span class="metric-label">No data</span></div>';
}
function updateStatus() {
fetch('/api/v1/stats')
.then(response => response.json())
@ -557,7 +294,6 @@
if (response.status === 'error') {
document.getElementById('error').textContent = 'Error: ' + response.error.msg;
document.getElementById('error').style.display = 'block';
resetAllFields();
return;
}
@ -574,12 +310,10 @@
document.getElementById('go_version').textContent = data.go_version;
document.getElementById('goroutines').textContent = formatNumber(data.goroutines);
document.getElementById('memory_usage').textContent = data.memory_usage;
document.getElementById('connection_duration').textContent = data.connection_duration;
document.getElementById('reconnect_count').textContent = formatNumber(data.reconnect_count);
document.getElementById('total_messages').textContent = formatNumber(data.total_messages);
document.getElementById('messages_per_sec').textContent = data.messages_per_sec.toFixed(1);
document.getElementById('total_wire_bytes').textContent = formatBytes(data.total_wire_bytes);
document.getElementById('wire_mbits_per_sec').textContent = data.wire_mbits_per_sec.toFixed(2) + ' Mbps';
document.getElementById('total_bytes').textContent = formatBytes(data.total_bytes);
document.getElementById('mbits_per_sec').textContent = data.mbits_per_sec.toFixed(2) + ' Mbps';
document.getElementById('asns').textContent = formatNumber(data.asns);
document.getElementById('prefixes').textContent = formatNumber(data.prefixes);
document.getElementById('ipv4_prefixes').textContent = formatNumber(data.ipv4_prefixes);
@ -592,38 +326,6 @@
document.getElementById('ipv6_routes').textContent = formatNumber(data.ipv6_routes);
document.getElementById('ipv4_updates_per_sec').textContent = data.ipv4_updates_per_sec.toFixed(1);
document.getElementById('ipv6_updates_per_sec').textContent = data.ipv6_updates_per_sec.toFixed(1);
document.getElementById('oldest_route').textContent = formatRelativeTime(data.oldest_route);
document.getElementById('newest_route').textContent = formatRelativeTime(data.newest_route);
// Update stream stats
if (data.stream) {
document.getElementById('bgp_peer_count').textContent = formatNumber(data.stream.bgp_peer_count);
document.getElementById('announcements').textContent = formatNumber(data.stream.announcements);
document.getElementById('withdrawals').textContent = formatNumber(data.stream.withdrawals);
document.getElementById('route_churn_per_sec').textContent = data.stream.route_churn_per_sec.toFixed(1);
}
// Update GC stats
if (data.gc) {
document.getElementById('gc_num').textContent = formatNumber(data.gc.num_gc);
document.getElementById('gc_total_pause').textContent = data.gc.total_pause_ms + ' ms';
document.getElementById('gc_last_pause').textContent = data.gc.last_pause_ms.toFixed(3) + ' ms';
document.getElementById('gc_heap_alloc').textContent = formatBytes(data.gc.heap_alloc_bytes);
document.getElementById('gc_heap_sys').textContent = formatBytes(data.gc.heap_sys_bytes);
}
// Update WHOIS stats
if (data.whois_stats) {
document.getElementById('whois_fresh').textContent = formatNumber(data.whois_stats.fresh_asns);
document.getElementById('whois_stale').textContent = formatNumber(data.whois_stats.stale_asns);
document.getElementById('whois_never').textContent = formatNumber(data.whois_stats.never_fetched);
document.getElementById('whois_percent').textContent = data.whois_stats.fresh_percent.toFixed(1) + '%';
document.getElementById('whois_successes').textContent = formatNumber(data.whois_stats.successes_last_hour);
const errorsEl = document.getElementById('whois_errors');
errorsEl.textContent = formatNumber(data.whois_stats.errors_last_hour);
errorsEl.className = 'metric-value' + (data.whois_stats.errors_last_hour > 0 ? ' disconnected' : '');
document.getElementById('whois_interval').textContent = data.whois_stats.current_interval;
}
// Update handler stats
updateHandlerStats(data.handler_stats || []);
@ -638,22 +340,12 @@
.catch(error => {
document.getElementById('error').textContent = 'Error fetching status: ' + error;
document.getElementById('error').style.display = 'block';
resetAllFields();
});
}
// Update immediately and then every 2 seconds
// Update immediately and then every 500ms
updateStatus();
setInterval(updateStatus, 2000);
setInterval(updateStatus, 500);
</script>
</main>
<footer class="footer">
<span><a href="{{appRepoURL}}">{{appName}}</a> by <a href="{{appAuthorURL}}">{{appAuthor}}</a></span>
<span class="separator">|</span>
<span>{{appLicense}}</span>
<span class="separator">|</span>
<span><a href="{{appGitCommitURL}}">{{appGitRevision}}</a></span>
</footer>
</body>
</html>

View File

@ -5,16 +5,10 @@ import (
_ "embed"
"html/template"
"net/url"
"strings"
"sync"
"time"
"git.eeqj.de/sneak/routewatch/internal/version"
)
//go:embed index.html
var indexHTML string
//go:embed status.html
var statusHTML string
@ -29,15 +23,9 @@ var prefixLengthHTML string
// Templates contains all parsed templates
type Templates struct {
// Index is the template for the home page
Index *template.Template
// Status is the template for the main status page
Status *template.Template
// ASDetail is the template for displaying AS (Autonomous System) details
ASDetail *template.Template
// PrefixDetail is the template for displaying prefix details
PrefixDetail *template.Template
// PrefixLength is the template for displaying prefixes by length
PrefixLength *template.Template
}
@ -51,7 +39,6 @@ var (
const (
hoursPerDay = 24
daysPerMonth = 30
cidrPartCount = 2 // A CIDR has two parts: prefix and length
)
// timeSince returns a human-readable duration since the given time
@ -87,20 +74,6 @@ func timeSince(t time.Time) string {
return t.Format("2006-01-02")
}
// prefixURL generates a URL path for a prefix in CIDR notation.
// Takes a prefix like "192.168.1.0/24" and returns "/prefix/192.168.1.0/24"
// with the prefix part URL-encoded to handle IPv6 colons.
func prefixURL(cidr string) string {
// Split CIDR into prefix and length
parts := strings.SplitN(cidr, "/", cidrPartCount)
if len(parts) != cidrPartCount {
// Fallback if no slash found
return "/prefix/" + url.PathEscape(cidr) + "/0"
}
return "/prefix/" + url.PathEscape(parts[0]) + "/" + parts[1]
}
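
A standalone copy of the helper above with a few sample calls, showing the normal and fallback cases; the inputs are arbitrary examples.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

const cidrPartCount = 2 // a CIDR has two parts: prefix and length

// prefixURL repeats the template helper's logic for illustration.
func prefixURL(cidr string) string {
	parts := strings.SplitN(cidr, "/", cidrPartCount)
	if len(parts) != cidrPartCount {
		// Fallback if no slash found
		return "/prefix/" + url.PathEscape(cidr) + "/0"
	}

	return "/prefix/" + url.PathEscape(parts[0]) + "/" + parts[1]
}

func main() {
	fmt.Println(prefixURL("192.168.1.0/24")) // /prefix/192.168.1.0/24
	fmt.Println(prefixURL("2001:db8::/32"))  // IPv6 prefixes go through url.PathEscape as well
	fmt.Println(prefixURL("192.168.1.0"))    // fallback: /prefix/192.168.1.0/0
}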
// initTemplates parses all embedded templates
func initTemplates() {
var err error
@ -111,24 +84,10 @@ func initTemplates() {
funcs := template.FuncMap{
"timeSince": timeSince,
"urlEncode": url.QueryEscape,
"prefixURL": prefixURL,
"appName": func() string { return version.Name },
"appAuthor": func() string { return version.Author },
"appAuthorURL": func() string { return version.AuthorURL },
"appLicense": func() string { return version.License },
"appRepoURL": func() string { return version.RepoURL },
"appGitRevision": func() string { return version.GitRevisionShort },
"appGitCommitURL": func() string { return version.CommitURL() },
}
// Parse index template
defaultTemplates.Index, err = template.New("index").Funcs(funcs).Parse(indexHTML)
if err != nil {
panic("failed to parse index template: " + err.Error())
}
// Parse status template
defaultTemplates.Status, err = template.New("status").Funcs(funcs).Parse(statusHTML)
defaultTemplates.Status, err = template.New("status").Parse(statusHTML)
if err != nil {
panic("failed to parse status template: " + err.Error())
}
@ -159,11 +118,6 @@ func Get() *Templates {
return defaultTemplates
}
// IndexTemplate returns the parsed index template
func IndexTemplate() *template.Template {
return Get().Index
}
// StatusTemplate returns the parsed status template
func StatusTemplate() *template.Template {
return Get().Status

View File

@ -1,34 +0,0 @@
// Package version provides build version information
package version
// Build-time variables set via ldflags
//
//nolint:gochecknoglobals // These must be variables to allow ldflags injection at build time
var (
// GitRevision is the git commit hash
GitRevision = "unknown"
// GitRevisionShort is the short git commit hash (7 chars)
GitRevisionShort = "unknown"
)
const (
// Name is the program name
Name = "routewatch"
// Author is the program author
Author = "@sneak"
// AuthorURL is the author's website
AuthorURL = "https://sneak.berlin"
// License is the program license
License = "WTFPL"
// RepoURL is the git repository URL
RepoURL = "https://git.eeqj.de/sneak/routewatch"
)
// CommitURL returns the URL to view the current commit
func CommitURL() string {
if GitRevision == "unknown" {
return RepoURL
}
return RepoURL + "/commit/" + GitRevision
}

View File

@ -1,347 +0,0 @@
// Package whois provides WHOIS lookup functionality for ASN information.
package whois
import (
"bufio"
"context"
"fmt"
"net"
"regexp"
"strings"
"time"
)
// Timeout constants for WHOIS queries.
const (
dialTimeout = 10 * time.Second
readTimeout = 30 * time.Second
writeTimeout = 5 * time.Second
)
// Parsing constants.
const (
keyValueParts = 2 // Expected parts when splitting "key: value"
lacnicDateFormatLen = 8 // Length of YYYYMMDD date format
)
// WHOIS server addresses.
const (
whoisServerIANA = "whois.iana.org:43"
whoisServerARIN = "whois.arin.net:43"
whoisServerRIPE = "whois.ripe.net:43"
whoisServerAPNIC = "whois.apnic.net:43"
whoisServerLACNIC = "whois.lacnic.net:43"
whoisServerAFRINIC = "whois.afrinic.net:43"
)
// RIR identifiers.
const (
RIRARIN = "ARIN"
RIRRIPE = "RIPE"
RIRAPNIC = "APNIC"
RIRLACNIC = "LACNIC"
RIRAFRNIC = "AFRINIC"
)
// ASNInfo contains parsed WHOIS information for an ASN.
type ASNInfo struct {
ASN int
ASName string
OrgName string
OrgID string
Address string
CountryCode string
AbuseEmail string
AbusePhone string
TechEmail string
TechPhone string
RIR string
RegDate *time.Time
LastMod *time.Time
RawResponse string
}
// Client performs WHOIS lookups for ASNs.
type Client struct {
// Dialer for creating connections (can be overridden for testing)
dialer *net.Dialer
}
// NewClient creates a new WHOIS client.
func NewClient() *Client {
return &Client{
dialer: &net.Dialer{
Timeout: dialTimeout,
},
}
}
// LookupASN queries WHOIS for the given ASN and returns parsed information.
func (c *Client) LookupASN(ctx context.Context, asn int) (*ASNInfo, error) {
// Query IANA first to find the authoritative RIR
query := fmt.Sprintf("AS%d", asn)
ianaResp, err := c.query(ctx, whoisServerIANA, query)
if err != nil {
return nil, fmt.Errorf("IANA query failed: %w", err)
}
// Determine RIR from IANA response
rir, whoisServer := c.parseIANAReferral(ianaResp)
if whoisServer == "" {
// No referral, try to parse what we have
return c.parseResponse(asn, rir, ianaResp), nil
}
// Query the authoritative RIR
rirResp, err := c.query(ctx, whoisServer, query)
if err != nil {
// Return partial data from IANA if RIR query fails
info := c.parseResponse(asn, rir, ianaResp)
info.RawResponse = ianaResp + "\n--- RIR query failed: " + err.Error() + " ---\n"
return info, nil
}
// Combine responses and parse
fullResponse := ianaResp + "\n" + rirResp
info := c.parseResponse(asn, rir, fullResponse)
info.RawResponse = fullResponse
return info, nil
}
// query performs a raw WHOIS query to the specified server.
func (c *Client) query(ctx context.Context, server, query string) (string, error) {
conn, err := c.dialer.DialContext(ctx, "tcp", server)
if err != nil {
return "", fmt.Errorf("dial %s: %w", server, err)
}
defer func() { _ = conn.Close() }()
// Set deadlines
if err := conn.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil {
return "", fmt.Errorf("set write deadline: %w", err)
}
// Send query
if _, err := fmt.Fprintf(conn, "%s\r\n", query); err != nil {
return "", fmt.Errorf("write query: %w", err)
}
// Read response
if err := conn.SetReadDeadline(time.Now().Add(readTimeout)); err != nil {
return "", fmt.Errorf("set read deadline: %w", err)
}
var sb strings.Builder
scanner := bufio.NewScanner(conn)
for scanner.Scan() {
sb.WriteString(scanner.Text())
sb.WriteString("\n")
}
if err := scanner.Err(); err != nil {
return sb.String(), fmt.Errorf("read response: %w", err)
}
return sb.String(), nil
}
// parseIANAReferral extracts the RIR and WHOIS server from an IANA response.
func (c *Client) parseIANAReferral(response string) (rir, whoisServer string) {
lines := strings.Split(response, "\n")
for _, line := range lines {
line = strings.TrimSpace(line)
// Look for "refer:" line
if strings.HasPrefix(strings.ToLower(line), "refer:") {
server := strings.TrimSpace(strings.TrimPrefix(line, "refer:"))
server = strings.TrimSpace(strings.TrimPrefix(server, "Refer:"))
switch {
case strings.Contains(server, "arin"):
return RIRARIN, whoisServerARIN
case strings.Contains(server, "ripe"):
return RIRRIPE, whoisServerRIPE
case strings.Contains(server, "apnic"):
return RIRAPNIC, whoisServerAPNIC
case strings.Contains(server, "lacnic"):
return RIRLACNIC, whoisServerLACNIC
case strings.Contains(server, "afrinic"):
return RIRAFRNIC, whoisServerAFRINIC
default:
// Unknown server, add port if missing
if !strings.Contains(server, ":") {
server += ":43"
}
return "", server
}
}
// Also check organisation line for RIR hints
if strings.HasPrefix(strings.ToLower(line), "organisation:") {
org := strings.ToLower(line)
switch {
case strings.Contains(org, "arin"):
rir = RIRARIN
case strings.Contains(org, "ripe"):
rir = RIRRIPE
case strings.Contains(org, "apnic"):
rir = RIRAPNIC
case strings.Contains(org, "lacnic"):
rir = RIRLACNIC
case strings.Contains(org, "afrinic"):
rir = RIRAFRNIC
}
}
}
return rir, ""
}
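
To make the referral step concrete, here is a small standalone sketch that runs an illustrative (not captured) IANA-style fragment through the same "refer:" matching idea.

package main

import (
	"fmt"
	"strings"
)

// referral mirrors the "refer:" handling in parseIANAReferral above,
// reduced to just extracting the referred server name.
func referral(response string) string {
	for _, line := range strings.Split(response, "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(strings.ToLower(line), "refer:") {
			return strings.TrimSpace(strings.TrimPrefix(line, "refer:"))
		}
	}

	return ""
}

func main() {
	// Illustrative fragment only; real IANA output contains many more fields.
	sample := "% IANA WHOIS server\norganisation: ARIN\nrefer: whois.arin.net\n"
	fmt.Println(referral(sample)) // prints: whois.arin.net
}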
// parseResponse extracts ASN information from a WHOIS response.
func (c *Client) parseResponse(asn int, rir, response string) *ASNInfo {
info := &ASNInfo{
ASN: asn,
RIR: rir,
RawResponse: response,
}
lines := strings.Split(response, "\n")
var addressLines []string
for _, line := range lines {
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "%") || strings.HasPrefix(line, "#") {
continue
}
// Split on first colon
parts := strings.SplitN(line, ":", keyValueParts)
if len(parts) != keyValueParts {
continue
}
key := strings.TrimSpace(strings.ToLower(parts[0]))
value := strings.TrimSpace(parts[1])
if value == "" {
continue
}
switch key {
// AS Name (varies by RIR)
case "asname", "as-name":
if info.ASName == "" {
info.ASName = value
}
// Organization
case "orgname", "org-name", "owner":
if info.OrgName == "" {
info.OrgName = value
}
case "orgid", "org-id", "org":
if info.OrgID == "" {
info.OrgID = value
}
// Address (collect multiple lines)
case "address":
addressLines = append(addressLines, value)
// Country
case "country":
if info.CountryCode == "" && len(value) == 2 {
info.CountryCode = strings.ToUpper(value)
}
// Abuse contact
case "orgabuseemail", "abuse-mailbox":
if info.AbuseEmail == "" {
info.AbuseEmail = value
}
case "orgabusephone":
if info.AbusePhone == "" {
info.AbusePhone = value
}
// Tech contact
case "orgtechemail":
if info.TechEmail == "" {
info.TechEmail = value
}
case "orgtechphone":
if info.TechPhone == "" {
info.TechPhone = value
}
// Registration dates
case "regdate", "created":
if info.RegDate == nil {
info.RegDate = c.parseDate(value)
}
case "updated", "last-modified", "changed":
if info.LastMod == nil {
info.LastMod = c.parseDate(value)
}
}
}
// Combine address lines
if len(addressLines) > 0 {
info.Address = strings.Join(addressLines, "\n")
}
// Extract abuse email from comment lines (common in ARIN responses)
if info.AbuseEmail == "" {
info.AbuseEmail = c.extractAbuseEmail(response)
}
return info
}
// parseDate attempts to parse various date formats used in WHOIS responses.
func (c *Client) parseDate(value string) *time.Time {
// Common formats
formats := []string{
"2006-01-02",
"2006-01-02T15:04:05Z",
"2006-01-02T15:04:05-07:00",
"20060102",
"02-Jan-2006",
}
// Clean up value
value = strings.TrimSpace(value)
// Handle "YYYYMMDD" format from LACNIC
if len(value) == lacnicDateFormatLen {
if _, err := time.Parse("20060102", value); err == nil {
t, _ := time.Parse("20060102", value)
return &t
}
}
for _, format := range formats {
if t, err := time.Parse(format, value); err == nil {
return &t
}
}
return nil
}
// extractAbuseEmail extracts abuse email from response using regex.
func (c *Client) extractAbuseEmail(response string) string {
// Look for "Abuse contact for 'AS...' is 'email@domain'"
re := regexp.MustCompile(`[Aa]buse contact.*?is\s+['"]?([^\s'"]+@[^\s'"]+)['"]?`)
if matches := re.FindStringSubmatch(response); len(matches) > 1 {
return matches[1]
}
return ""
}
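
Taken together, the removed client exposes a two-call API: NewClient and LookupASN. A minimal usage sketch based only on the definitions above; the import path is assumed from the internal/version path seen earlier in this diff.

package main

import (
	"context"
	"fmt"
	"time"

	// Assumed import path for the package shown above.
	"git.eeqj.de/sneak/routewatch/internal/whois"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
	defer cancel()

	c := whois.NewClient()
	info, err := c.LookupASN(ctx, 15169) // any ASN; 15169 is just an example
	if err != nil {
		fmt.Println("lookup failed:", err)

		return
	}

	fmt.Println("ASN:  ", info.ASN)
	fmt.Println("Name: ", info.ASName)
	fmt.Println("Org:  ", info.OrgName)
	fmt.Println("RIR:  ", info.RIR)
	fmt.Println("Abuse:", info.AbuseEmail)
}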

178395
log.txt Normal file

File diff suppressed because it is too large