Initial commit: RouteWatch BGP stream monitor

- Connects to RIPE RIS Live stream to receive real-time BGP updates
- Stores BGP data in SQLite database:
  - ASNs with first/last seen timestamps
  - Prefixes with IPv4/IPv6 classification
  - BGP announcements and withdrawals
  - AS-to-AS peering relationships from AS paths
  - Live routing table tracking active routes
- HTTP server with statistics endpoints
- Metrics tracking with go-metrics
- Custom JSON unmarshaling to handle nested AS sets in paths
- Dependency injection with uber/fx
- Pure Go implementation (no CGO)
- Includes streamdumper utility for debugging raw messages
This commit is contained in:
Jeffrey Paul 2025-07-27 21:18:57 +02:00
commit 92f7527cc5
24 changed files with 3587 additions and 0 deletions

28
.gitignore vendored Normal file
View File

@ -0,0 +1,28 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
/bin/
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
go.work.sum
# env file
.env
# Database files
*.db
*.db-journal
*.db-wal

92
.golangci.yml Normal file
View File

@ -0,0 +1,92 @@
version: "2"
run:
go: "1.24"
tests: false
linters:
enable:
# Additional linters requested
- testifylint # Checks usage of github.com/stretchr/testify
- usetesting # usetesting is an analyzer that detects using os.Setenv instead of t.Setenv since Go 1.17
# - tagliatelle # Disabled: we need snake_case for external API compatibility
- nlreturn # nlreturn checks for a new line before return and branch statements
- nilnil # Checks that there is no simultaneous return of nil error and an invalid value
- nestif # Reports deeply nested if statements
- mnd # An analyzer to detect magic numbers
- lll # Reports long lines
- intrange # intrange is a linter to find places where for loops could make use of an integer range
- gochecknoglobals # Check that no global variables exist
# Default/existing linters that are commonly useful
- govet
- errcheck
- staticcheck
- unused
- ineffassign
- misspell
- revive
- gosec
- unconvert
- unparam
linters-settings:
lll:
line-length: 120
nestif:
min-complexity: 4
nlreturn:
block-size: 2
revive:
rules:
- name: var-naming
arguments:
- []
- []
- "upperCaseConst=true"
tagliatelle:
case:
rules:
json: snake
yaml: snake
xml: snake
bson: snake
testifylint:
enable-all: true
usetesting: {}
issues:
max-issues-per-linter: 0
max-same-issues: 0
exclude-rules:
# Exclude unused parameter warnings for cobra command signatures
- text: "parameter '(args|cmd)' seems to be unused"
linters:
- revive
# Allow ALL_CAPS constant names
- text: "don't use ALL_CAPS in Go names"
linters:
- revive
# Allow snake_case JSON tags for external API compatibility
- path: "internal/types/ris.go"
linters:
- tagliatelle
# Allow snake_case JSON tags for database models
- path: "internal/database/models.go"
linters:
- tagliatelle
# Allow generic package name for types that define data structures
- path: "internal/types/"
text: "avoid meaningless package names"
linters:
- revive

97
CLAUDE.md Normal file
View File

@ -0,0 +1,97 @@
# IMPORTANT RULES
* Claude is an inanimate tool. The spam that Claude attempts to insert into
commit messages (which it erroneously refers to as "attribution") is not
attribution, as I am the sole author of code created using Claude. It is
corporate advertising for Anthropic and is therefore completely
unacceptable in commit messages.
* Tests should always be run before committing code. No commits should be
made that do not pass tests.
* Code should always be formatted before committing. Do not commit
unformatted code.
* Code should always be linted and linter errors fixed before committing.
NEVER commit code that does not pass the linter. DO NOT modify the linter
config unless specifically instructed.
* The test suite is fast and local. When running tests, NEVER run
individual parts of the test suite, always run the whole thing by running
"make test".
* Do not stop working on a task until you have reached the definition of
done provided to you in the initial instruction. Don't do part or most of
the work, do all of the work until the criteria for done are met.
* When you complete each task, if the tests are passing and the code is
formatted and there are no linter errors, always commit and push your
work. Use a good commit message and don't mention any author or co-author
attribution.
* Do not create additional files in the root directory of the project
without asking permission first. Configuration files, documentation, and
build files are acceptable in the root, but source code and other files
should be organized in appropriate subdirectories.
* Do not use bare strings or numbers in code, especially if they appear
anywhere more than once. Always define a constant (usually at the top of
the file) and give it a descriptive name, then use that constant in the
code instead of the bare string or number.
* If you are fixing a bug, write a test first that reproduces the bug and
fails, and then fix the bug in the code, using the test to verify that the
fix worked.
* When implementing new features, be aware of potential side-effects (such
as state files on disk, data in the database, etc.) and ensure that it is
possible to mock or stub these side-effects in tests when designing an
API.
* When dealing with dates and times or timestamps, always use, display, and
store UTC. Set the local timezone to UTC on startup. If the user needs
to see the time in a different timezone, store the user's timezone in a
separate field and convert the UTC time to the user's timezone when
displaying it. For internal use and internal applications and
administrative purposes, always display UTC.
* When implementing programs, put the main.go in
./cmd/<program_name>/main.go and put the program's code in
./internal/<program_name>/. This allows for multiple programs to be
implemented in the same repository without cluttering the root directory.
main.go should simply import and call <program_name>.CLIEntry(). The
full implementation should be in ./internal/<program_name>/.
* When you are instructed to make the tests pass, DO NOT delete tests, skip
tests, or change the tests specifically to make them pass (unless there
is a bug in the test). This is cheating, and it is bad. You should only
be modifying the test if it is incorrect or if the test is no longer
relevant. In almost all cases, you should be fixing the code that is
being tested, or updating the tests to match a refactored implementation.
* Always write a `Makefile` with the default target being `test`, and with a
`fmt` target that formats the code. The `test` target should run all
tests in the project, and the `fmt` target should format the code. `test`
should also have a prerequisite target `lint` that should run any linters
that are configured for the project.
* After each completed bugfix or feature, the code must be committed. Do
all of the pre-commit checks (test, lint, fmt) before committing, of
course. After each commit, push to the remote.
* Always write tests, even if they are extremely simple and just check for
correct syntax (ability to compile/import). If you are writing a new
feature, write a test for it. You don't need to target complete coverage,
but you should at least test any new functionality you add.
* Always use structured logging. Log any relevant state/context with the
messages (but do not log secrets). If stdout is not a terminal, output
the structured logs in jsonl format. Use go's log/slog.
* You do not need to summarize your changes in the chat after making them.
Making the changes and committing them is sufficient. If anything out of
the ordinary happened, please explain it, but in the normal case where you
found and fixed the bug, or implemented the feature, there is no need for
the end-of-change summary.
* When testing daemons, use a 15 second timeout always.

24
Makefile Normal file
View File

@ -0,0 +1,24 @@
export DEBUG = routewatch
.PHONY: test fmt lint build clean run
all: test
test: lint
go test -v ./...
fmt:
go fmt ./...
lint:
go vet ./...
golangci-lint run
build:
go build -o bin/routewatch cmd/routewatch/main.go
clean:
rm -rf bin/
run: build
./bin/routewatch

10
cmd/routewatch/main.go Normal file
View File

@ -0,0 +1,10 @@
// Package main provides the entry point for the routewatch daemon.
package main
import (
"git.eeqj.de/sneak/routewatch/internal/routewatch"
)
// main is the entry point for the routewatch daemon. All real work —
// flag handling, dependency wiring, and the run loop — lives in the
// internal routewatch package per this repo's cmd/<name> convention.
func main() {
	routewatch.CLIEntry()
}

55
cmd/streamdumper/main.go Normal file
View File

@ -0,0 +1,55 @@
// Package main provides a utility to dump raw RIS Live stream messages to stdout
package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"git.eeqj.de/sneak/routewatch/internal/metrics"
"git.eeqj.de/sneak/routewatch/internal/streamer"
"log/slog"
)
// main connects to the RIS Live stream and dumps every raw message line
// to stdout for debugging; all diagnostics go to stderr so stdout stays
// a clean message feed.
func main() {
	// Only surface errors from the streamer; anything chattier would
	// pollute the raw dump on stdout.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelError,
	}))

	// Create metrics tracker and streamer.
	metricsTracker := metrics.New()
	s := streamer.New(logger, metricsTracker)

	// Register raw message handler that prints each line verbatim.
	s.RegisterRawHandler(func(line string) {
		fmt.Println(line)
	})

	// signal.NotifyContext replaces the previous hand-rolled signal
	// goroutine: ctx is cancelled on SIGINT/SIGTERM and stop() releases
	// the signal registration.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	// Start streaming.
	if err := s.Start(); err != nil {
		log.Fatal("Failed to start streamer:", err)
	}
	defer s.Stop()

	// Block until a shutdown signal arrives.
	<-ctx.Done()
	log.Println("Received shutdown signal")
}

1000
docs/message-examples.json Normal file

File diff suppressed because it is too large Load Diff

26
go.mod Normal file
View File

@ -0,0 +1,26 @@
module git.eeqj.de/sneak/routewatch
go 1.24.4
require (
github.com/google/uuid v1.6.0
go.uber.org/fx v1.24.0
modernc.org/sqlite v1.38.1
)
require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-chi/chi/v5 v5.2.2 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
golang.org/x/sys v0.34.0 // indirect
modernc.org/libc v1.66.3 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
)

71
go.sum Normal file
View File

@ -0,0 +1,71 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.26.2 h1:991HMkLjJzYBIfha6ECZdjrIYz2/1ayr+FL8GN+CNzM=
modernc.org/cc/v4 v4.26.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM=
modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ=
modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.38.1 h1:jNnIjleVta+DKSAr3TnkKK87EEhjPhBLzi6hvIX9Bas=
modernc.org/sqlite v1.38.1/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=

View File

@ -0,0 +1,462 @@
// Package database provides SQLite storage for BGP routing data including ASNs, prefixes, announcements and peerings.
package database
import (
	"database/sql"
	"errors"
	"fmt"
	"log/slog"
	"time"

	"github.com/google/uuid"
	_ "modernc.org/sqlite" // Pure Go SQLite driver
)
const (
	// dbSchema is the complete SQLite schema, executed verbatim by
	// Initialize. Every statement is idempotent (IF NOT EXISTS), so
	// running it on each startup is safe.
	dbSchema = `
CREATE TABLE IF NOT EXISTS asns (
id TEXT PRIMARY KEY,
number INTEGER UNIQUE NOT NULL,
first_seen DATETIME NOT NULL,
last_seen DATETIME NOT NULL
);
CREATE TABLE IF NOT EXISTS prefixes (
id TEXT PRIMARY KEY,
prefix TEXT UNIQUE NOT NULL,
ip_version INTEGER NOT NULL, -- 4 for IPv4, 6 for IPv6
first_seen DATETIME NOT NULL,
last_seen DATETIME NOT NULL
);
CREATE TABLE IF NOT EXISTS announcements (
id TEXT PRIMARY KEY,
prefix_id TEXT NOT NULL,
asn_id TEXT NOT NULL,
origin_asn_id TEXT NOT NULL,
path TEXT NOT NULL,
next_hop TEXT,
timestamp DATETIME NOT NULL,
is_withdrawal BOOLEAN NOT NULL DEFAULT 0,
FOREIGN KEY (prefix_id) REFERENCES prefixes(id),
FOREIGN KEY (asn_id) REFERENCES asns(id),
FOREIGN KEY (origin_asn_id) REFERENCES asns(id)
);
CREATE TABLE IF NOT EXISTS asn_peerings (
id TEXT PRIMARY KEY,
from_asn_id TEXT NOT NULL,
to_asn_id TEXT NOT NULL,
first_seen DATETIME NOT NULL,
last_seen DATETIME NOT NULL,
FOREIGN KEY (from_asn_id) REFERENCES asns(id),
FOREIGN KEY (to_asn_id) REFERENCES asns(id),
UNIQUE(from_asn_id, to_asn_id)
);
-- Live routing table: current state of announced routes
CREATE TABLE IF NOT EXISTS live_routes (
id TEXT PRIMARY KEY,
prefix_id TEXT NOT NULL,
origin_asn_id TEXT NOT NULL,
peer_asn INTEGER NOT NULL,
next_hop TEXT,
announced_at DATETIME NOT NULL,
withdrawn_at DATETIME,
FOREIGN KEY (prefix_id) REFERENCES prefixes(id),
FOREIGN KEY (origin_asn_id) REFERENCES asns(id),
UNIQUE(prefix_id, origin_asn_id, peer_asn)
);
CREATE INDEX IF NOT EXISTS idx_prefixes_ip_version ON prefixes(ip_version);
CREATE INDEX IF NOT EXISTS idx_prefixes_version_prefix ON prefixes(ip_version, prefix);
CREATE INDEX IF NOT EXISTS idx_announcements_timestamp ON announcements(timestamp);
CREATE INDEX IF NOT EXISTS idx_announcements_prefix_id ON announcements(prefix_id);
CREATE INDEX IF NOT EXISTS idx_announcements_asn_id ON announcements(asn_id);
CREATE INDEX IF NOT EXISTS idx_asn_peerings_from_asn ON asn_peerings(from_asn_id);
CREATE INDEX IF NOT EXISTS idx_asn_peerings_to_asn ON asn_peerings(to_asn_id);
-- Indexes for live routes table
CREATE INDEX IF NOT EXISTS idx_live_routes_active
ON live_routes(prefix_id, origin_asn_id)
WHERE withdrawn_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_live_routes_origin
ON live_routes(origin_asn_id)
WHERE withdrawn_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_live_routes_prefix
ON live_routes(prefix_id)
WHERE withdrawn_at IS NULL;
`
)
// Database manages the SQLite database connection and operations.
// Concurrency is handled by capping the pool at a single connection
// (see NewWithConfig), so all statements are serialized through db.
type Database struct {
	db     *sql.DB      // single-connection SQLite handle
	logger *slog.Logger // structured logger for rollback/commit failures
}
// defaultDBPath is the SQLite database file used when no custom Config
// is supplied (named constant per this repo's no-bare-strings rule).
const defaultDBPath = "routewatch.db"

// Config holds database configuration.
type Config struct {
	// Path is the filesystem location of the SQLite database file.
	Path string
}

// NewConfig provides default database configuration.
func NewConfig() Config {
	return Config{
		Path: defaultDBPath,
	}
}
// New creates a new database connection with the default configuration
// and initializes the schema.
func New(logger *slog.Logger) (*Database, error) {
	return NewWithConfig(NewConfig(), logger)
}
// NewWithConfig creates a new database connection with custom
// configuration and initializes the schema.
//
// The DSN enables a 5s busy timeout and WAL journaling using the
// modernc.org/sqlite `_pragma=` query parameters; the mattn-style
// `_busy_timeout`/`_journal_mode` keys used previously are silently
// ignored by this driver, so WAL was never actually enabled.
func NewWithConfig(config Config, logger *slog.Logger) (*Database, error) {
	dsn := fmt.Sprintf(
		"file:%s?_pragma=busy_timeout(5000)&_pragma=journal_mode(WAL)",
		config.Path,
	)

	db, err := sql.Open("sqlite", dsn)
	if err != nil {
		return nil, fmt.Errorf("failed to open database: %w", err)
	}

	if err := db.Ping(); err != nil {
		_ = db.Close() // don't leak the handle on a failed ping

		return nil, fmt.Errorf("failed to ping database: %w", err)
	}

	// Force serialization since SQLite doesn't handle true concurrency well.
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0)

	database := &Database{db: db, logger: logger}
	if err := database.Initialize(); err != nil {
		_ = db.Close() // don't leak the handle if schema creation fails

		return nil, fmt.Errorf("failed to initialize database: %w", err)
	}

	return database, nil
}
// Initialize creates the database schema if it doesn't exist.
// dbSchema uses IF NOT EXISTS throughout, so calling this repeatedly
// (it runs on every construction) is harmless.
func (d *Database) Initialize() error {
	_, err := d.db.Exec(dbSchema)

	return err
}
// Close closes the database connection. The Database must not be used
// after Close returns.
func (d *Database) Close() error {
	return d.db.Close()
}
// GetOrCreateASN retrieves an existing ASN row by number, bumping its
// last_seen timestamp, or inserts a new row if none exists. The whole
// read-modify-write runs inside a single transaction.
func (d *Database) GetOrCreateASN(number int, timestamp time.Time) (*ASN, error) {
	tx, err := d.db.Begin()
	if err != nil {
		return nil, err
	}
	defer func() {
		// Rollback after a successful Commit yields sql.ErrTxDone; that
		// is the expected case and not worth logging.
		if err := tx.Rollback(); err != nil && !errors.Is(err, sql.ErrTxDone) {
			d.logger.Error("Failed to rollback transaction", "error", err)
		}
	}()

	var asn ASN
	var idStr string
	err = tx.QueryRow("SELECT id, number, first_seen, last_seen FROM asns WHERE number = ?", number).
		Scan(&idStr, &asn.Number, &asn.FirstSeen, &asn.LastSeen)
	if err == nil {
		// ASN exists, update last_seen. Surface a corrupt stored UUID
		// instead of silently zeroing it as before.
		if asn.ID, err = uuid.Parse(idStr); err != nil {
			return nil, fmt.Errorf("invalid ASN id %q in database: %w", idStr, err)
		}
		if _, err = tx.Exec("UPDATE asns SET last_seen = ? WHERE id = ?", timestamp, asn.ID.String()); err != nil {
			return nil, err
		}
		asn.LastSeen = timestamp
		if err = tx.Commit(); err != nil {
			d.logger.Error("Failed to commit transaction for ASN update", "asn", number, "error", err)

			return nil, err
		}

		return &asn, nil
	}
	// errors.Is handles wrapped sentinel errors, unlike the previous
	// direct comparison.
	if !errors.Is(err, sql.ErrNoRows) {
		return nil, err
	}

	// ASN doesn't exist, create it.
	asn = ASN{
		ID:        generateUUID(),
		Number:    number,
		FirstSeen: timestamp,
		LastSeen:  timestamp,
	}
	if _, err = tx.Exec("INSERT INTO asns (id, number, first_seen, last_seen) VALUES (?, ?, ?, ?)",
		asn.ID.String(), asn.Number, asn.FirstSeen, asn.LastSeen); err != nil {
		return nil, err
	}
	if err = tx.Commit(); err != nil {
		d.logger.Error("Failed to commit transaction for ASN creation", "asn", number, "error", err)

		return nil, err
	}

	return &asn, nil
}
// GetOrCreatePrefix retrieves an existing prefix row, bumping its
// last_seen timestamp, or inserts a new row (classifying it as IPv4 or
// IPv6 via detectIPVersion) if none exists. The whole operation runs
// inside a single transaction.
func (d *Database) GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error) {
	tx, err := d.db.Begin()
	if err != nil {
		return nil, err
	}
	defer func() {
		// Rollback after a successful Commit yields sql.ErrTxDone; that
		// is the expected case and not worth logging.
		if err := tx.Rollback(); err != nil && !errors.Is(err, sql.ErrTxDone) {
			d.logger.Error("Failed to rollback transaction", "error", err)
		}
	}()

	var p Prefix
	var idStr string
	err = tx.QueryRow("SELECT id, prefix, ip_version, first_seen, last_seen FROM prefixes WHERE prefix = ?", prefix).
		Scan(&idStr, &p.Prefix, &p.IPVersion, &p.FirstSeen, &p.LastSeen)
	if err == nil {
		// Prefix exists, update last_seen. Surface a corrupt stored
		// UUID instead of silently zeroing it as before.
		if p.ID, err = uuid.Parse(idStr); err != nil {
			return nil, fmt.Errorf("invalid prefix id %q in database: %w", idStr, err)
		}
		if _, err = tx.Exec("UPDATE prefixes SET last_seen = ? WHERE id = ?", timestamp, p.ID.String()); err != nil {
			return nil, err
		}
		p.LastSeen = timestamp
		if err = tx.Commit(); err != nil {
			d.logger.Error("Failed to commit transaction for prefix update", "prefix", prefix, "error", err)

			return nil, err
		}

		return &p, nil
	}
	// errors.Is handles wrapped sentinel errors, unlike the previous
	// direct comparison.
	if !errors.Is(err, sql.ErrNoRows) {
		return nil, err
	}

	// Prefix doesn't exist, create it.
	p = Prefix{
		ID:        generateUUID(),
		Prefix:    prefix,
		IPVersion: detectIPVersion(prefix),
		FirstSeen: timestamp,
		LastSeen:  timestamp,
	}
	if _, err = tx.Exec("INSERT INTO prefixes (id, prefix, ip_version, first_seen, last_seen) VALUES (?, ?, ?, ?, ?)",
		p.ID.String(), p.Prefix, p.IPVersion, p.FirstSeen, p.LastSeen); err != nil {
		return nil, err
	}
	if err = tx.Commit(); err != nil {
		d.logger.Error("Failed to commit transaction for prefix creation", "prefix", prefix, "error", err)

		return nil, err
	}

	return &p, nil
}
// RecordAnnouncement inserts a new BGP announcement or withdrawal into
// the database. The caller supplies a fully-populated Announcement,
// including its ID.
func (d *Database) RecordAnnouncement(announcement *Announcement) error {
	args := []any{
		announcement.ID.String(),
		announcement.PrefixID.String(),
		announcement.ASNID.String(),
		announcement.OriginASNID.String(),
		announcement.Path,
		announcement.NextHop,
		announcement.Timestamp,
		announcement.IsWithdrawal,
	}

	_, err := d.db.Exec(`
INSERT INTO announcements (id, prefix_id, asn_id, origin_asn_id, path, next_hop, timestamp, is_withdrawal)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, args...)

	return err
}
// RecordPeering records a peering relationship between two ASNs.
//
// A single atomic UPSERT replaces the previous exists-check plus
// separate INSERT/UPDATE: on first sighting the row is inserted with
// first_seen == last_seen; afterwards only last_seen advances. The
// ON CONFLICT clause relies on the table's UNIQUE(from_asn_id,
// to_asn_id) constraint. The freshly generated UUID is discarded by
// SQLite when the row already exists.
func (d *Database) RecordPeering(fromASNID, toASNID string, timestamp time.Time) error {
	_, err := d.db.Exec(`
		INSERT INTO asn_peerings (id, from_asn_id, to_asn_id, first_seen, last_seen)
		VALUES (?, ?, ?, ?, ?)
		ON CONFLICT(from_asn_id, to_asn_id) DO UPDATE SET last_seen = excluded.last_seen`,
		generateUUID().String(), fromASNID, toASNID, timestamp, timestamp)
	if err != nil {
		d.logger.Error("Failed to record peering",
			"from_asn_id", fromASNID,
			"to_asn_id", toASNID,
			"error", err,
		)

		return err
	}

	return nil
}
// UpdateLiveRoute inserts or refreshes a row in the live routing table
// for an announcement.
//
// A single UPSERT replaces the previous select-then-write pair. The
// UNIQUE(prefix_id, origin_asn_id, peer_asn) constraint triggers the
// conflict clause, which refreshes next_hop/announced_at and clears
// withdrawn_at so a re-announced withdrawn route becomes active again.
// Unlike the old INSERT OR REPLACE path, ON CONFLICT DO UPDATE keeps
// the existing row id stable instead of replacing the row.
func (d *Database) UpdateLiveRoute(
	prefixID, originASNID uuid.UUID,
	peerASN int,
	nextHop string,
	timestamp time.Time,
) error {
	_, err := d.db.Exec(`
		INSERT INTO live_routes
		(id, prefix_id, origin_asn_id, peer_asn, next_hop, announced_at, withdrawn_at)
		VALUES (?, ?, ?, ?, ?, ?, NULL)
		ON CONFLICT(prefix_id, origin_asn_id, peer_asn) DO UPDATE SET
			next_hop = excluded.next_hop,
			announced_at = excluded.announced_at,
			withdrawn_at = NULL`,
		generateUUID().String(), prefixID.String(), originASNID.String(),
		peerASN, nextHop, timestamp)

	return err
}
// WithdrawLiveRoute marks a route as withdrawn in the live routing
// table. Only rows currently active (withdrawn_at IS NULL) for the
// given prefix/peer pair are touched, so repeated withdrawals are
// harmless no-ops.
func (d *Database) WithdrawLiveRoute(prefixID uuid.UUID, peerASN int, timestamp time.Time) error {
	const query = `
UPDATE live_routes
SET withdrawn_at = ?
WHERE prefix_id = ? AND peer_asn = ? AND withdrawn_at IS NULL`

	_, err := d.db.Exec(query, timestamp, prefixID.String(), peerASN)

	return err
}
// GetActiveLiveRoutes returns all currently active routes (not
// withdrawn), most recently announced first.
func (d *Database) GetActiveLiveRoutes() ([]LiveRoute, error) {
	rows, err := d.db.Query(`
		SELECT id, prefix_id, origin_asn_id, peer_asn, next_hop, announced_at
		FROM live_routes
		WHERE withdrawn_at IS NULL
		ORDER BY announced_at DESC`)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = rows.Close()
	}()

	var routes []LiveRoute
	for rows.Next() {
		var route LiveRoute
		var idStr, prefixIDStr, originASNIDStr string
		// next_hop is a nullable column; scanning NULL into a plain
		// string would fail, so go through sql.NullString.
		var nextHop sql.NullString
		if err := rows.Scan(&idStr, &prefixIDStr, &originASNIDStr,
			&route.PeerASN, &nextHop, &route.AnnouncedAt); err != nil {
			return nil, err
		}
		route.NextHop = nextHop.String

		// Surface corrupt stored UUIDs instead of silently zeroing them.
		if route.ID, err = uuid.Parse(idStr); err != nil {
			return nil, fmt.Errorf("invalid live route id %q: %w", idStr, err)
		}
		if route.PrefixID, err = uuid.Parse(prefixIDStr); err != nil {
			return nil, fmt.Errorf("invalid prefix id %q: %w", prefixIDStr, err)
		}
		if route.OriginASNID, err = uuid.Parse(originASNIDStr); err != nil {
			return nil, fmt.Errorf("invalid origin ASN id %q: %w", originASNIDStr, err)
		}
		routes = append(routes, route)
	}

	return routes, rows.Err()
}
// GetStats returns database statistics: ASN, prefix (total plus per IP
// version), peering, and active live-route counts. On the first query
// error it returns the partially-filled Stats alongside the error.
func (d *Database) GetStats() (Stats, error) {
	const (
		ipVersionV4 = 4
		ipVersionV6 = 6
	)

	var stats Stats

	// Table-driven to avoid six copies of the query/scan/check pattern;
	// the order matches the original sequence of queries.
	counts := []struct {
		query string
		args  []any
		dest  *int
	}{
		{"SELECT COUNT(*) FROM asns", nil, &stats.ASNs},
		{"SELECT COUNT(*) FROM prefixes", nil, &stats.Prefixes},
		{"SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", []any{ipVersionV4}, &stats.IPv4Prefixes},
		{"SELECT COUNT(*) FROM prefixes WHERE ip_version = ?", []any{ipVersionV6}, &stats.IPv6Prefixes},
		{"SELECT COUNT(*) FROM asn_peerings", nil, &stats.Peerings},
		{"SELECT COUNT(*) FROM live_routes WHERE withdrawn_at IS NULL", nil, &stats.LiveRoutes},
	}
	for _, c := range counts {
		if err := d.db.QueryRow(c.query, c.args...).Scan(c.dest); err != nil {
			return stats, err
		}
	}

	return stats, nil
}

View File

@ -0,0 +1,46 @@
package database
import (
"time"
"github.com/google/uuid"
)
// Stats contains database statistics as reported by GetStats.
type Stats struct {
	ASNs         int // total distinct ASNs seen
	Prefixes     int // total distinct prefixes seen
	IPv4Prefixes int // subset of Prefixes with ip_version = 4
	IPv6Prefixes int // subset of Prefixes with ip_version = 6
	Peerings     int // distinct AS-to-AS peering pairs
	LiveRoutes   int // currently active (not withdrawn) routes
}

// Store defines the interface for database operations. *Database is the
// canonical implementation (enforced below); the interface allows
// callers to substitute alternative implementations, e.g. in tests.
type Store interface {
	// ASN operations
	GetOrCreateASN(number int, timestamp time.Time) (*ASN, error)

	// Prefix operations
	GetOrCreatePrefix(prefix string, timestamp time.Time) (*Prefix, error)

	// Announcement operations
	RecordAnnouncement(announcement *Announcement) error

	// Peering operations
	RecordPeering(fromASNID, toASNID string, timestamp time.Time) error

	// Live route operations
	UpdateLiveRoute(prefixID, originASNID uuid.UUID, peerASN int, nextHop string, timestamp time.Time) error
	WithdrawLiveRoute(prefixID uuid.UUID, peerASN int, timestamp time.Time) error
	GetActiveLiveRoutes() ([]LiveRoute, error)

	// Statistics
	GetStats() (Stats, error)

	// Lifecycle
	Close() error
}

// Ensure Database implements Store at compile time.
var _ Store = (*Database)(nil)

View File

@ -0,0 +1,57 @@
package database
import (
"time"
"github.com/google/uuid"
)
// ASN represents an Autonomous System Number row in the asns table.
type ASN struct {
	ID        uuid.UUID `json:"id"`         // primary key
	Number    int       `json:"number"`     // the AS number (unique)
	FirstSeen time.Time `json:"first_seen"` // first time this ASN was observed
	LastSeen  time.Time `json:"last_seen"`  // most recent observation
}

// Prefix represents an IP prefix (CIDR block) row in the prefixes table.
type Prefix struct {
	ID        uuid.UUID `json:"id"`         // primary key
	Prefix    string    `json:"prefix"`     // CIDR text, e.g. "10.0.0.0/8" (unique)
	IPVersion int       `json:"ip_version"` // 4 or 6
	FirstSeen time.Time `json:"first_seen"` // first time this prefix was observed
	LastSeen  time.Time `json:"last_seen"`  // most recent observation
}

// Announcement represents a BGP announcement (or withdrawal, when
// IsWithdrawal is set) as recorded in the announcements table.
type Announcement struct {
	ID           uuid.UUID `json:"id"`            // primary key
	PrefixID     uuid.UUID `json:"prefix_id"`     // FK to prefixes
	ASNID        uuid.UUID `json:"asn_id"`        // FK to asns
	OriginASNID  uuid.UUID `json:"origin_asn_id"` // FK to asns (path origin)
	Path         string    `json:"path"`          // JSON-encoded AS path
	NextHop      string    `json:"next_hop"`      // next-hop address, may be empty
	Timestamp    time.Time `json:"timestamp"`     // when the update occurred
	IsWithdrawal bool      `json:"is_withdrawal"` // true for withdrawals
}

// ASNPeering represents a peering relationship between two ASNs.
type ASNPeering struct {
	ID        uuid.UUID `json:"id"`          // primary key
	FromASNID uuid.UUID `json:"from_asn_id"` // FK to asns
	ToASNID   uuid.UUID `json:"to_asn_id"`   // FK to asns
	FirstSeen time.Time `json:"first_seen"`  // first time this pairing was seen
	LastSeen  time.Time `json:"last_seen"`   // most recent sighting
}

// LiveRoute represents the current state of a route in the live routing table.
type LiveRoute struct {
	ID          uuid.UUID  `json:"id"`            // primary key
	PrefixID    uuid.UUID  `json:"prefix_id"`     // FK to prefixes
	OriginASNID uuid.UUID  `json:"origin_asn_id"` // FK to asns
	PeerASN     int        `json:"peer_asn"`      // peer AS number (stored directly, not a FK)
	Path        string     `json:"path"`          // AS path; NOTE(review): not populated by the live_routes queries in this file — confirm
	NextHop     string     `json:"next_hop"`      // next-hop address, may be empty
	AnnouncedAt time.Time  `json:"announced_at"`  // last announcement time
	WithdrawnAt *time.Time `json:"withdrawn_at"`  // nil while the route is active
}

View File

@ -0,0 +1,25 @@
package database
import (
"strings"
"github.com/google/uuid"
)
func generateUUID() uuid.UUID {
return uuid.New()
}
const (
	ipVersionV4 = 4 // marker value stored in prefixes.ip_version for IPv4
	ipVersionV6 = 6 // marker value stored in prefixes.ip_version for IPv6
)

// detectIPVersion determines if a prefix is IPv4 (returns 4) or IPv6
// (returns 6). Any prefix containing a colon is classified as IPv6;
// everything else is treated as IPv4.
func detectIPVersion(prefix string) int {
	if strings.ContainsRune(prefix, ':') {
		return ipVersionV6
	}

	return ipVersionV4
}

100
internal/metrics/metrics.go Normal file
View File

@ -0,0 +1,100 @@
// Package metrics provides centralized metrics tracking for the RouteWatch application
package metrics
import (
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// Tracker provides centralized metrics tracking for the stream.
// connectedSince is guarded by mu; isConnected is a lock-free flag.
type Tracker struct {
	mu             sync.RWMutex
	registry       metrics.Registry
	connectedSince time.Time // time of the most recent successful connect; guarded by mu
	isConnected    atomic.Bool

	// Stream metrics
	messageCounter metrics.Counter // monotonic message count
	byteCounter    metrics.Counter // monotonic byte count
	messageRate    metrics.Meter   // rolling messages/sec
	byteRate       metrics.Meter   // rolling bytes/sec
}
// New creates a metrics tracker with fresh counters and meters.
func New() *Tracker {
	t := &Tracker{
		registry:       metrics.NewRegistry(),
		messageCounter: metrics.NewCounter(),
		byteCounter:    metrics.NewCounter(),
		messageRate:    metrics.NewMeter(),
		byteRate:       metrics.NewMeter(),
	}

	return t
}
// SetConnected updates the connection status. On a transition to
// connected it also stamps connectedSince with the current time;
// disconnecting leaves the previous timestamp in place.
func (t *Tracker) SetConnected(connected bool) {
	t.isConnected.Store(connected)
	if !connected {
		return
	}

	t.mu.Lock()
	t.connectedSince = time.Now()
	t.mu.Unlock()
}
// IsConnected returns the current connection status (lock-free read).
func (t *Tracker) IsConnected() bool {
	return t.isConnected.Load()
}
// RecordMessage records one received message and its size in bytes,
// updating both the monotonic counters and the rolling-rate meters.
func (t *Tracker) RecordMessage(bytes int64) {
	t.messageRate.Mark(1)
	t.byteRate.Mark(bytes)
	t.messageCounter.Inc(1)
	t.byteCounter.Inc(bytes)
}
// GetStreamMetrics returns a snapshot of the current streaming metrics.
func (t *Tracker) GetStreamMetrics() StreamMetrics {
	t.mu.RLock()
	since := t.connectedSince
	t.mu.RUnlock()

	const bitsPerByte = 8

	// Counters are int64; clamp negatives (which should not occur in
	// practice) so the conversion to uint64 is safe.
	msgs := max(t.messageCounter.Count(), 0)
	byteTotal := max(t.byteCounter.Count(), 0)

	return StreamMetrics{
		TotalMessages:  uint64(msgs),
		TotalBytes:     uint64(byteTotal),
		ConnectedSince: since,
		Connected:      t.isConnected.Load(),
		MessagesPerSec: t.messageRate.Rate1(),
		BitsPerSec:     t.byteRate.Rate1() * bitsPerByte,
	}
}
// StreamMetrics contains a point-in-time snapshot of streaming statistics.
type StreamMetrics struct {
	TotalMessages  uint64    // messages received since start
	TotalBytes     uint64    // bytes received since start
	ConnectedSince time.Time // zero value if never connected
	Connected      bool
	MessagesPerSec float64 // 1-minute rolling average
	BitsPerSec     float64 // 1-minute rolling average, in bits
}

81
internal/ristypes/ris.go Normal file
View File

@ -0,0 +1,81 @@
// Package ristypes defines the data structures for RIS Live BGP messages and announcements.
package ristypes
import (
"encoding/json"
"time"
)
// ASPath represents a BGP AS path. RIS Live encodes AS_SETs as nested
// arrays inside the path (e.g. [1, [2, 3], 4]), so the JSON form is not
// always a flat integer array; we flatten everything into a []int.
type ASPath []int

// UnmarshalJSON implements custom JSON unmarshaling that flattens
// arbitrarily nested arrays of AS numbers into a flat list.
//
// The fast path handles the common case of a plain integer array. The
// fallback walks the raw value recursively, so AS sets nested more than
// one level deep (which the previous implementation silently dropped)
// are preserved. Non-numeric elements are skipped, matching the
// original behavior for malformed entries.
func (p *ASPath) UnmarshalJSON(data []byte) error {
	// Fast path: a simple array of integers.
	var simple []int
	if err := json.Unmarshal(data, &simple); err == nil {
		*p = ASPath(simple)

		return nil
	}

	// Fallback: decode loosely and flatten recursively.
	var raw []interface{}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	result := make([]int, 0, len(raw))

	var flatten func(items []interface{})
	flatten = func(items []interface{}) {
		for _, item := range items {
			switch v := item.(type) {
			case float64:
				result = append(result, int(v))
			case []interface{}:
				// Nested AS set — recurse to any depth.
				flatten(v)
			}
		}
	}
	flatten(raw)

	*p = ASPath(result)

	return nil
}
// RISLiveMessage represents the outer wrapper of each line on the RIS
// Live stream: a type tag plus the message payload.
type RISLiveMessage struct {
	Type string     `json:"type"`
	Data RISMessage `json:"data"`
}

// RISMessage represents one message from the RIS Live stream.
type RISMessage struct {
	Type            string            `json:"type"`
	Timestamp       float64           `json:"timestamp"` // Unix seconds with fractional part
	ParsedTimestamp time.Time         `json:"-"`         // Parsed from Timestamp field by the consumer
	Peer            string            `json:"peer"`
	PeerASN         string            `json:"peer_asn"` // decimal ASN as a string
	ID              string            `json:"id"`
	Host            string            `json:"host"`
	RRC             string            `json:"rrc,omitempty"`
	MrtTime         float64           `json:"mrt_time,omitempty"`
	SocketTime      float64           `json:"socket_time,omitempty"`
	Path            ASPath            `json:"path,omitempty"` // flattened AS path (see ASPath)
	Community       [][]int           `json:"community,omitempty"`
	Origin          string            `json:"origin,omitempty"`
	MED             *int              `json:"med,omitempty"`
	LocalPref       *int              `json:"local_pref,omitempty"`
	Announcements   []RISAnnouncement `json:"announcements,omitempty"`
	Withdrawals     []string          `json:"withdrawals,omitempty"` // withdrawn prefixes (CIDR strings)
	Raw             string            `json:"raw,omitempty"`
}

// RISAnnouncement represents announcement data within a RIS message:
// a next hop shared by one or more announced prefixes.
type RISAnnouncement struct {
	NextHop  string   `json:"next_hop"`
	Prefixes []string `json:"prefixes"`
}

158
internal/routewatch/app.go Normal file
View File

@ -0,0 +1,158 @@
// Package routewatch contains the primary RouteWatch type that represents a running instance
// of the application and contains pointers to its core dependencies, and is responsible for initialization.
package routewatch
import (
"context"
"log/slog"
"os"
"strings"
"time"
"git.eeqj.de/sneak/routewatch/internal/database"
"git.eeqj.de/sneak/routewatch/internal/metrics"
"git.eeqj.de/sneak/routewatch/internal/server"
"git.eeqj.de/sneak/routewatch/internal/streamer"
"go.uber.org/fx"
)
// Config contains runtime configuration for RouteWatch.
type Config struct {
	MaxRuntime time.Duration // Maximum runtime (0 = run forever)
}

// NewConfig provides the default configuration: run forever.
func NewConfig() Config {
	// The zero value of Config already means "no runtime limit".
	var cfg Config

	return cfg
}
// Dependencies contains all fx-injected dependencies for RouteWatch.
type Dependencies struct {
	fx.In

	DB       database.Store
	Streamer *streamer.Streamer
	Server   *server.Server
	Logger   *slog.Logger
	Config   Config `optional:"true"` // zero-value Config (run forever) when not provided
}
// RouteWatch represents the main application instance and holds its
// core collaborators.
type RouteWatch struct {
	db         database.Store
	streamer   *streamer.Streamer
	server     *server.Server
	logger     *slog.Logger
	maxRuntime time.Duration // 0 means run until the context is cancelled
}
// New creates a RouteWatch instance from the injected dependencies.
func New(deps Dependencies) *RouteWatch {
	rw := &RouteWatch{
		db:         deps.DB,
		streamer:   deps.Streamer,
		server:     deps.Server,
		logger:     deps.Logger,
		maxRuntime: deps.Config.MaxRuntime,
	}

	return rw
}
// Run starts the RouteWatch application and blocks until the context is
// cancelled (or the optional max-runtime deadline expires), then shuts
// the services down and logs final stream statistics.
func (rw *RouteWatch) Run(ctx context.Context) error {
	rw.logger.Info("Starting RouteWatch")

	// Apply the optional runtime limit.
	if rw.maxRuntime > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, rw.maxRuntime)
		defer cancel()
		rw.logger.Info("Running with time limit", "max_runtime", rw.maxRuntime)
	}

	// Wire BGP UPDATE messages into the database.
	rw.streamer.RegisterHandler(NewDatabaseHandler(rw.db, rw.logger))

	if err := rw.streamer.Start(); err != nil {
		return err
	}

	if err := rw.server.Start(); err != nil {
		return err
	}

	// Block until cancelled or timed out.
	<-ctx.Done()

	rw.streamer.Stop()

	// Give the HTTP server a bounded window to drain in-flight requests.
	const serverStopTimeout = 5 * time.Second
	stopCtx, cancel := context.WithTimeout(context.Background(), serverStopTimeout)
	defer cancel()
	if err := rw.server.Stop(stopCtx); err != nil {
		rw.logger.Error("Failed to stop HTTP server gracefully", "error", err)
	}

	// `final` (not `metrics`) avoids shadowing the imported metrics package.
	final := rw.streamer.GetMetrics()
	rw.logger.Info("Final metrics",
		"total_messages", final.TotalMessages,
		"total_bytes", final.TotalBytes,
		"messages_per_sec", final.MessagesPerSec,
		"bits_per_sec", final.BitsPerSec,
		"duration", time.Since(final.ConnectedSince),
	)

	return nil
}
// NewLogger creates a structured slog logger writing to stdout. Debug
// level is enabled when $DEBUG contains "routewatch"; output is JSON
// unless stdout looks like an interactive terminal.
func NewLogger() *slog.Logger {
	level := slog.LevelInfo
	if strings.Contains(os.Getenv("DEBUG"), "routewatch") {
		level = slog.LevelDebug
	}

	opts := &slog.HandlerOptions{Level: level}

	// Heuristic terminal detection (stdout device name plus $TERM);
	// presumably good enough to pick human- vs machine-readable output.
	isTerminal := os.Stdout.Name() == "/dev/stdout" && os.Getenv("TERM") != ""

	var handler slog.Handler
	if isTerminal {
		handler = slog.NewTextHandler(os.Stdout, opts)
	} else {
		handler = slog.NewJSONHandler(os.Stdout, opts)
	}

	return slog.New(handler)
}
// getModule provides all fx dependency constructors for the application.
func getModule() fx.Option {
	return fx.Options(
		fx.Provide(
			NewLogger,
			NewConfig,
			metrics.New,
			database.New,
			// Expose *database.Database under the database.Store
			// interface so consumers depend on the abstraction.
			fx.Annotate(
				func(db *database.Database) database.Store {
					return db
				},
				fx.As(new(database.Store)),
			),
			streamer.New,
			server.New,
			New,
		),
	)
}

View File

@ -0,0 +1,243 @@
package routewatch
import (
	"context"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"git.eeqj.de/sneak/routewatch/internal/database"
	"git.eeqj.de/sneak/routewatch/internal/metrics"
	"git.eeqj.de/sneak/routewatch/internal/server"
	"git.eeqj.de/sneak/routewatch/internal/streamer"
	"github.com/google/uuid"
)
// mockStore is a mock implementation of database.Store for testing.
// All fields are guarded by mu.
type mockStore struct {
	mu sync.Mutex

	// Counters for tracking calls
	ASNCount        int
	PrefixCount     int
	PeeringCount    int
	RouteCount      int
	WithdrawalCount int

	// Track unique items
	ASNs     map[int]*database.ASN
	Prefixes map[string]*database.Prefix
	Peerings map[string]bool // key is "from_to"
	Routes   map[string]bool // key is "prefix_origin_peer"

	// Track IP versions
	IPv4Prefixes int
	IPv6Prefixes int
}
// newMockStore constructs an empty mock store with all maps initialized.
func newMockStore() *mockStore {
	s := &mockStore{}
	s.ASNs = make(map[int]*database.ASN)
	s.Prefixes = make(map[string]*database.Prefix)
	s.Peerings = make(map[string]bool)
	s.Routes = make(map[string]bool)

	return s
}
// GetOrCreateASN mock implementation: returns the existing record
// (refreshing LastSeen) or creates and counts a new one.
func (m *mockStore) GetOrCreateASN(number int, timestamp time.Time) (*database.ASN, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	existing, ok := m.ASNs[number]
	if ok {
		existing.LastSeen = timestamp

		return existing, nil
	}

	created := &database.ASN{
		ID:        uuid.New(),
		Number:    number,
		FirstSeen: timestamp,
		LastSeen:  timestamp,
	}
	m.ASNs[number] = created
	m.ASNCount++

	return created, nil
}
// GetOrCreatePrefix mock implementation: returns the existing record
// (refreshing LastSeen) or creates one, classifying it as IPv4/IPv6 and
// updating the per-version counters.
func (m *mockStore) GetOrCreatePrefix(prefix string, timestamp time.Time) (*database.Prefix, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if existing, ok := m.Prefixes[prefix]; ok {
		existing.LastSeen = timestamp

		return existing, nil
	}

	const (
		ipVersionV4 = 4
		ipVersionV6 = 6
	)

	// A colon anywhere in the prefix marks it as IPv6.
	version := ipVersionV4
	if strings.Contains(prefix, ":") {
		version = ipVersionV6
	}

	created := &database.Prefix{
		ID:        uuid.New(),
		Prefix:    prefix,
		IPVersion: version,
		FirstSeen: timestamp,
		LastSeen:  timestamp,
	}
	m.Prefixes[prefix] = created
	m.PrefixCount++

	switch version {
	case ipVersionV4:
		m.IPv4Prefixes++
	default:
		m.IPv6Prefixes++
	}

	return created, nil
}
// RecordAnnouncement mock implementation. Announcements are
// intentionally not tracked in detail; the mock only needs to satisfy
// the database.Store interface.
func (m *mockStore) RecordAnnouncement(_ *database.Announcement) error {
	return nil
}
// RecordPeering mock implementation: counts each unique directed
// (from, to) ASN pair exactly once.
func (m *mockStore) RecordPeering(fromASNID, toASNID string, _ time.Time) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	pairKey := fromASNID + "_" + toASNID
	if m.Peerings[pairKey] {
		return nil
	}

	m.Peerings[pairKey] = true
	m.PeeringCount++

	return nil
}
// UpdateLiveRoute mock implementation: counts each unique
// (prefix, origin, peer) tuple exactly once.
func (m *mockStore) UpdateLiveRoute(prefixID, originASNID uuid.UUID, peerASN int, _ string, _ time.Time) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Build the key with the decimal peer ASN. The previous
	// string(rune(peerASN)) converted the ASN to a single Unicode code
	// point, which is lossy (all invalid code points collapse to
	// U+FFFD), so distinct routes could collide on the same key.
	key := prefixID.String() + "_" + originASNID.String() + "_" + strconv.Itoa(peerASN)
	if !m.Routes[key] {
		m.Routes[key] = true
		m.RouteCount++
	}

	return nil
}
// WithdrawLiveRoute mock implementation: only counts withdrawals.
func (m *mockStore) WithdrawLiveRoute(_ uuid.UUID, _ int, _ time.Time) error {
	m.mu.Lock()
	m.WithdrawalCount++
	m.mu.Unlock()

	return nil
}
// GetActiveLiveRoutes mock implementation; always returns an empty
// (non-nil) slice.
func (m *mockStore) GetActiveLiveRoutes() ([]database.LiveRoute, error) {
	return []database.LiveRoute{}, nil
}
// Close mock implementation; the mock holds no external resources.
func (m *mockStore) Close() error {
	return nil
}
// GetStats returns aggregate statistics about the mock store contents.
func (m *mockStore) GetStats() (database.Stats, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	stats := database.Stats{
		ASNs:         len(m.ASNs),
		Prefixes:     len(m.Prefixes),
		IPv4Prefixes: m.IPv4Prefixes,
		IPv6Prefixes: m.IPv6Prefixes,
		Peerings:     m.PeeringCount,
		LiveRoutes:   m.RouteCount,
	}

	return stats, nil
}
// TestRouteWatchLiveFeed exercises the whole pipeline against the real
// RIPE RIS Live feed for five seconds and asserts that data arrived.
// It requires network access and takes several seconds, so it is
// skipped under `go test -short`.
func TestRouteWatchLiveFeed(t *testing.T) {
	if testing.Short() {
		t.Skip("requires network access to RIS Live; skipped in -short mode")
	}

	// Create mock database.
	mockDB := newMockStore()
	defer mockDB.Close()

	logger := NewLogger()
	metricsTracker := metrics.New()
	s := streamer.New(logger, metricsTracker)
	srv := server.New(mockDB, s, logger)

	const feedDuration = 5 * time.Second

	deps := Dependencies{
		DB:       mockDB,
		Streamer: s,
		Server:   srv,
		Logger:   logger,
		Config: Config{
			MaxRuntime: feedDuration,
		},
	}
	rw := New(deps)

	// Run in the background; MaxRuntime stops it on its own.
	go func() {
		_ = rw.Run(context.Background())
	}()

	// Let the stream run for the configured window before sampling.
	time.Sleep(feedDuration)

	stats, err := mockDB.GetStats()
	if err != nil {
		t.Fatalf("Failed to get stats: %v", err)
	}

	if stats.ASNs == 0 {
		t.Error("Expected to receive some ASNs from live feed")
	}
	t.Logf("Received %d unique ASNs in 5 seconds", stats.ASNs)

	if stats.Prefixes == 0 {
		t.Error("Expected to receive some prefixes from live feed")
	}
	t.Logf("Received %d unique prefixes (%d IPv4, %d IPv6) in 5 seconds",
		stats.Prefixes, stats.IPv4Prefixes, stats.IPv6Prefixes)

	if stats.Peerings == 0 {
		t.Error("Expected to receive some peerings from live feed")
	}
	t.Logf("Recorded %d AS peering relationships in 5 seconds", stats.Peerings)

	if stats.LiveRoutes == 0 {
		t.Error("Expected to have some active routes")
	}
	t.Logf("Active routes: %d", stats.LiveRoutes)
}

View File

@ -0,0 +1,12 @@
package routewatch
import (
"testing"
)
// TestNewLogger verifies that NewLogger constructs a usable logger.
func TestNewLogger(t *testing.T) {
	if got := NewLogger(); got == nil {
		t.Fatal("NewLogger returned nil")
	}
}

View File

@ -0,0 +1,51 @@
package routewatch
import (
"context"
"log/slog"
"os"
"os/signal"
"syscall"
"go.uber.org/fx"
)
// CLIEntry is the main entry point for the CLI. It builds the fx
// dependency graph, runs RouteWatch in a background goroutine once fx
// has started, and translates SIGINT/SIGTERM into context cancellation.
func CLIEntry() {
	app := fx.New(
		getModule(),
		fx.Invoke(func(lc fx.Lifecycle, rw *RouteWatch, logger *slog.Logger) {
			lc.Append(fx.Hook{
				OnStart: func(_ context.Context) error {
					// Run in a goroutine so OnStart returns promptly;
					// fx expects start hooks not to block.
					go func() {
						ctx, cancel := context.WithCancel(context.Background())
						defer cancel()

						// Handle shutdown signals.
						sigCh := make(chan os.Signal, 1)
						signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)

						go func() {
							<-sigCh
							logger.Info("Received shutdown signal")
							cancel()
						}()

						// NOTE(review): cancelling ctx stops rw.Run but
						// does not stop the fx app itself — confirm the
						// intended process-exit path on signal.
						if err := rw.Run(ctx); err != nil {
							logger.Error("RouteWatch error", "error", err)
						}
					}()

					return nil
				},
				OnStop: func(_ context.Context) error {
					logger.Info("Shutting down RouteWatch")

					return nil
				},
			})
		}),
	)

	app.Run()
}

View File

@ -0,0 +1,144 @@
package routewatch
import (
"log/slog"
"strconv"
"git.eeqj.de/sneak/routewatch/internal/database"
"git.eeqj.de/sneak/routewatch/internal/ristypes"
)
// DatabaseHandler handles BGP UPDATE messages and stores them in the
// database via the injected Store.
type DatabaseHandler struct {
	db     database.Store
	logger *slog.Logger
}
// NewDatabaseHandler creates a handler that persists BGP updates via db.
func NewDatabaseHandler(db database.Store, logger *slog.Logger) *DatabaseHandler {
	return &DatabaseHandler{db: db, logger: logger}
}
// WantsMessage reports whether this handler processes the given message
// type; only UPDATE messages are persisted to the database.
func (h *DatabaseHandler) WantsMessage(messageType string) bool {
	const updateType = "UPDATE"

	return messageType == updateType
}
// HandleMessage processes a RIS UPDATE message and updates the database:
// announced prefixes refresh the live routing table, the AS path is
// decomposed into pairwise peering relationships, and withdrawn
// prefixes are removed from the live table.
func (h *DatabaseHandler) HandleMessage(msg *ristypes.RISMessage) {
	// Peer ASN arrives as a decimal string from RIS Live.
	peerASN, err := strconv.Atoi(msg.PeerASN)
	if err != nil {
		h.logger.Error("Failed to parse peer ASN", "peer_asn", msg.PeerASN, "error", err)

		return
	}

	// Origin ASN is the last element of the AS path (0 when the path is empty).
	var originASN int
	if len(msg.Path) > 0 {
		originASN = msg.Path[len(msg.Path)-1]
	}

	h.processAnnouncements(msg, originASN, peerASN)

	// Record AS-path peerings once per message instead of once per
	// announced prefix: the path is identical for every prefix in the
	// same UPDATE, so the previous per-prefix loop repeated the same
	// GetOrCreateASN/RecordPeering work redundantly.
	if hasAnnouncedPrefix(msg) {
		h.processPathPeerings(msg)
	}

	h.processWithdrawals(msg, peerASN)
}

// hasAnnouncedPrefix reports whether the message announces at least one
// prefix (mirrors the condition under which the original code reached
// the peering-recording loop).
func hasAnnouncedPrefix(msg *ristypes.RISMessage) bool {
	for _, announcement := range msg.Announcements {
		if len(announcement.Prefixes) > 0 {
			return true
		}
	}

	return false
}

// processAnnouncements upserts each announced prefix and its origin ASN
// and refreshes the corresponding live route.
func (h *DatabaseHandler) processAnnouncements(msg *ristypes.RISMessage, originASN, peerASN int) {
	timestamp := msg.ParsedTimestamp

	for _, announcement := range msg.Announcements {
		for _, prefix := range announcement.Prefixes {
			p, err := h.db.GetOrCreatePrefix(prefix, timestamp)
			if err != nil {
				h.logger.Error("Failed to get/create prefix", "prefix", prefix, "error", err)

				continue
			}

			asn, err := h.db.GetOrCreateASN(originASN, timestamp)
			if err != nil {
				h.logger.Error("Failed to get/create ASN", "asn", originASN, "error", err)

				continue
			}

			err = h.db.UpdateLiveRoute(p.ID, asn.ID, peerASN, announcement.NextHop, timestamp)
			if err != nil {
				h.logger.Error("Failed to update live route",
					"prefix", prefix,
					"origin_asn", originASN,
					"peer_asn", peerASN,
					"error", err,
				)
			}

			// TODO: Record the announcement in the announcements table
		}
	}
}

// processPathPeerings records an adjacency for every consecutive pair
// of ASNs in the message's AS path.
func (h *DatabaseHandler) processPathPeerings(msg *ristypes.RISMessage) {
	if len(msg.Path) < 2 {
		return
	}

	timestamp := msg.ParsedTimestamp

	for i := range len(msg.Path) - 1 {
		fromASN := msg.Path[i]
		toASN := msg.Path[i+1]

		fromAS, err := h.db.GetOrCreateASN(fromASN, timestamp)
		if err != nil {
			h.logger.Error("Failed to get/create from ASN", "asn", fromASN, "error", err)

			continue
		}

		toAS, err := h.db.GetOrCreateASN(toASN, timestamp)
		if err != nil {
			h.logger.Error("Failed to get/create to ASN", "asn", toASN, "error", err)

			continue
		}

		if err := h.db.RecordPeering(fromAS.ID.String(), toAS.ID.String(), timestamp); err != nil {
			h.logger.Error("Failed to record peering",
				"from_asn", fromASN,
				"to_asn", toASN,
				"error", err,
			)
		}
	}
}

// processWithdrawals marks each withdrawn prefix as no longer routed by
// this peer.
func (h *DatabaseHandler) processWithdrawals(msg *ristypes.RISMessage, peerASN int) {
	timestamp := msg.ParsedTimestamp

	for _, prefix := range msg.Withdrawals {
		// GetOrCreate (rather than a pure lookup) preserves the
		// original behavior of creating a record for a withdrawn
		// prefix we have never seen announced.
		p, err := h.db.GetOrCreatePrefix(prefix, timestamp)
		if err != nil {
			h.logger.Error("Failed to get prefix for withdrawal", "prefix", prefix, "error", err)

			continue
		}

		if err := h.db.WithdrawLiveRoute(p.ID, peerASN, timestamp); err != nil {
			h.logger.Error("Failed to withdraw route",
				"prefix", prefix,
				"peer_asn", peerASN,
				"error", err,
			)
		}

		// TODO: Record the withdrawal in the withdrawals table
	}
}

View File

@ -0,0 +1,45 @@
package routewatch
import (
"git.eeqj.de/sneak/routewatch/internal/ristypes"
"log/slog"
)
// SimpleHandler is a basic implementation of streamer.MessageHandler
// that filters by message type and forwards matches to a callback.
type SimpleHandler struct {
	logger       *slog.Logger
	messageTypes []string // accepted types; empty means accept all
	callback     func(*ristypes.RISMessage)
}
// NewSimpleHandler creates a handler that accepts the given message
// types (all types when the list is empty) and invokes callback for each.
func NewSimpleHandler(logger *slog.Logger, messageTypes []string, callback func(*ristypes.RISMessage)) *SimpleHandler {
	h := &SimpleHandler{
		logger:       logger,
		messageTypes: messageTypes,
		callback:     callback,
	}

	return h
}
// WantsMessage reports whether this handler processes messages of the
// given type. An empty filter list means every type is accepted.
func (h *SimpleHandler) WantsMessage(messageType string) bool {
	if len(h.messageTypes) == 0 {
		return true
	}

	for _, accepted := range h.messageTypes {
		if accepted == messageType {
			return true
		}
	}

	return false
}
// HandleMessage forwards the message to the configured callback, if any.
func (h *SimpleHandler) HandleMessage(msg *ristypes.RISMessage) {
	if h.callback == nil {
		return
	}

	h.callback(msg)
}

416
internal/server/server.go Normal file
View File

@ -0,0 +1,416 @@
// Package server provides HTTP endpoints for status monitoring and statistics
package server
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"os"
"time"
"git.eeqj.de/sneak/routewatch/internal/database"
"git.eeqj.de/sneak/routewatch/internal/streamer"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
)
// Server provides HTTP endpoints for status monitoring and statistics.
type Server struct {
	router   *chi.Mux
	db       database.Store
	streamer *streamer.Streamer
	logger   *slog.Logger
	srv      *http.Server // nil until Start is called
}
// New creates an HTTP server wired to the given store, streamer and
// logger, with all routes configured.
func New(db database.Store, streamer *streamer.Streamer, logger *slog.Logger) *Server {
	srv := &Server{db: db, streamer: streamer, logger: logger}
	srv.setupRoutes()

	return srv
}
// setupRoutes configures the middleware stack and HTTP routes.
func (s *Server) setupRoutes() {
	const requestTimeout = 60 * time.Second

	mux := chi.NewRouter()

	// Middleware (order matters: request ID and real IP first so the
	// logger and recoverer see them).
	mux.Use(middleware.RequestID)
	mux.Use(middleware.RealIP)
	mux.Use(middleware.Logger)
	mux.Use(middleware.Recoverer)
	mux.Use(middleware.Timeout(requestTimeout))

	// Human-facing pages.
	mux.Get("/", s.handleRoot())
	mux.Get("/status", s.handleStatusHTML())
	mux.Get("/status.json", s.handleStatusJSON())

	// JSON API.
	mux.Route("/api/v1", func(r chi.Router) {
		r.Get("/stats", s.handleStats())
	})

	s.router = mux
}
// Start launches the HTTP server in a background goroutine. The listen
// port comes from $PORT, defaulting to 8080.
func (s *Server) Start() error {
	const readHeaderTimeout = 10 * time.Second

	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}

	s.srv = &http.Server{
		Addr:              ":" + port,
		Handler:           s.router,
		ReadHeaderTimeout: readHeaderTimeout,
	}

	s.logger.Info("Starting HTTP server", "port", port)

	go func() {
		// ErrServerClosed is the normal result of Shutdown; anything
		// else is a genuine failure worth logging.
		err := s.srv.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			s.logger.Error("HTTP server error", "error", err)
		}
	}()

	return nil
}
// Stop gracefully shuts down the HTTP server, honoring ctx's deadline.
// Calling Stop before Start is a no-op.
func (s *Server) Stop(ctx context.Context) error {
	if s.srv == nil {
		return nil
	}

	s.logger.Info("Stopping HTTP server")

	return s.srv.Shutdown(ctx)
}
// handleRoot returns a handler that redirects the site root to the
// HTML status page at /status.
func (s *Server) handleRoot() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "/status", http.StatusSeeOther)
	}
}
// handleStatusJSON returns the handler backing the legacy /status.json
// endpoint.
//
// Its payload was a field-for-field, tag-for-tag duplicate of the
// /api/v1/stats response, so this now reuses that handler instead of
// maintaining a second copy of the marshaling code. The wire format is
// unchanged.
func (s *Server) handleStatusJSON() http.HandlerFunc {
	return s.handleStats()
}
// handleStats returns a handler serving the /api/v1/stats JSON payload,
// combining stream metrics with database statistics.
func (s *Server) handleStats() http.HandlerFunc {
	// StatsResponse is the wire format for the stats endpoint.
	type StatsResponse struct {
		Uptime         string  `json:"uptime"`
		TotalMessages  uint64  `json:"total_messages"`
		TotalBytes     uint64  `json:"total_bytes"`
		MessagesPerSec float64 `json:"messages_per_sec"`
		MbitsPerSec    float64 `json:"mbits_per_sec"`
		Connected      bool    `json:"connected"`
		ASNs           int     `json:"asns"`
		Prefixes       int     `json:"prefixes"`
		IPv4Prefixes   int     `json:"ipv4_prefixes"`
		IPv6Prefixes   int     `json:"ipv6_prefixes"`
		Peerings       int     `json:"peerings"`
		LiveRoutes     int     `json:"live_routes"`
	}

	const bitsPerMegabit = 1000000.0

	return func(w http.ResponseWriter, _ *http.Request) {
		stream := s.streamer.GetMetrics()

		dbStats, err := s.db.GetStats()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)

			return
		}

		// Report zero uptime until the first successful connect.
		uptime := "0s"
		if !stream.ConnectedSince.IsZero() {
			uptime = time.Since(stream.ConnectedSince).Truncate(time.Second).String()
		}

		payload := StatsResponse{
			Uptime:         uptime,
			TotalMessages:  stream.TotalMessages,
			TotalBytes:     stream.TotalBytes,
			MessagesPerSec: stream.MessagesPerSec,
			MbitsPerSec:    stream.BitsPerSec / bitsPerMegabit,
			Connected:      stream.Connected,
			ASNs:           dbStats.ASNs,
			Prefixes:       dbStats.Prefixes,
			IPv4Prefixes:   dbStats.IPv4Prefixes,
			IPv6Prefixes:   dbStats.IPv6Prefixes,
			Peerings:       dbStats.Peerings,
			LiveRoutes:     dbStats.LiveRoutes,
		}

		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(payload); err != nil {
			s.logger.Error("Failed to encode stats", "error", err)
		}
	}
}
// handleStatusHTML serves the static HTML status page.
func (s *Server) handleStatusHTML() http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")

		_, err := fmt.Fprint(w, statusHTML)
		if err != nil {
			s.logger.Error("Failed to write HTML", "error", err)
		}
	}
}
// statusHTML is the self-contained status page served at /status. It
// polls /api/v1/stats every 500ms from inline JavaScript and renders
// the connection, stream, and database metrics client-side.
const statusHTML = `<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>RouteWatch Status</title>
    <style>
        body {
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            max-width: 1200px;
            margin: 0 auto;
            padding: 20px;
            background: #f5f5f5;
        }
        h1 {
            color: #333;
            margin-bottom: 30px;
        }
        .status-grid {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
            gap: 20px;
            margin-bottom: 30px;
        }
        .status-card {
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .status-card h2 {
            margin: 0 0 15px 0;
            font-size: 18px;
            color: #666;
        }
        .metric {
            display: flex;
            justify-content: space-between;
            padding: 8px 0;
            border-bottom: 1px solid #eee;
        }
        .metric:last-child {
            border-bottom: none;
        }
        .metric-label {
            color: #666;
        }
        .metric-value {
            font-weight: 600;
            color: #333;
        }
        .connected {
            color: #22c55e;
        }
        .disconnected {
            color: #ef4444;
        }
        .error {
            background: #fee;
            color: #c00;
            padding: 10px;
            border-radius: 4px;
            margin-top: 20px;
        }
    </style>
</head>
<body>
    <h1>RouteWatch Status</h1>
    <div id="error" class="error" style="display: none;"></div>
    <div class="status-grid">
        <div class="status-card">
            <h2>Connection Status</h2>
            <div class="metric">
                <span class="metric-label">Status</span>
                <span class="metric-value" id="connected">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">Uptime</span>
                <span class="metric-value" id="uptime">-</span>
            </div>
        </div>
        <div class="status-card">
            <h2>Stream Statistics</h2>
            <div class="metric">
                <span class="metric-label">Total Messages</span>
                <span class="metric-value" id="total_messages">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">Messages/sec</span>
                <span class="metric-value" id="messages_per_sec">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">Total Data</span>
                <span class="metric-value" id="total_bytes">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">Throughput</span>
                <span class="metric-value" id="mbits_per_sec">-</span>
            </div>
        </div>
        <div class="status-card">
            <h2>Database Statistics</h2>
            <div class="metric">
                <span class="metric-label">ASNs</span>
                <span class="metric-value" id="asns">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">Total Prefixes</span>
                <span class="metric-value" id="prefixes">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">IPv4 Prefixes</span>
                <span class="metric-value" id="ipv4_prefixes">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">IPv6 Prefixes</span>
                <span class="metric-value" id="ipv6_prefixes">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">Peerings</span>
                <span class="metric-value" id="peerings">-</span>
            </div>
            <div class="metric">
                <span class="metric-label">Live Routes</span>
                <span class="metric-value" id="live_routes">-</span>
            </div>
        </div>
    </div>
    <script>
        function formatBytes(bytes) {
            if (bytes === 0) return '0 B';
            const k = 1024;
            const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
            const i = Math.floor(Math.log(bytes) / Math.log(k));
            return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
        }
        function formatNumber(num) {
            return num.toLocaleString();
        }
        function updateStatus() {
            fetch('/api/v1/stats')
                .then(response => response.json())
                .then(data => {
                    // Connection status
                    const connectedEl = document.getElementById('connected');
                    connectedEl.textContent = data.connected ? 'Connected' : 'Disconnected';
                    connectedEl.className = 'metric-value ' + (data.connected ? 'connected' : 'disconnected');
                    // Update all metrics
                    document.getElementById('uptime').textContent = data.uptime;
                    document.getElementById('total_messages').textContent = formatNumber(data.total_messages);
                    document.getElementById('messages_per_sec').textContent = data.messages_per_sec.toFixed(1);
                    document.getElementById('total_bytes').textContent = formatBytes(data.total_bytes);
                    document.getElementById('mbits_per_sec').textContent = data.mbits_per_sec.toFixed(2) + ' Mbps';
                    document.getElementById('asns').textContent = formatNumber(data.asns);
                    document.getElementById('prefixes').textContent = formatNumber(data.prefixes);
                    document.getElementById('ipv4_prefixes').textContent = formatNumber(data.ipv4_prefixes);
                    document.getElementById('ipv6_prefixes').textContent = formatNumber(data.ipv6_prefixes);
                    document.getElementById('peerings').textContent = formatNumber(data.peerings);
                    document.getElementById('live_routes').textContent = formatNumber(data.live_routes);
                    // Clear any errors
                    document.getElementById('error').style.display = 'none';
                })
                .catch(error => {
                    document.getElementById('error').textContent = 'Error fetching status: ' + error;
                    document.getElementById('error').style.display = 'block';
                });
        }
        // Update immediately and then every 500ms
        updateStatus();
        setInterval(updateStatus, 500);
    </script>
</body>
</html>
`

View File

@ -0,0 +1,310 @@
// Package streamer implements an HTTP client that connects to the RIPE RIS Live streaming API,
// parses BGP UPDATE messages from the JSON stream, and dispatches them to registered handlers.
package streamer
import (
"bufio"
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"os"
"sync"
"time"
"git.eeqj.de/sneak/routewatch/internal/metrics"
"git.eeqj.de/sneak/routewatch/internal/ristypes"
)
const (
	// risLiveURL is the RIS Live firehose endpoint (JSON, all collectors).
	risLiveURL = "https://ris-live.ripe.net/v1/stream/?format=json"

	metricsWindowSize  = 60 // seconds for rolling average
	metricsUpdateRate  = time.Second
	metricsLogInterval = 10 * time.Second

	bytesPerKB = 1024
	bytesPerMB = 1024 * 1024
)
// MessageHandler is an interface for handling parsed RIS messages.
// Implementations are registered with Streamer.RegisterHandler.
type MessageHandler interface {
	// WantsMessage returns true if this handler wants to process
	// messages of the given type (e.g. "UPDATE").
	WantsMessage(messageType string) bool

	// HandleMessage processes a RIS message.
	HandleMessage(msg *ristypes.RISMessage)
}

// RawMessageHandler is a callback invoked with each raw JSON line read
// from the stream, before any parsing.
type RawMessageHandler func(line string)
// Streamer handles streaming BGP updates from RIS Live and fanning
// them out to registered handlers.
type Streamer struct {
	logger     *slog.Logger
	client     *http.Client
	handlers   []MessageHandler  // guarded by mu
	rawHandler RawMessageHandler // guarded by mu; optional
	mu         sync.RWMutex
	cancel     context.CancelFunc // cancels the active stream; guarded by mu
	running    bool               // guarded by mu
	metrics    *metrics.Tracker
}
// New creates a RIS streamer using the given logger and metrics tracker.
func New(logger *slog.Logger, metrics *metrics.Tracker) *Streamer {
	// Streaming connections stay open indefinitely, so no client timeout.
	httpClient := &http.Client{Timeout: 0}

	return &Streamer{
		logger:   logger,
		client:   httpClient,
		handlers: []MessageHandler{},
		metrics:  metrics,
	}
}
// RegisterHandler adds a handler that will receive parsed messages.
func (s *Streamer) RegisterHandler(handler MessageHandler) {
	s.mu.Lock()
	s.handlers = append(s.handlers, handler)
	s.mu.Unlock()
}
// RegisterRawHandler sets the callback that receives raw stream lines
// (replacing any previously registered one).
func (s *Streamer) RegisterRawHandler(handler RawMessageHandler) {
	s.mu.Lock()
	s.rawHandler = handler
	s.mu.Unlock()
}
// Start begins streaming in a background goroutine. It returns an
// error if the streamer is already running. The stream is stopped via
// Stop, which cancels the context created here.
func (s *Streamer) Start() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.running {
		return fmt.Errorf("streamer already running")
	}

	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel
	s.running = true

	go func() {
		if err := s.stream(ctx); err != nil {
			s.logger.Error("Streaming error", "error", err)
		}
		// Mark not-running once the stream goroutine exits for any reason.
		s.mu.Lock()
		s.running = false
		s.mu.Unlock()
	}()

	return nil
}
// Stop halts the streaming by cancelling the stream context and marking
// the metrics tracker disconnected.
//
// NOTE(review): Stop does not wait for the stream goroutine to exit, so
// IsRunning may briefly report true after Stop returns — confirm
// callers do not depend on synchronous shutdown.
func (s *Streamer) Stop() {
	s.mu.Lock()
	if s.cancel != nil {
		s.cancel()
	}
	s.mu.Unlock()
	s.metrics.SetConnected(false)
}
// IsRunning reports whether the stream goroutine is currently active.
func (s *Streamer) IsRunning() bool {
	s.mu.RLock()
	active := s.running
	s.mu.RUnlock()

	return active
}
// GetMetrics returns a snapshot of the current streaming metrics.
func (s *Streamer) GetMetrics() metrics.StreamMetrics {
	return s.metrics.GetStreamMetrics()
}
// logMetrics logs the current streaming statistics.
func (s *Streamer) logMetrics() {
	// `snap` avoids shadowing the imported metrics package, which the
	// previous local name `metrics` hid inside this function.
	snap := s.metrics.GetStreamMetrics()
	uptime := time.Since(snap.ConnectedSince)

	const bitsPerMegabit = 1000000

	s.logger.Info("Stream statistics",
		"uptime", uptime,
		"total_messages", snap.TotalMessages,
		"total_bytes", snap.TotalBytes,
		"total_mb", fmt.Sprintf("%.2f", float64(snap.TotalBytes)/bytesPerMB),
		"messages_per_sec", fmt.Sprintf("%.2f", snap.MessagesPerSec),
		"bits_per_sec", fmt.Sprintf("%.0f", snap.BitsPerSec),
		"mbps", fmt.Sprintf("%.2f", snap.BitsPerSec/bitsPerMegabit),
	)
}
// updateMetrics records one received message of the given size in the
// metrics tracker, which maintains the counters and derived rates.
func (s *Streamer) updateMetrics(messageBytes int) {
	size := int64(messageBytes)
	s.metrics.RecordMessage(size)
}
// stream connects to the RIS Live endpoint and consumes its
// newline-delimited JSON stream until ctx is cancelled or the
// connection fails. Each line is parsed in its own goroutine and
// dispatched to the registered handlers; the raw handler (if any) is
// called synchronously to preserve line order. Returns any connection
// or read error.
//
// NOTE(review): unparseable JSON and unknown message types panic by
// design (fail-fast so malformed data is noticed during development);
// because these panics occur in spawned goroutines they take down the
// whole process.
func (s *Streamer) stream(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, "GET", risLiveURL, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := s.client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to connect to RIS Live: %w", err)
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			s.logger.Error("Failed to close response body", "error", err)
		}
	}()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	s.logger.Info("Connected to RIS Live stream")
	s.metrics.SetConnected(true)
	// Periodically log streaming statistics until the context ends.
	metricsTicker := time.NewTicker(metricsLogInterval)
	defer metricsTicker.Stop()
	go func() {
		for {
			select {
			case <-metricsTicker.C:
				s.logMetrics()
			case <-ctx.Done():
				return
			}
		}
	}()
	scanner := bufio.NewScanner(resp.Body)
	// bufio.Scanner's default 64KiB token limit is too small for large
	// BGP UPDATE messages (long AS paths, many prefixes). Raise it so
	// oversized lines do not abort the stream with bufio.ErrTooLong.
	const (
		initialScanBuf = 64 * 1024
		maxScanBuf     = 1024 * 1024
	)
	scanner.Buffer(make([]byte, 0, initialScanBuf), maxScanBuf)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			s.logger.Info("Stream stopped by context")

			return ctx.Err()
		default:
		}
		line := scanner.Bytes()
		if len(line) == 0 {
			continue
		}
		// Update metrics with message size
		s.updateMetrics(len(line))
		// Call raw handler if registered
		s.mu.RLock()
		rawHandler := s.rawHandler
		s.mu.RUnlock()
		if rawHandler != nil {
			// Call raw handler synchronously to preserve order
			rawHandler(string(line))
		}
		// Snapshot current handlers so the parse goroutine does not
		// race with RegisterHandler.
		s.mu.RLock()
		handlers := make([]MessageHandler, len(s.handlers))
		copy(handlers, s.handlers)
		s.mu.RUnlock()
		// Spawn goroutine to parse and process the message
		go func(rawLine []byte, messageHandlers []MessageHandler) {
			// Parse the outer wrapper first
			var wrapper ristypes.RISLiveMessage
			if err := json.Unmarshal(rawLine, &wrapper); err != nil {
				// Output the raw line and panic on parse failure
				fmt.Fprintf(os.Stderr, "Failed to parse JSON: %v\n", err)
				fmt.Fprintf(os.Stderr, "Raw line: %s\n", string(rawLine))
				panic(fmt.Sprintf("JSON parse error: %v", err))
			}
			// Check if it's a ris_message wrapper
			if wrapper.Type != "ris_message" {
				s.logger.Error("Unexpected wrapper type",
					"type", wrapper.Type,
					"line", string(rawLine),
				)

				return
			}
			// Get the actual message
			msg := wrapper.Data
			// Parse the timestamp (epoch seconds, stored as UTC)
			msg.ParsedTimestamp = time.Unix(int64(msg.Timestamp), 0).UTC()
			// Process based on message type
			switch msg.Type {
			case "UPDATE":
				// Process BGP UPDATE messages
				// Will be handled by registered handlers
			case "RIS_PEER_STATE":
				s.logger.Info("RIS peer state change",
					"peer", msg.Peer,
					"peer_asn", msg.PeerASN,
				)
			case "KEEPALIVE":
				// BGP keepalive messages - just log at debug level
				s.logger.Debug("BGP keepalive",
					"peer", msg.Peer,
					"peer_asn", msg.PeerASN,
				)
			case "OPEN":
				// BGP open messages
				s.logger.Info("BGP session opened",
					"peer", msg.Peer,
					"peer_asn", msg.PeerASN,
				)
			case "NOTIFICATION":
				// BGP notification messages (errors)
				s.logger.Warn("BGP notification",
					"peer", msg.Peer,
					"peer_asn", msg.PeerASN,
				)
			case "STATE":
				// Peer state changes
				s.logger.Info("Peer state change",
					"peer", msg.Peer,
					"peer_asn", msg.PeerASN,
				)
			default:
				fmt.Fprintf(
					os.Stderr,
					"UNKNOWN MESSAGE TYPE: %s\nRAW MESSAGE: %s\n",
					msg.Type,
					string(rawLine),
				)
				panic(fmt.Sprintf("Unknown RIS message type: %s", msg.Type))
			}
			// Spawn goroutine for each handler callback that wants this message type
			for _, handler := range messageHandlers {
				if handler.WantsMessage(msg.Type) {
					go func(h MessageHandler) {
						h.HandleMessage(&msg)
					}(handler)
				}
			}
		}(append([]byte(nil), line...), handlers) // Copy the line to avoid data races
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("scanner error: %w", err)
	}

	return nil
}

View File

@ -0,0 +1,34 @@
package streamer
import (
"testing"
"git.eeqj.de/sneak/routewatch/internal/metrics"
"log/slog"
)
// TestNewStreamer verifies that New wires every Streamer field.
func TestNewStreamer(t *testing.T) {
	logger := slog.Default()
	tracker := metrics.New()

	streamer := New(logger, tracker)
	if streamer == nil {
		t.Fatal("New() returned nil")
	}

	if streamer.logger != logger {
		t.Error("logger not set correctly")
	}

	if streamer.client == nil {
		t.Error("HTTP client not initialized")
	}

	if streamer.handlers == nil {
		t.Error("handlers slice not initialized")
	}

	if streamer.metrics != tracker {
		t.Error("metrics tracker not set correctly")
	}
}