Initial commit: RouteWatch BGP stream monitor
- Connects to RIPE RIS Live stream to receive real-time BGP updates
- Stores BGP data in SQLite database:
  - ASNs with first/last seen timestamps
  - Prefixes with IPv4/IPv6 classification
  - BGP announcements and withdrawals
  - AS-to-AS peering relationships from AS paths
  - Live routing table tracking active routes
- HTTP server with statistics endpoints
- Metrics tracking with go-metrics
- Custom JSON unmarshaling to handle nested AS sets in paths
- Dependency injection with uber/fx
- Pure Go implementation (no CGO)
- Includes streamdumper utility for debugging raw messages
This commit is contained in:
158
internal/routewatch/app.go
Normal file
158
internal/routewatch/app.go
Normal file
@@ -0,0 +1,158 @@
|
||||
// Package routewatch contains the primary RouteWatch type that represents a running instance
|
||||
// of the application and contains pointers to its core dependencies, and is responsible for initialization.
|
||||
package routewatch
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.eeqj.de/sneak/routewatch/internal/database"
|
||||
"git.eeqj.de/sneak/routewatch/internal/metrics"
|
||||
"git.eeqj.de/sneak/routewatch/internal/server"
|
||||
"git.eeqj.de/sneak/routewatch/internal/streamer"
|
||||
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
// Config contains runtime configuration for RouteWatch.
type Config struct {
	// MaxRuntime bounds how long Run executes before its context is
	// cancelled; zero disables the limit entirely.
	MaxRuntime time.Duration // Maximum runtime (0 = run forever)
}
|
||||
|
||||
// NewConfig provides default configuration
|
||||
func NewConfig() Config {
|
||||
return Config{
|
||||
MaxRuntime: 0, // Run forever by default
|
||||
}
|
||||
}
|
||||
|
||||
// Dependencies contains all dependencies for RouteWatch, injected by fx.
type Dependencies struct {
	fx.In

	DB       database.Store     // persistent store for BGP data
	Streamer *streamer.Streamer // RIS Live stream client
	Server   *server.Server     // HTTP statistics server
	Logger   *slog.Logger       // structured application logger
	// Config is optional; when absent its zero value applies (no
	// runtime limit).
	Config Config `optional:"true"`
}
|
||||
|
||||
// RouteWatch represents the main application instance. It owns the
// database store, the RIS stream client, the HTTP server, and the
// configured runtime limit.
type RouteWatch struct {
	db         database.Store
	streamer   *streamer.Streamer
	server     *server.Server
	logger     *slog.Logger
	maxRuntime time.Duration // 0 means run until the context is cancelled
}
|
||||
|
||||
// New creates a new RouteWatch instance
|
||||
func New(deps Dependencies) *RouteWatch {
|
||||
return &RouteWatch{
|
||||
db: deps.DB,
|
||||
streamer: deps.Streamer,
|
||||
server: deps.Server,
|
||||
logger: deps.Logger,
|
||||
maxRuntime: deps.Config.MaxRuntime,
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the RouteWatch application
|
||||
func (rw *RouteWatch) Run(ctx context.Context) error {
|
||||
rw.logger.Info("Starting RouteWatch")
|
||||
|
||||
// Apply runtime limit if specified
|
||||
if rw.maxRuntime > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, rw.maxRuntime)
|
||||
defer cancel()
|
||||
rw.logger.Info("Running with time limit", "max_runtime", rw.maxRuntime)
|
||||
}
|
||||
|
||||
// Register database handler to process BGP UPDATE messages
|
||||
dbHandler := NewDatabaseHandler(rw.db, rw.logger)
|
||||
rw.streamer.RegisterHandler(dbHandler)
|
||||
|
||||
// Start streaming
|
||||
if err := rw.streamer.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start HTTP server
|
||||
if err := rw.server.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for context cancellation
|
||||
<-ctx.Done()
|
||||
|
||||
// Stop services
|
||||
rw.streamer.Stop()
|
||||
|
||||
// Stop HTTP server with a timeout
|
||||
const serverStopTimeout = 5 * time.Second
|
||||
stopCtx, cancel := context.WithTimeout(context.Background(), serverStopTimeout)
|
||||
defer cancel()
|
||||
if err := rw.server.Stop(stopCtx); err != nil {
|
||||
rw.logger.Error("Failed to stop HTTP server gracefully", "error", err)
|
||||
}
|
||||
|
||||
// Log final metrics
|
||||
metrics := rw.streamer.GetMetrics()
|
||||
rw.logger.Info("Final metrics",
|
||||
"total_messages", metrics.TotalMessages,
|
||||
"total_bytes", metrics.TotalBytes,
|
||||
"messages_per_sec", metrics.MessagesPerSec,
|
||||
"bits_per_sec", metrics.BitsPerSec,
|
||||
"duration", time.Since(metrics.ConnectedSince),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewLogger creates a structured logger. Debug level is enabled when
// the DEBUG environment variable contains "routewatch". Output is JSON
// when stdout is not a terminal (redirected to a file or pipe, or TERM
// is unset), and human-readable text otherwise.
func NewLogger() *slog.Logger {
	level := slog.LevelInfo
	if debug := os.Getenv("DEBUG"); strings.Contains(debug, "routewatch") {
		level = slog.LevelDebug
	}

	opts := &slog.HandlerOptions{
		Level: level,
	}

	// Detect whether stdout is a character device (i.e. a terminal).
	// The previous check compared os.Stdout.Name() against
	// "/dev/stdout", which is always equal for the package-level
	// os.Stdout and therefore never detected redirection.
	isTerminal := false
	if fi, err := os.Stdout.Stat(); err == nil {
		isTerminal = fi.Mode()&os.ModeCharDevice != 0
	}

	var handler slog.Handler
	if !isTerminal || os.Getenv("TERM") == "" {
		// Not a terminal, use JSON
		handler = slog.NewJSONHandler(os.Stdout, opts)
	} else {
		// Terminal, use text
		handler = slog.NewTextHandler(os.Stdout, opts)
	}

	return slog.New(handler)
}
|
||||
|
||||
// getModule provides all fx dependencies for the application: logger,
// config, metrics tracker, database, streamer, HTTP server, and the
// RouteWatch instance itself.
func getModule() fx.Option {
	return fx.Options(
		fx.Provide(
			NewLogger,
			NewConfig,
			metrics.New,
			database.New,
			// Expose the concrete *database.Database under the
			// database.Store interface so consumers depend on the
			// abstraction rather than the implementation.
			// NOTE(review): the adapter function already returns
			// database.Store, so the fx.As annotation looks redundant —
			// confirm against the fx docs before removing it.
			fx.Annotate(
				func(db *database.Database) database.Store {
					return db
				},
				fx.As(new(database.Store)),
			),
			streamer.New,
			server.New,
			New,
		),
	)
}
|
||||
243
internal/routewatch/app_integration_test.go
Normal file
243
internal/routewatch/app_integration_test.go
Normal file
@@ -0,0 +1,243 @@
|
||||
package routewatch
|
||||
|
||||
import (
	"context"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"git.eeqj.de/sneak/routewatch/internal/database"
	"git.eeqj.de/sneak/routewatch/internal/metrics"
	"git.eeqj.de/sneak/routewatch/internal/server"
	"git.eeqj.de/sneak/routewatch/internal/streamer"
	"github.com/google/uuid"
)
|
||||
|
||||
// mockStore is a mock implementation of database.Store for testing.
// All counters and maps are guarded by mu; distinct items are tracked
// in maps so repeated calls for the same item are counted once.
type mockStore struct {
	mu sync.Mutex

	// Counters for tracking calls
	ASNCount        int // distinct ASNs created
	PrefixCount     int // distinct prefixes created
	PeeringCount    int // distinct (from, to) peering pairs recorded
	RouteCount      int // distinct live routes recorded
	WithdrawalCount int // total WithdrawLiveRoute calls (not deduplicated)

	// Track unique items
	ASNs     map[int]*database.ASN
	Prefixes map[string]*database.Prefix
	Peerings map[string]bool // key is "from_to"
	Routes   map[string]bool // key is "prefix_origin_peer"

	// Track IP versions
	IPv4Prefixes int // prefixes without a ':' in them
	IPv6Prefixes int // prefixes containing a ':'
}
|
||||
|
||||
// newMockStore creates a new mock store
|
||||
func newMockStore() *mockStore {
|
||||
return &mockStore{
|
||||
ASNs: make(map[int]*database.ASN),
|
||||
Prefixes: make(map[string]*database.Prefix),
|
||||
Peerings: make(map[string]bool),
|
||||
Routes: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// GetOrCreateASN mock implementation
|
||||
func (m *mockStore) GetOrCreateASN(number int, timestamp time.Time) (*database.ASN, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if asn, exists := m.ASNs[number]; exists {
|
||||
asn.LastSeen = timestamp
|
||||
|
||||
return asn, nil
|
||||
}
|
||||
|
||||
asn := &database.ASN{
|
||||
ID: uuid.New(),
|
||||
Number: number,
|
||||
FirstSeen: timestamp,
|
||||
LastSeen: timestamp,
|
||||
}
|
||||
m.ASNs[number] = asn
|
||||
m.ASNCount++
|
||||
|
||||
return asn, nil
|
||||
}
|
||||
|
||||
// GetOrCreatePrefix mock implementation
|
||||
func (m *mockStore) GetOrCreatePrefix(prefix string, timestamp time.Time) (*database.Prefix, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if p, exists := m.Prefixes[prefix]; exists {
|
||||
p.LastSeen = timestamp
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
const (
|
||||
ipVersionV4 = 4
|
||||
ipVersionV6 = 6
|
||||
)
|
||||
|
||||
ipVersion := ipVersionV4
|
||||
if strings.Contains(prefix, ":") {
|
||||
ipVersion = ipVersionV6
|
||||
}
|
||||
|
||||
p := &database.Prefix{
|
||||
ID: uuid.New(),
|
||||
Prefix: prefix,
|
||||
IPVersion: ipVersion,
|
||||
FirstSeen: timestamp,
|
||||
LastSeen: timestamp,
|
||||
}
|
||||
m.Prefixes[prefix] = p
|
||||
m.PrefixCount++
|
||||
|
||||
if ipVersion == ipVersionV4 {
|
||||
m.IPv4Prefixes++
|
||||
} else {
|
||||
m.IPv6Prefixes++
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// RecordAnnouncement mock implementation. Announcements are deliberately
// not tracked in detail; the method exists only to satisfy the
// database.Store interface.
func (m *mockStore) RecordAnnouncement(_ *database.Announcement) error {
	// Not tracking announcements in detail for now
	return nil
}
|
||||
|
||||
// RecordPeering mock implementation
|
||||
func (m *mockStore) RecordPeering(fromASNID, toASNID string, _ time.Time) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
key := fromASNID + "_" + toASNID
|
||||
if !m.Peerings[key] {
|
||||
m.Peerings[key] = true
|
||||
m.PeeringCount++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateLiveRoute mock implementation
|
||||
func (m *mockStore) UpdateLiveRoute(prefixID, originASNID uuid.UUID, peerASN int, _ string, _ time.Time) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
key := prefixID.String() + "_" + originASNID.String() + "_" + string(rune(peerASN))
|
||||
if !m.Routes[key] {
|
||||
m.Routes[key] = true
|
||||
m.RouteCount++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithdrawLiveRoute mock implementation
|
||||
func (m *mockStore) WithdrawLiveRoute(_ uuid.UUID, _ int, _ time.Time) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.WithdrawalCount++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetActiveLiveRoutes mock implementation: always returns an empty
// slice, since the mock does not materialize route rows.
func (m *mockStore) GetActiveLiveRoutes() ([]database.LiveRoute, error) {
	return []database.LiveRoute{}, nil
}
|
||||
|
||||
// Close mock implementation; the mock holds no resources to release.
func (m *mockStore) Close() error {
	return nil
}
|
||||
|
||||
// GetStats returns statistics about the mock store
|
||||
func (m *mockStore) GetStats() (database.Stats, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
return database.Stats{
|
||||
ASNs: len(m.ASNs),
|
||||
Prefixes: len(m.Prefixes),
|
||||
IPv4Prefixes: m.IPv4Prefixes,
|
||||
IPv6Prefixes: m.IPv6Prefixes,
|
||||
Peerings: m.PeeringCount,
|
||||
LiveRoutes: m.RouteCount,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestRouteWatchLiveFeed(t *testing.T) {
|
||||
// Create mock database
|
||||
mockDB := newMockStore()
|
||||
defer mockDB.Close()
|
||||
|
||||
logger := NewLogger()
|
||||
|
||||
// Create metrics tracker
|
||||
metricsTracker := metrics.New()
|
||||
|
||||
// Create streamer
|
||||
s := streamer.New(logger, metricsTracker)
|
||||
|
||||
// Create server
|
||||
srv := server.New(mockDB, s, logger)
|
||||
|
||||
// Create RouteWatch with 5 second limit
|
||||
deps := Dependencies{
|
||||
DB: mockDB,
|
||||
Streamer: s,
|
||||
Server: srv,
|
||||
Logger: logger,
|
||||
Config: Config{
|
||||
MaxRuntime: 5 * time.Second,
|
||||
},
|
||||
}
|
||||
rw := New(deps)
|
||||
|
||||
// Run with context
|
||||
ctx := context.Background()
|
||||
go func() {
|
||||
_ = rw.Run(ctx)
|
||||
}()
|
||||
|
||||
// Wait for the configured duration
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Get statistics
|
||||
stats, err := mockDB.GetStats()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get stats: %v", err)
|
||||
}
|
||||
|
||||
if stats.ASNs == 0 {
|
||||
t.Error("Expected to receive some ASNs from live feed")
|
||||
}
|
||||
t.Logf("Received %d unique ASNs in 5 seconds", stats.ASNs)
|
||||
|
||||
if stats.Prefixes == 0 {
|
||||
t.Error("Expected to receive some prefixes from live feed")
|
||||
}
|
||||
t.Logf("Received %d unique prefixes (%d IPv4, %d IPv6) in 5 seconds", stats.Prefixes, stats.IPv4Prefixes, stats.IPv6Prefixes)
|
||||
|
||||
if stats.Peerings == 0 {
|
||||
t.Error("Expected to receive some peerings from live feed")
|
||||
}
|
||||
t.Logf("Recorded %d AS peering relationships in 5 seconds", stats.Peerings)
|
||||
|
||||
if stats.LiveRoutes == 0 {
|
||||
t.Error("Expected to have some active routes")
|
||||
}
|
||||
t.Logf("Active routes: %d", stats.LiveRoutes)
|
||||
}
|
||||
12
internal/routewatch/app_test.go
Normal file
12
internal/routewatch/app_test.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package routewatch
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewLogger(t *testing.T) {
|
||||
logger := NewLogger()
|
||||
if logger == nil {
|
||||
t.Fatal("NewLogger returned nil")
|
||||
}
|
||||
}
|
||||
51
internal/routewatch/cli.go
Normal file
51
internal/routewatch/cli.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package routewatch
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
// CLIEntry is the main entry point for the CLI
|
||||
func CLIEntry() {
|
||||
app := fx.New(
|
||||
getModule(),
|
||||
fx.Invoke(func(lc fx.Lifecycle, rw *RouteWatch, logger *slog.Logger) {
|
||||
lc.Append(fx.Hook{
|
||||
OnStart: func(_ context.Context) error {
|
||||
go func() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Handle shutdown signals
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
go func() {
|
||||
<-sigCh
|
||||
logger.Info("Received shutdown signal")
|
||||
cancel()
|
||||
}()
|
||||
|
||||
if err := rw.Run(ctx); err != nil {
|
||||
logger.Error("RouteWatch error", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
},
|
||||
OnStop: func(_ context.Context) error {
|
||||
logger.Info("Shutting down RouteWatch")
|
||||
|
||||
return nil
|
||||
},
|
||||
})
|
||||
}),
|
||||
)
|
||||
|
||||
app.Run()
|
||||
}
|
||||
144
internal/routewatch/dbhandler.go
Normal file
144
internal/routewatch/dbhandler.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package routewatch
|
||||
|
||||
import (
	"log/slog"
	"strconv"
	"time"

	"git.eeqj.de/sneak/routewatch/internal/database"
	"git.eeqj.de/sneak/routewatch/internal/ristypes"
)
|
||||
|
||||
// DatabaseHandler handles BGP messages and stores them in the database.
// It is registered with the streamer (see RouteWatch.Run) and receives
// UPDATE messages via HandleMessage.
type DatabaseHandler struct {
	db     database.Store // destination store for parsed BGP data
	logger *slog.Logger
}
|
||||
|
||||
// NewDatabaseHandler creates a new database handler
|
||||
func NewDatabaseHandler(db database.Store, logger *slog.Logger) *DatabaseHandler {
|
||||
return &DatabaseHandler{
|
||||
db: db,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// WantsMessage returns true if this handler wants to process messages of the given type
|
||||
func (h *DatabaseHandler) WantsMessage(messageType string) bool {
|
||||
// We only care about UPDATE messages for the database
|
||||
return messageType == "UPDATE"
|
||||
}
|
||||
|
||||
// HandleMessage processes a RIS message and updates the database
|
||||
func (h *DatabaseHandler) HandleMessage(msg *ristypes.RISMessage) {
|
||||
// Use the pre-parsed timestamp
|
||||
timestamp := msg.ParsedTimestamp
|
||||
|
||||
// Parse peer ASN
|
||||
peerASN, err := strconv.Atoi(msg.PeerASN)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to parse peer ASN", "peer_asn", msg.PeerASN, "error", err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Get origin ASN from path (last element)
|
||||
var originASN int
|
||||
if len(msg.Path) > 0 {
|
||||
originASN = msg.Path[len(msg.Path)-1]
|
||||
}
|
||||
|
||||
// Process announcements
|
||||
for _, announcement := range msg.Announcements {
|
||||
for _, prefix := range announcement.Prefixes {
|
||||
// Get or create prefix
|
||||
p, err := h.db.GetOrCreatePrefix(prefix, timestamp)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to get/create prefix", "prefix", prefix, "error", err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Get or create origin ASN
|
||||
asn, err := h.db.GetOrCreateASN(originASN, timestamp)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to get/create ASN", "asn", originASN, "error", err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Update live route
|
||||
err = h.db.UpdateLiveRoute(
|
||||
p.ID,
|
||||
asn.ID,
|
||||
peerASN,
|
||||
announcement.NextHop,
|
||||
timestamp,
|
||||
)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to update live route",
|
||||
"prefix", prefix,
|
||||
"origin_asn", originASN,
|
||||
"peer_asn", peerASN,
|
||||
"error", err,
|
||||
)
|
||||
}
|
||||
|
||||
// TODO: Record the announcement in the announcements table
|
||||
// Process AS path to update peerings
|
||||
if len(msg.Path) > 1 {
|
||||
for i := range len(msg.Path) - 1 {
|
||||
fromASN := msg.Path[i]
|
||||
toASN := msg.Path[i+1]
|
||||
|
||||
// Get or create both ASNs
|
||||
fromAS, err := h.db.GetOrCreateASN(fromASN, timestamp)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to get/create from ASN", "asn", fromASN, "error", err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
toAS, err := h.db.GetOrCreateASN(toASN, timestamp)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to get/create to ASN", "asn", toASN, "error", err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Record the peering
|
||||
err = h.db.RecordPeering(fromAS.ID.String(), toAS.ID.String(), timestamp)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to record peering",
|
||||
"from_asn", fromASN,
|
||||
"to_asn", toASN,
|
||||
"error", err,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process withdrawals
|
||||
for _, prefix := range msg.Withdrawals {
|
||||
// Get prefix
|
||||
p, err := h.db.GetOrCreatePrefix(prefix, timestamp)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to get prefix for withdrawal", "prefix", prefix, "error", err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Withdraw the route
|
||||
err = h.db.WithdrawLiveRoute(p.ID, peerASN, timestamp)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to withdraw route",
|
||||
"prefix", prefix,
|
||||
"peer_asn", peerASN,
|
||||
"error", err,
|
||||
)
|
||||
}
|
||||
|
||||
// TODO: Record the withdrawal in the withdrawals table
|
||||
}
|
||||
}
|
||||
45
internal/routewatch/handler.go
Normal file
45
internal/routewatch/handler.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package routewatch
|
||||
|
||||
import (
|
||||
"git.eeqj.de/sneak/routewatch/internal/ristypes"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// SimpleHandler is a basic implementation of streamer.MessageHandler
// that filters by message type and forwards accepted messages to a
// callback.
type SimpleHandler struct {
	logger       *slog.Logger
	messageTypes []string // accepted types; empty means accept all
	callback     func(*ristypes.RISMessage)
}
|
||||
|
||||
// NewSimpleHandler creates a handler that accepts specific message types
|
||||
func NewSimpleHandler(logger *slog.Logger, messageTypes []string, callback func(*ristypes.RISMessage)) *SimpleHandler {
|
||||
return &SimpleHandler{
|
||||
logger: logger,
|
||||
messageTypes: messageTypes,
|
||||
callback: callback,
|
||||
}
|
||||
}
|
||||
|
||||
// WantsMessage returns true if this handler wants to process messages of the given type
|
||||
func (h *SimpleHandler) WantsMessage(messageType string) bool {
|
||||
// If no specific types are set, accept all messages
|
||||
if len(h.messageTypes) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, t := range h.messageTypes {
|
||||
if t == messageType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// HandleMessage processes a RIS message
|
||||
func (h *SimpleHandler) HandleMessage(msg *ristypes.RISMessage) {
|
||||
if h.callback != nil {
|
||||
h.callback(msg)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user