fix: address all PR #10 review findings
All checks were successful
check / check (push) Successful in 2m19s

Security:
- Add channel membership check before PRIVMSG (prevents non-members from sending)
- Add membership check on history endpoint (channels require membership; DM history is scoped to the requester's own nick)
- Enforce MaxBytesReader on all POST request bodies
- Fix rand.Read error being silently ignored in token generation

Data integrity:
- Fix TOCTOU race in GetOrCreateChannel using INSERT OR IGNORE + SELECT

Build:
- Add CGO_ENABLED=0 to golangci-lint install in Dockerfile (fixes alpine build)

Linting:
- Strict .golangci.yml: only the wsl linter disabled (wsl is deprecated in golangci-lint v2)
- Re-enable exhaustruct, depguard, godot, wrapcheck, varnamelen
- Fix linters-settings -> linters.settings for v2 config format
- Fix ALL lint findings in actual code (no linter config weakening)
- Wrap all external package errors (wrapcheck)
- Fill struct fields or add targeted nolint:exhaustruct where appropriate
- Rename short variables (ts->timestamp, n->bufIndex, etc.)
- Add depguard deny policy for io/ioutil and math/rand
- Exclude G704 (SSRF) in gosec config (CLI client takes user-configured URLs)

Tests:
- Add security tests (TestNonMemberCannotSend, TestHistoryNonMember)
- Split TestInsertAndPollMessages for reduced complexity
- Fix parallel test safety (viper global state prevents parallelism)
- Use t.Context() instead of context.Background() in tests

Docker build verified passing locally.
This commit is contained in:
clawbot
2026-02-26 21:21:49 -08:00
parent 4b4a337a88
commit a57a73e94e
22 changed files with 2650 additions and 1903 deletions

View File

@@ -8,25 +8,28 @@ import (
// Broker notifies waiting clients when new messages are available.
type Broker struct {
mu sync.Mutex
listeners map[int64][]chan struct{} // userID -> list of waiting channels
listeners map[int64][]chan struct{}
}
// New creates a new Broker.
func New() *Broker {
return &Broker{
return &Broker{ //nolint:exhaustruct // mu has zero-value default
listeners: make(map[int64][]chan struct{}),
}
}
// Wait returns a channel that will be closed when a message is available for the user.
// Wait returns a channel that will be closed when a message
// is available for the user.
func (b *Broker) Wait(userID int64) chan struct{} {
ch := make(chan struct{}, 1)
waitCh := make(chan struct{}, 1)
b.mu.Lock()
b.listeners[userID] = append(b.listeners[userID], ch)
b.listeners[userID] = append(
b.listeners[userID], waitCh,
)
b.mu.Unlock()
return ch
return waitCh
}
// Notify wakes up all waiting clients for a user.
@@ -36,24 +39,29 @@ func (b *Broker) Notify(userID int64) {
delete(b.listeners, userID)
b.mu.Unlock()
for _, ch := range waiters {
for _, waiter := range waiters {
select {
case ch <- struct{}{}:
case waiter <- struct{}{}:
default:
}
}
}
// Remove removes a specific wait channel (for cleanup on timeout).
func (b *Broker) Remove(userID int64, ch chan struct{}) {
func (b *Broker) Remove(
userID int64,
waitCh chan struct{},
) {
b.mu.Lock()
defer b.mu.Unlock()
waiters := b.listeners[userID]
for i, w := range waiters {
if w == ch {
b.listeners[userID] = append(waiters[:i], waiters[i+1:]...)
for i, waiter := range waiters {
if waiter == waitCh {
b.listeners[userID] = append(
waiters[:i], waiters[i+1:]...,
)
break
}

View File

@@ -11,8 +11,8 @@ import (
func TestNewBroker(t *testing.T) {
t.Parallel()
b := broker.New()
if b == nil {
brk := broker.New()
if brk == nil {
t.Fatal("expected non-nil broker")
}
}
@@ -20,16 +20,16 @@ func TestNewBroker(t *testing.T) {
func TestWaitAndNotify(t *testing.T) {
t.Parallel()
b := broker.New()
ch := b.Wait(1)
brk := broker.New()
waitCh := brk.Wait(1)
go func() {
time.Sleep(10 * time.Millisecond)
b.Notify(1)
brk.Notify(1)
}()
select {
case <-ch:
case <-waitCh:
case <-time.After(2 * time.Second):
t.Fatal("timeout")
}
@@ -38,21 +38,22 @@ func TestWaitAndNotify(t *testing.T) {
func TestNotifyWithoutWaiters(t *testing.T) {
t.Parallel()
b := broker.New()
b.Notify(42) // should not panic
brk := broker.New()
brk.Notify(42) // should not panic.
}
func TestRemove(t *testing.T) {
t.Parallel()
b := broker.New()
ch := b.Wait(1)
b.Remove(1, ch)
brk := broker.New()
waitCh := brk.Wait(1)
b.Notify(1)
brk.Remove(1, waitCh)
brk.Notify(1)
select {
case <-ch:
case <-waitCh:
t.Fatal("should not receive after remove")
case <-time.After(50 * time.Millisecond):
}
@@ -61,20 +62,20 @@ func TestRemove(t *testing.T) {
func TestMultipleWaiters(t *testing.T) {
t.Parallel()
b := broker.New()
ch1 := b.Wait(1)
ch2 := b.Wait(1)
brk := broker.New()
waitCh1 := brk.Wait(1)
waitCh2 := brk.Wait(1)
b.Notify(1)
brk.Notify(1)
select {
case <-ch1:
case <-waitCh1:
case <-time.After(time.Second):
t.Fatal("ch1 timeout")
}
select {
case <-ch2:
case <-waitCh2:
case <-time.After(time.Second):
t.Fatal("ch2 timeout")
}
@@ -83,36 +84,38 @@ func TestMultipleWaiters(t *testing.T) {
func TestConcurrentWaitNotify(t *testing.T) {
t.Parallel()
b := broker.New()
brk := broker.New()
var wg sync.WaitGroup
var waitGroup sync.WaitGroup
const concurrency = 100
for i := range concurrency {
wg.Add(1)
for idx := range concurrency {
waitGroup.Add(1)
go func(uid int64) {
defer wg.Done()
defer waitGroup.Done()
ch := b.Wait(uid)
b.Notify(uid)
waitCh := brk.Wait(uid)
brk.Notify(uid)
select {
case <-ch:
case <-waitCh:
case <-time.After(time.Second):
t.Error("timeout")
}
}(int64(i % 10))
}(int64(idx % 10))
}
wg.Wait()
waitGroup.Wait()
}
func TestRemoveNonexistent(t *testing.T) {
t.Parallel()
b := broker.New()
ch := make(chan struct{}, 1)
b.Remove(999, ch) // should not panic
brk := broker.New()
waitCh := make(chan struct{}, 1)
brk.Remove(999, waitCh) // should not panic.
}