Files
webhooker/internal/delivery/circuit_breaker_test.go
clawbot afe88c601a
All checks were successful
check / check (push) Successful in 5s
refactor: use pinned golangci-lint Docker image for linting (#55)
Closes [issue #50](#50)

## Summary

Refactors the Dockerfile to use a separate lint stage with a pinned golangci-lint Docker image, following the pattern used by [sneak/pixa](https://git.eeqj.de/sneak/pixa). This replaces the previous approach of installing golangci-lint via curl in the builder stage.

## Changes

### Dockerfile
- **New `lint` stage** using `golangci/golangci-lint:v2.11.3` (Debian-based, pinned by sha256 digest) as a separate build stage
- **Builder stage** depends on lint via `COPY --from=lint /src/go.sum /dev/null` — build won't proceed unless linting passes
- **Go bumped** from 1.24 to 1.26.1 (`golang:1.26.1-bookworm`, pinned by sha256)
- **golangci-lint bumped** from v1.64.8 to v2.11.3
- All three Docker images (golangci-lint, golang, alpine) pinned by sha256 digest
- Debian-based golangci-lint image used (not Alpine) because mattn/go-sqlite3 CGO does not compile on musl (off64_t)

### Linter Config (.golangci.yml)
- Migrated from v1 to v2 format (`version: "2"` added)
- Removed linters no longer available in v2: `gofmt` (handled by `make fmt-check`), `gosimple` (merged into `staticcheck`), `typecheck` (always-on in v2)
- Same set of linters enabled — no rules weakened

### Code Fixes (all lint issues from v2 upgrade)
- Added package comments to all packages
- Added doc comments to all exported types, functions, and methods
- Fixed unchecked errors flagged by `errcheck` (sqlDB.Close, os.Setenv in tests, resp.Body.Close, fmt.Fprint)
- Fixed unused parameters flagged by `revive` (renamed to `_`)
- Fixed `gosec` G120 warnings: added `http.MaxBytesReader` before `r.ParseForm()` calls
- Fixed `staticcheck` QF1012: replaced `WriteString(fmt.Sprintf(...))` with `fmt.Fprintf`
- Fixed `staticcheck` QF1003: converted if/else chain to tagged switch
- Renamed `DeliveryTask` → `Task` to avoid package stutter (`delivery.Task` instead of `delivery.DeliveryTask`)
- Renamed shadowed builtin `max` parameter to `upperBound` in `cryptoRandInt`
- Used `t.Setenv` instead of `os.Setenv` in tests (auto-restores)

### README.md
- Updated version requirements: Go 1.26+, golangci-lint v2.11+
- Updated Dockerfile description in project structure

## Verification

`docker build .` passes cleanly — formatting check, linting, all tests, and build all succeed.

Co-authored-by: clawbot <clawbot@noreply.git.eeqj.de>
Reviewed-on: #55
Co-authored-by: clawbot <clawbot@noreply.example.org>
Co-committed-by: clawbot <clawbot@noreply.example.org>
2026-03-25 02:16:38 +01:00

311 lines
5.7 KiB
Go

package delivery_test
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"sneak.berlin/go/webhooker/internal/delivery"
)
// TestCircuitBreaker_ClosedState_AllowsDeliveries verifies that a
// freshly constructed breaker starts closed and permits traffic.
func TestCircuitBreaker_ClosedState_AllowsDeliveries(
	t *testing.T,
) {
	t.Parallel()

	breaker := delivery.NewCircuitBreaker()

	assert.Equal(t, delivery.CircuitClosed, breaker.State())
	assert.True(t, breaker.Allow(),
		"closed circuit should allow deliveries",
	)

	// Repeated calls must keep succeeding while the circuit
	// stays closed.
	for i := 0; i < 10; i++ {
		assert.True(t, breaker.Allow())
	}
}
// TestCircuitBreaker_FailureCounting verifies the breaker remains
// closed and keeps allowing traffic while the recorded failure
// count stays below the trip threshold.
func TestCircuitBreaker_FailureCounting(t *testing.T) {
	t.Parallel()

	breaker := delivery.NewCircuitBreaker()

	// Stop one failure short of the threshold; the breaker must
	// not trip at any point along the way.
	almost := delivery.ExportDefaultFailureThreshold - 1
	for i := 0; i < almost; i++ {
		breaker.RecordFailure()
		assert.Equal(t,
			delivery.CircuitClosed, breaker.State(),
			"circuit should remain closed after %d failures",
			i+1,
		)
		assert.True(t, breaker.Allow(),
			"should still allow after %d failures",
			i+1,
		)
	}
}
// TestCircuitBreaker_OpenTransition verifies the breaker opens
// (and starts rejecting) once the failure threshold is reached.
func TestCircuitBreaker_OpenTransition(t *testing.T) {
	t.Parallel()

	breaker := delivery.NewCircuitBreaker()

	// Record exactly threshold failures to trip the breaker.
	for n := 0; n < delivery.ExportDefaultFailureThreshold; n++ {
		breaker.RecordFailure()
	}

	assert.Equal(t, delivery.CircuitOpen, breaker.State(),
		"circuit should be open after threshold failures",
	)
	assert.False(t, breaker.Allow(),
		"open circuit should reject deliveries",
	)
}
// TestCircuitBreaker_Cooldown_StaysOpen verifies that a tripped
// breaker keeps rejecting traffic and reports a positive cooldown
// while the cooldown window has not yet elapsed.
func TestCircuitBreaker_Cooldown_StaysOpen(t *testing.T) {
	t.Parallel()

	breaker := delivery.NewCircuitBreaker()
	for n := 0; n < delivery.ExportDefaultFailureThreshold; n++ {
		breaker.RecordFailure()
	}
	require.Equal(t, delivery.CircuitOpen, breaker.State())

	assert.False(t, breaker.Allow(),
		"should be blocked during cooldown",
	)

	left := breaker.CooldownRemaining()
	assert.Greater(t, left, time.Duration(0),
		"cooldown should have remaining time",
	)
}
// TestCircuitBreaker_HalfOpen_AfterCooldown verifies that once the
// cooldown elapses the breaker admits exactly one probe, moves to
// half-open, and rejects further probes until the probe resolves.
func TestCircuitBreaker_HalfOpen_AfterCooldown(
	t *testing.T,
) {
	t.Parallel()

	breaker := newShortCooldownCB(t)
	for n := 0; n < delivery.ExportDefaultFailureThreshold; n++ {
		breaker.RecordFailure()
	}
	require.Equal(t, delivery.CircuitOpen, breaker.State())

	// Outwait the 50ms test cooldown with some margin.
	time.Sleep(60 * time.Millisecond)

	assert.Equal(t, time.Duration(0),
		breaker.CooldownRemaining(),
	)
	assert.True(t, breaker.Allow(),
		"should allow one probe after cooldown",
	)
	assert.Equal(t,
		delivery.CircuitHalfOpen, breaker.State(),
		"should be half-open after probe allowed",
	)
	assert.False(t, breaker.Allow(),
		"should reject additional probes while half-open",
	)
}
// TestCircuitBreaker_ProbeSuccess_ClosesCircuit verifies that a
// successful half-open probe returns the breaker to closed and
// restores normal traffic.
func TestCircuitBreaker_ProbeSuccess_ClosesCircuit(
	t *testing.T,
) {
	t.Parallel()

	breaker := newShortCooldownCB(t)
	for n := 0; n < delivery.ExportDefaultFailureThreshold; n++ {
		breaker.RecordFailure()
	}

	// Outwait the 50ms test cooldown, then take the single probe.
	time.Sleep(60 * time.Millisecond)
	require.True(t, breaker.Allow())

	breaker.RecordSuccess()

	assert.Equal(t, delivery.CircuitClosed, breaker.State(),
		"successful probe should close circuit",
	)
	assert.True(t, breaker.Allow(),
		"closed circuit should allow deliveries",
	)
}
// TestCircuitBreaker_ProbeFailure_ReopensCircuit verifies that a
// failed half-open probe sends the breaker straight back to open.
func TestCircuitBreaker_ProbeFailure_ReopensCircuit(
	t *testing.T,
) {
	t.Parallel()

	breaker := newShortCooldownCB(t)
	for n := 0; n < delivery.ExportDefaultFailureThreshold; n++ {
		breaker.RecordFailure()
	}

	// Outwait the 50ms test cooldown, then take the single probe.
	time.Sleep(60 * time.Millisecond)
	require.True(t, breaker.Allow())

	breaker.RecordFailure()

	assert.Equal(t, delivery.CircuitOpen, breaker.State(),
		"failed probe should reopen circuit",
	)
	assert.False(t, breaker.Allow(),
		"reopened circuit should reject deliveries",
	)
}
// TestCircuitBreaker_SuccessResetsFailures verifies that a success
// clears the consecutive-failure counter, so the breaker only trips
// after a fresh, uninterrupted run of threshold failures.
func TestCircuitBreaker_SuccessResetsFailures(
	t *testing.T,
) {
	t.Parallel()

	breaker := delivery.NewCircuitBreaker()
	almost := delivery.ExportDefaultFailureThreshold - 1

	for n := 0; n < almost; n++ {
		breaker.RecordFailure()
	}
	require.Equal(t, delivery.CircuitClosed, breaker.State())

	breaker.RecordSuccess()
	assert.Equal(t, delivery.CircuitClosed, breaker.State())

	// A second near-threshold run must not trip the breaker,
	// because the success above reset the count.
	for n := 0; n < almost; n++ {
		breaker.RecordFailure()
	}
	assert.Equal(t, delivery.CircuitClosed, breaker.State(),
		"circuit should still be closed -- "+
			"success reset the counter",
	)

	// One more failure completes a full threshold run.
	breaker.RecordFailure()
	assert.Equal(t, delivery.CircuitOpen, breaker.State())
}
// TestCircuitBreaker_ConcurrentAccess hammers the breaker from many
// goroutines (run with -race) and checks it ends in a valid state.
func TestCircuitBreaker_ConcurrentAccess(t *testing.T) {
	t.Parallel()

	breaker := delivery.NewCircuitBreaker()

	const goroutines = 100

	// Each operation is launched `goroutines` times concurrently.
	ops := []func(){
		func() { breaker.Allow() },
		func() { breaker.RecordFailure() },
		func() { breaker.RecordSuccess() },
	}

	var wg sync.WaitGroup
	wg.Add(goroutines * len(ops))
	for _, op := range ops {
		for i := 0; i < goroutines; i++ {
			go func(run func()) {
				defer wg.Done()
				run()
			}(op)
		}
	}
	wg.Wait()

	state := breaker.State()
	assert.Contains(t,
		[]delivery.CircuitState{
			delivery.CircuitClosed,
			delivery.CircuitOpen,
			delivery.CircuitHalfOpen,
		},
		state,
		"state should be valid after concurrent access",
	)
}
// TestCircuitBreaker_CooldownRemaining_ClosedReturnsZero verifies
// that a breaker which has never tripped reports no cooldown.
func TestCircuitBreaker_CooldownRemaining_ClosedReturnsZero(
	t *testing.T,
) {
	t.Parallel()

	breaker := delivery.NewCircuitBreaker()

	assert.Equal(t, time.Duration(0),
		breaker.CooldownRemaining(),
		"closed circuit should have zero cooldown remaining",
	)
}
// TestCircuitBreaker_CooldownRemaining_HalfOpenReturnsZero verifies
// that after the cooldown has elapsed and a probe was admitted, the
// half-open breaker reports zero remaining cooldown.
func TestCircuitBreaker_CooldownRemaining_HalfOpenReturnsZero(
	t *testing.T,
) {
	t.Parallel()

	breaker := newShortCooldownCB(t)
	for n := 0; n < delivery.ExportDefaultFailureThreshold; n++ {
		breaker.RecordFailure()
	}

	// Outwait the 50ms test cooldown, then take the single probe
	// so the breaker transitions to half-open.
	time.Sleep(60 * time.Millisecond)
	require.True(t, breaker.Allow())

	assert.Equal(t, time.Duration(0),
		breaker.CooldownRemaining(),
		"half-open circuit should have zero cooldown remaining",
	)
}
func TestCircuitState_String(t *testing.T) {
t.Parallel()
assert.Equal(t, "closed", delivery.CircuitClosed.String())
assert.Equal(t, "open", delivery.CircuitOpen.String())
assert.Equal(t, "half-open", delivery.CircuitHalfOpen.String())
assert.Equal(t, "unknown", delivery.CircuitState(99).String())
}
// newShortCooldownCB returns a CircuitBreaker built via the test
// constructor NewTestCircuitBreaker: it keeps the default failure
// threshold but shrinks the cooldown to 50ms so tests can outwait
// the open period quickly. (The previous comment claimed this used
// NewCircuitBreaker and only the public API, which was inaccurate.)
func newShortCooldownCB(t *testing.T) *delivery.CircuitBreaker {
	t.Helper()
	return delivery.NewTestCircuitBreaker(
		delivery.ExportDefaultFailureThreshold,
		50*time.Millisecond,
	)
}