refactor: use pinned golangci-lint Docker image for linting
All checks were successful
check / check (push) Successful in 1m41s

Refactor Dockerfile to use a separate lint stage with a pinned
golangci-lint v2.11.3 Docker image instead of installing
golangci-lint via curl in the builder stage. This follows the
pattern used by sneak/pixa.

Changes:
- Dockerfile: separate lint stage using golangci/golangci-lint:v2.11.3
  (Debian-based, pinned by sha256) with COPY --from=lint dependency
- Bump Go from 1.24 to 1.26.1 (golang:1.26.1-bookworm, pinned)
- Bump golangci-lint from v1.64.8 to v2.11.3
- Migrate .golangci.yml from v1 to v2 format (same linters, format only)
- All Docker images pinned by sha256 digest
- Fix all lint issues from the v2 linter upgrade:
  - Add package comments to all packages
  - Add doc comments to all exported types, functions, and methods
  - Fix unchecked errors (errcheck)
  - Fix unused parameters (revive)
  - Fix gosec warnings (MaxBytesReader for form parsing)
  - Fix staticcheck suggestions (fmt.Fprintf instead of WriteString)
  - Rename DeliveryTask to Task to avoid stutter (delivery.Task)
  - Rename shadowed builtin 'max' parameter
- Update README.md version requirements
This commit is contained in:
clawbot
2026-03-17 05:46:03 -07:00
parent f003ec7141
commit 4d5ebfd692
32 changed files with 236 additions and 175 deletions

View File

@@ -1,18 +1,17 @@
version: "2"
run: run:
timeout: 5m timeout: 5m
tests: true tests: true
linters: linters:
enable: enable:
- gofmt
- revive - revive
- govet - govet
- errcheck - errcheck
- staticcheck - staticcheck
- unused - unused
- gosimple
- ineffassign - ineffassign
- typecheck
- gosec - gosec
- misspell - misspell
- unparam - unparam
@@ -23,8 +22,6 @@ linters:
- gochecknoglobals - gochecknoglobals
linters-settings: linters-settings:
gofmt:
simplify: true
revive: revive:
confidence: 0.8 confidence: 0.8
govet: govet:
@@ -43,4 +40,4 @@ issues:
# Exclude globals check for version variables in globals package # Exclude globals check for version variables in globals package
- path: internal/globals/globals.go - path: internal/globals/globals.go
linters: linters:
- gochecknoglobals - gochecknoglobals

View File

@@ -1,56 +1,58 @@
# golang:1.24 (bookworm) — 2026-03-01 # Lint stage
# Using Debian-based image because gorm.io/driver/sqlite pulls in # golangci/golangci-lint:v2.11.3 (Debian-based), 2026-03-17
# mattn/go-sqlite3 (CGO), which does not compile on Alpine musl. # Using Debian-based image because mattn/go-sqlite3 (CGO) does not
FROM golang@sha256:d2d2bc1c84f7e60d7d2438a3836ae7d0c847f4888464e7ec9ba3a1339a1ee804 AS builder # compile on Alpine musl (off64_t is a glibc type).
FROM golangci/golangci-lint:v2.11.3@sha256:e838e8ab68aaefe83e2408691510867ade9329c0e0b895a3fb35eb93d1c2a4ba AS lint
# gcc is pre-installed in the Debian-based golang image
RUN apt-get update && apt-get install -y --no-install-recommends make && rm -rf /var/lib/apt/lists/* RUN apt-get update && apt-get install -y --no-install-recommends make && rm -rf /var/lib/apt/lists/*
WORKDIR /build WORKDIR /src
# Install golangci-lint v1.64.8 — 2026-03-01 # Copy go mod files first for better layer caching
# Using v1.x because the repo's .golangci.yml uses v1 config format.
RUN set -eux; \
GOLANGCI_VERSION="1.64.8"; \
ARCH="$(uname -m)"; \
case "${ARCH}" in \
x86_64) \
GOARCH="amd64"; \
GOLANGCI_SHA256="b6270687afb143d019f387c791cd2a6f1cb383be9b3124d241ca11bd3ce2e54e"; \
;; \
aarch64) \
GOARCH="arm64"; \
GOLANGCI_SHA256="a6ab58ebcb1c48572622146cdaec2956f56871038a54ed1149f1386e287789a5"; \
;; \
*) echo "unsupported architecture: ${ARCH}" && exit 1 ;; \
esac; \
wget -q "https://github.com/golangci/golangci-lint/releases/download/v${GOLANGCI_VERSION}/golangci-lint-${GOLANGCI_VERSION}-linux-${GOARCH}.tar.gz" \
-O /tmp/golangci-lint.tar.gz; \
echo "${GOLANGCI_SHA256} /tmp/golangci-lint.tar.gz" | sha256sum -c -; \
tar -xzf /tmp/golangci-lint.tar.gz -C /tmp; \
mv "/tmp/golangci-lint-${GOLANGCI_VERSION}-linux-${GOARCH}/golangci-lint" /usr/local/bin/; \
rm -rf /tmp/golangci-lint*; \
golangci-lint --version
# Copy go module files and download dependencies
COPY go.mod go.sum ./ COPY go.mod go.sum ./
RUN go mod download RUN go mod download
# Copy source code # Copy source code
COPY . . COPY . .
# Run all checks (fmt-check, lint, test, build) # Run formatting check and linter
RUN make check RUN make fmt-check
RUN make lint
# Build stage
# golang:1.26.1-bookworm (Debian-based), 2026-03-17
# Using Debian-based image because gorm.io/driver/sqlite pulls in
# mattn/go-sqlite3 (CGO), which does not compile on Alpine musl.
FROM golang:1.26.1-bookworm@sha256:4465644228bc2857a954b092167e12aa59c006a3492282a6c820bf4755fd64a4 AS builder
# Depend on lint stage passing
COPY --from=lint /src/go.sum /dev/null
RUN apt-get update && apt-get install -y --no-install-recommends make && rm -rf /var/lib/apt/lists/*
WORKDIR /build
# Copy go mod files first for better layer caching
COPY go.mod go.sum ./
RUN go mod download
# Copy source code
COPY . .
# Run tests and build
RUN make test
RUN make build
# Rebuild with static linking for Alpine runtime. # Rebuild with static linking for Alpine runtime.
# make check already verified formatting, linting, tests, and compilation. # make build already verified compilation.
# The CGO binary from `make build` is dynamically linked against glibc, # The CGO binary from `make build` is dynamically linked against glibc,
# which doesn't exist on Alpine (musl). Rebuild with static linking so # which doesn't exist on Alpine (musl). Rebuild with static linking so
# the binary runs on Alpine without glibc. # the binary runs on Alpine without glibc.
RUN CGO_ENABLED=1 go build -ldflags '-extldflags "-static"' -o bin/webhooker ./cmd/webhooker RUN CGO_ENABLED=1 go build -ldflags '-extldflags "-static"' -o bin/webhooker ./cmd/webhooker
# alpine:3.21 — 2026-03-01 # Runtime stage
FROM alpine@sha256:c3f8e73fdb79deaebaa2037150150191b9dcbfba68b4a46d70103204c53f4709 # alpine:3.21, 2026-03-17
FROM alpine:3.21@sha256:c3f8e73fdb79deaebaa2037150150191b9dcbfba68b4a46d70103204c53f4709
RUN apk --no-cache add ca-certificates RUN apk --no-cache add ca-certificates

View File

@@ -11,8 +11,8 @@ with retry support, logging, and observability. Category: infrastructure
### Prerequisites ### Prerequisites
- Go 1.24+ - Go 1.26+
- golangci-lint v1.64+ - golangci-lint v2.11+
- Docker (for containerized deployment) - Docker (for containerized deployment)
### Quick Start ### Quick Start
@@ -762,7 +762,7 @@ webhooker/
│ ├── css/style.css # Custom stylesheet (system font stack, card effects, layout) │ ├── css/style.css # Custom stylesheet (system font stack, card effects, layout)
│ └── js/app.js # Client-side JavaScript (minimal bootstrap) │ └── js/app.js # Client-side JavaScript (minimal bootstrap)
├── templates/ # Go HTML templates (base, index, login, etc.) ├── templates/ # Go HTML templates (base, index, login, etc.)
├── Dockerfile # Multi-stage: build + check, then Alpine runtime ├── Dockerfile # Multi-stage: lint, build+test, then Alpine runtime
├── Makefile # fmt, lint, test, check, build, docker targets ├── Makefile # fmt, lint, test, check, build, docker targets
├── go.mod / go.sum ├── go.mod / go.sum
└── .golangci.yml # Linter configuration └── .golangci.yml # Linter configuration

View File

@@ -1,3 +1,4 @@
// Package main is the entry point for the webhooker application.
package main package main
import ( import (
@@ -15,6 +16,8 @@ import (
) )
// Build-time variables set via -ldflags. // Build-time variables set via -ldflags.
//
//nolint:gochecknoglobals // Build-time variables injected by the linker.
var ( var (
version = "dev" version = "dev"
appname = "webhooker" appname = "webhooker"

View File

@@ -1,3 +1,4 @@
// Package config loads application configuration from environment variables.
package config package config
import ( import (
@@ -23,13 +24,14 @@ const (
EnvironmentProd = "prod" EnvironmentProd = "prod"
) )
// nolint:revive // ConfigParams is a standard fx naming convention //nolint:revive // ConfigParams is a standard fx naming convention.
type ConfigParams struct { type ConfigParams struct {
fx.In fx.In
Globals *globals.Globals Globals *globals.Globals
Logger *logger.Logger Logger *logger.Logger
} }
// Config holds all application configuration loaded from environment variables.
type Config struct { type Config struct {
DataDir string DataDir string
Debug bool Debug bool
@@ -79,7 +81,9 @@ func envInt(key string, defaultValue int) int {
return defaultValue return defaultValue
} }
// nolint:revive // lc parameter is required by fx even if unused // New creates a Config by reading environment variables.
//
//nolint:revive // lc parameter is required by fx even if unused.
func New(lc fx.Lifecycle, params ConfigParams) (*Config, error) { func New(lc fx.Lifecycle, params ConfigParams) (*Config, error) {
log := params.Logger.Get() log := params.Logger.Get()

View File

@@ -57,16 +57,14 @@ func TestEnvironmentConfig(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
// Set environment variable if specified // Set environment variable if specified
if tt.envValue != "" { if tt.envValue != "" {
os.Setenv("WEBHOOKER_ENVIRONMENT", tt.envValue) t.Setenv("WEBHOOKER_ENVIRONMENT", tt.envValue)
defer os.Unsetenv("WEBHOOKER_ENVIRONMENT")
} else { } else {
os.Unsetenv("WEBHOOKER_ENVIRONMENT") require.NoError(t, os.Unsetenv("WEBHOOKER_ENVIRONMENT"))
} }
// Set additional environment variables // Set additional environment variables
for k, v := range tt.envVars { for k, v := range tt.envVars {
os.Setenv(k, v) t.Setenv(k, v)
defer os.Unsetenv(k)
} }
if tt.expectError { if tt.expectError {
@@ -115,12 +113,11 @@ func TestDefaultDataDir(t *testing.T) {
} }
t.Run("env="+name, func(t *testing.T) { t.Run("env="+name, func(t *testing.T) {
if env != "" { if env != "" {
os.Setenv("WEBHOOKER_ENVIRONMENT", env) t.Setenv("WEBHOOKER_ENVIRONMENT", env)
defer os.Unsetenv("WEBHOOKER_ENVIRONMENT")
} else { } else {
os.Unsetenv("WEBHOOKER_ENVIRONMENT") require.NoError(t, os.Unsetenv("WEBHOOKER_ENVIRONMENT"))
} }
os.Unsetenv("DATA_DIR") require.NoError(t, os.Unsetenv("DATA_DIR"))
var cfg *Config var cfg *Config
app := fxtest.New( app := fxtest.New(

View File

@@ -16,8 +16,8 @@ type BaseModel struct {
DeletedAt gorm.DeletedAt `gorm:"index" json:"deleted_at,omitempty"` DeletedAt gorm.DeletedAt `gorm:"index" json:"deleted_at,omitempty"`
} }
// BeforeCreate hook to set UUID before creating a record // BeforeCreate hook to set UUID before creating a record.
func (b *BaseModel) BeforeCreate(tx *gorm.DB) error { func (b *BaseModel) BeforeCreate(_ *gorm.DB) error {
if b.ID == "" { if b.ID == "" {
b.ID = uuid.New().String() b.ID = uuid.New().String()
} }

View File

@@ -1,3 +1,4 @@
// Package database provides SQLite persistence for webhooks, events, and users.
package database package database
import ( import (
@@ -19,19 +20,21 @@ import (
"sneak.berlin/go/webhooker/internal/logger" "sneak.berlin/go/webhooker/internal/logger"
) )
// nolint:revive // DatabaseParams is a standard fx naming convention //nolint:revive // DatabaseParams is a standard fx naming convention.
type DatabaseParams struct { type DatabaseParams struct {
fx.In fx.In
Config *config.Config Config *config.Config
Logger *logger.Logger Logger *logger.Logger
} }
// Database manages the main SQLite connection and schema migrations.
type Database struct { type Database struct {
db *gorm.DB db *gorm.DB
log *slog.Logger log *slog.Logger
params *DatabaseParams params *DatabaseParams
} }
// New creates a Database that connects on fx start and disconnects on stop.
func New(lc fx.Lifecycle, params DatabaseParams) (*Database, error) { func New(lc fx.Lifecycle, params DatabaseParams) (*Database, error) {
d := &Database{ d := &Database{
params: &params, params: &params,
@@ -149,6 +152,7 @@ func (d *Database) close() error {
return nil return nil
} }
// DB returns the underlying GORM database handle.
func (d *Database) DB() *gorm.DB { func (d *Database) DB() *gorm.DB {
return d.db return d.db
} }

View File

@@ -3,6 +3,7 @@ package database
// DeliveryStatus represents the status of a delivery // DeliveryStatus represents the status of a delivery
type DeliveryStatus string type DeliveryStatus string
// Delivery status values.
const ( const (
DeliveryStatusPending DeliveryStatus = "pending" DeliveryStatusPending DeliveryStatus = "pending"
DeliveryStatusDelivered DeliveryStatus = "delivered" DeliveryStatusDelivered DeliveryStatus = "delivered"

View File

@@ -3,6 +3,7 @@ package database
// TargetType represents the type of delivery target // TargetType represents the type of delivery target
type TargetType string type TargetType string
// Target type values.
const ( const (
TargetTypeHTTP TargetType = "http" TargetTypeHTTP TargetType = "http"
TargetTypeDatabase TargetType = "database" TargetTypeDatabase TargetType = "database"

View File

@@ -169,16 +169,16 @@ func GenerateRandomPassword(length int) (string, error) {
return string(password), nil return string(password), nil
} }
// cryptoRandInt generates a cryptographically secure random integer in [0, max) // cryptoRandInt generates a cryptographically secure random integer in [0, upperBound).
func cryptoRandInt(max int) int { func cryptoRandInt(upperBound int) int {
if max <= 0 { if upperBound <= 0 {
panic("max must be positive") panic("upperBound must be positive")
} }
// Calculate the maximum valid value to avoid modulo bias // Calculate the maximum valid value to avoid modulo bias
// For example, if max=200 and we have 256 possible values, // For example, if upperBound=200 and we have 256 possible values,
// we only accept values 0-199 (reject 200-255) // we only accept values 0-199 (reject 200-255)
nBig, err := rand.Int(rand.Reader, big.NewInt(int64(max))) nBig, err := rand.Int(rand.Reader, big.NewInt(int64(upperBound)))
if err != nil { if err != nil {
panic(fmt.Sprintf("crypto/rand error: %v", err)) panic(fmt.Sprintf("crypto/rand error: %v", err))
} }

View File

@@ -73,13 +73,13 @@ func (m *WebhookDBManager) openDB(webhookID string) (*gorm.DB, error) {
Conn: sqlDB, Conn: sqlDB,
}, &gorm.Config{}) }, &gorm.Config{})
if err != nil { if err != nil {
sqlDB.Close() _ = sqlDB.Close()
return nil, fmt.Errorf("connecting to webhook database %s: %w", webhookID, err) return nil, fmt.Errorf("connecting to webhook database %s: %w", webhookID, err)
} }
// Run migrations for event-tier models only // Run migrations for event-tier models only
if err := db.AutoMigrate(&Event{}, &Delivery{}, &DeliveryResult{}); err != nil { if err := db.AutoMigrate(&Event{}, &Delivery{}, &DeliveryResult{}); err != nil {
sqlDB.Close() _ = sqlDB.Close()
return nil, fmt.Errorf("migrating webhook database %s: %w", webhookID, err) return nil, fmt.Errorf("migrating webhook database %s: %w", webhookID, err)
} }
@@ -111,7 +111,7 @@ func (m *WebhookDBManager) GetDB(webhookID string) (*gorm.DB, error) {
if loaded { if loaded {
// Another goroutine created it first; close our duplicate // Another goroutine created it first; close our duplicate
if sqlDB, closeErr := db.DB(); closeErr == nil { if sqlDB, closeErr := db.DB(); closeErr == nil {
sqlDB.Close() _ = sqlDB.Close()
} }
existingDB, castOK := actual.(*gorm.DB) existingDB, castOK := actual.(*gorm.DB)
if !castOK { if !castOK {
@@ -143,7 +143,7 @@ func (m *WebhookDBManager) DeleteDB(webhookID string) error {
if val, ok := m.dbs.LoadAndDelete(webhookID); ok { if val, ok := m.dbs.LoadAndDelete(webhookID); ok {
if gormDB, castOK := val.(*gorm.DB); castOK { if gormDB, castOK := val.(*gorm.DB); castOK {
if sqlDB, err := gormDB.DB(); err == nil { if sqlDB, err := gormDB.DB(); err == nil {
sqlDB.Close() _ = sqlDB.Close()
} }
} }
} }

View File

@@ -1,3 +1,4 @@
// Package delivery manages asynchronous event delivery to configured targets.
package delivery package delivery
import ( import (
@@ -20,7 +21,7 @@ import (
const ( const (
// deliveryChannelSize is the buffer size for the delivery channel. // deliveryChannelSize is the buffer size for the delivery channel.
// New DeliveryTasks from the webhook handler are sent here. Workers // New Tasks from the webhook handler are sent here. Workers
// drain this channel. Sized large enough that the webhook handler // drain this channel. Sized large enough that the webhook handler
// should never block under normal load. // should never block under normal load.
deliveryChannelSize = 10000 deliveryChannelSize = 10000
@@ -41,7 +42,7 @@ const (
retrySweepInterval = 60 * time.Second retrySweepInterval = 60 * time.Second
// MaxInlineBodySize is the maximum event body size that will be carried // MaxInlineBodySize is the maximum event body size that will be carried
// inline in a DeliveryTask through the channel. Bodies at or above this // inline in a Task through the channel. Bodies at or above this
// size are left nil and fetched from the per-webhook database on demand. // size are left nil and fetched from the per-webhook database on demand.
// This keeps channel buffer memory bounded under high traffic. // This keeps channel buffer memory bounded under high traffic.
MaxInlineBodySize = 16 * 1024 MaxInlineBodySize = 16 * 1024
@@ -53,7 +54,7 @@ const (
maxBodyLog = 4096 maxBodyLog = 4096
) )
// DeliveryTask contains everything needed to deliver an event to a single // Task contains everything needed to deliver an event to a single
// target. In the ≤16KB happy path, Body is non-nil and the engine delivers // target. In the ≤16KB happy path, Body is non-nil and the engine delivers
// without touching any database — it trusts that the webhook handler wrote // without touching any database — it trusts that the webhook handler wrote
// the records correctly. Only after a delivery attempt (success or failure) // the records correctly. Only after a delivery attempt (success or failure)
@@ -61,7 +62,7 @@ const (
// //
// When Body is nil (payload ≥ MaxInlineBodySize), the engine fetches the // When Body is nil (payload ≥ MaxInlineBodySize), the engine fetches the
// body from the per-webhook database using EventID before delivering. // body from the per-webhook database using EventID before delivering.
type DeliveryTask struct { type Task struct {
DeliveryID string // ID of the Delivery record (for recording results) DeliveryID string // ID of the Delivery record (for recording results)
EventID string // Event ID (for DB lookup if body is nil) EventID string // Event ID (for DB lookup if body is nil)
WebhookID string // Webhook ID (for per-webhook DB access) WebhookID string // Webhook ID (for per-webhook DB access)
@@ -88,7 +89,7 @@ type DeliveryTask struct {
// Notifier is the interface for notifying the delivery engine about new // Notifier is the interface for notifying the delivery engine about new
// deliveries. Implemented by Engine and injected into handlers. // deliveries. Implemented by Engine and injected into handlers.
type Notifier interface { type Notifier interface {
Notify(tasks []DeliveryTask) Notify(tasks []Task)
} }
// HTTPTargetConfig holds configuration for http target types. // HTTPTargetConfig holds configuration for http target types.
@@ -116,7 +117,7 @@ type EngineParams struct {
// Engine processes queued deliveries in the background using a bounded // Engine processes queued deliveries in the background using a bounded
// worker pool architecture. New deliveries arrive as individual // worker pool architecture. New deliveries arrive as individual
// DeliveryTask values via a buffered delivery channel from the webhook // Task values via a buffered delivery channel from the webhook
// handler. Failed deliveries that need retry are scheduled via Go timers // handler. Failed deliveries that need retry are scheduled via Go timers
// with exponential backoff; each timer fires into a separate retry // with exponential backoff; each timer fires into a separate retry
// channel. A fixed number of worker goroutines drain both channels, // channel. A fixed number of worker goroutines drain both channels,
@@ -135,8 +136,8 @@ type Engine struct {
client *http.Client client *http.Client
cancel context.CancelFunc cancel context.CancelFunc
wg sync.WaitGroup wg sync.WaitGroup
deliveryCh chan DeliveryTask deliveryCh chan Task
retryCh chan DeliveryTask retryCh chan Task
workers int workers int
// circuitBreakers stores a *CircuitBreaker per target ID. Only used // circuitBreakers stores a *CircuitBreaker per target ID. Only used
@@ -156,8 +157,8 @@ func New(lc fx.Lifecycle, params EngineParams) *Engine {
Timeout: httpClientTimeout, Timeout: httpClientTimeout,
Transport: NewSSRFSafeTransport(), Transport: NewSSRFSafeTransport(),
}, },
deliveryCh: make(chan DeliveryTask, deliveryChannelSize), deliveryCh: make(chan Task, deliveryChannelSize),
retryCh: make(chan DeliveryTask, retryChannelSize), retryCh: make(chan Task, retryChannelSize),
workers: defaultWorkers, workers: defaultWorkers,
} }
@@ -208,11 +209,11 @@ func (e *Engine) stop() {
// Notify signals the delivery engine that new deliveries are ready. // Notify signals the delivery engine that new deliveries are ready.
// Called by the webhook handler after creating delivery records. Each // Called by the webhook handler after creating delivery records. Each
// DeliveryTask carries all data needed for delivery in the ≤16KB case. // Task carries all data needed for delivery in the ≤16KB case.
// Tasks are sent individually to the delivery channel. The call is // Tasks are sent individually to the delivery channel. The call is
// non-blocking; if the channel is full, a warning is logged and the // non-blocking; if the channel is full, a warning is logged and the
// delivery will be recovered on the next engine restart. // delivery will be recovered on the next engine restart.
func (e *Engine) Notify(tasks []DeliveryTask) { func (e *Engine) Notify(tasks []Task) {
for i := range tasks { for i := range tasks {
select { select {
case e.deliveryCh <- tasks[i]: case e.deliveryCh <- tasks[i]:
@@ -255,7 +256,7 @@ func (e *Engine) recoverPending(ctx context.Context) {
// channel. It builds the event and target context from the task's inline // channel. It builds the event and target context from the task's inline
// data and executes the delivery. For large bodies (≥ MaxInlineBodySize), // data and executes the delivery. For large bodies (≥ MaxInlineBodySize),
// the body is fetched from the per-webhook database on demand. // the body is fetched from the per-webhook database on demand.
func (e *Engine) processNewTask(ctx context.Context, task *DeliveryTask) { func (e *Engine) processNewTask(ctx context.Context, task *Task) {
webhookDB, err := e.dbManager.GetDB(task.WebhookID) webhookDB, err := e.dbManager.GetDB(task.WebhookID)
if err != nil { if err != nil {
e.log.Error("failed to get webhook database", e.log.Error("failed to get webhook database",
@@ -316,7 +317,7 @@ func (e *Engine) processNewTask(ctx context.Context, task *DeliveryTask) {
// The task carries all data needed for delivery (same as the initial // The task carries all data needed for delivery (same as the initial
// notification). The only DB read is a status check to verify the delivery // notification). The only DB read is a status check to verify the delivery
// hasn't been cancelled or resolved while the timer was pending. // hasn't been cancelled or resolved while the timer was pending.
func (e *Engine) processRetryTask(ctx context.Context, task *DeliveryTask) { func (e *Engine) processRetryTask(ctx context.Context, task *Task) {
webhookDB, err := e.dbManager.GetDB(task.WebhookID) webhookDB, err := e.dbManager.GetDB(task.WebhookID)
if err != nil { if err != nil {
e.log.Error("failed to get webhook database for retry", e.log.Error("failed to get webhook database for retry",
@@ -504,7 +505,7 @@ func (e *Engine) recoverWebhookDeliveries(ctx context.Context, webhookID string)
bodyPtr = &bodyStr bodyPtr = &bodyStr
} }
task := DeliveryTask{ task := Task{
DeliveryID: d.ID, DeliveryID: d.ID,
EventID: d.EventID, EventID: d.EventID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -604,7 +605,7 @@ func (e *Engine) recoverPendingDeliveries(ctx context.Context, webhookDB *gorm.D
bodyPtr = &bodyStr bodyPtr = &bodyStr
} }
task := DeliveryTask{ task := Task{
DeliveryID: deliveries[i].ID, DeliveryID: deliveries[i].ID,
EventID: deliveries[i].EventID, EventID: deliveries[i].EventID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -632,7 +633,7 @@ func (e *Engine) recoverPendingDeliveries(ctx context.Context, webhookDB *gorm.D
} }
// scheduleRetry creates a Go timer that fires after the given delay and // scheduleRetry creates a Go timer that fires after the given delay and
// sends the full DeliveryTask to the engine's retry channel. The task // sends the full Task to the engine's retry channel. The task
// carries all data needed for the retry attempt, so when it fires, a // carries all data needed for the retry attempt, so when it fires, a
// worker can deliver without reading event or target data from the DB. // worker can deliver without reading event or target data from the DB.
// //
@@ -640,7 +641,7 @@ func (e *Engine) recoverPendingDeliveries(ctx context.Context, webhookDB *gorm.D
// dropped. The delivery remains in `retrying` status in the database // dropped. The delivery remains in `retrying` status in the database
// and will be picked up by the periodic retry sweep (DB-mediated // and will be picked up by the periodic retry sweep (DB-mediated
// fallback path). No goroutines are blocked or re-armed. // fallback path). No goroutines are blocked or re-armed.
func (e *Engine) scheduleRetry(task DeliveryTask, delay time.Duration) { func (e *Engine) scheduleRetry(task Task, delay time.Duration) {
e.log.Debug("scheduling delivery retry", e.log.Debug("scheduling delivery retry",
"webhook_id", task.WebhookID, "webhook_id", task.WebhookID,
"delivery_id", task.DeliveryID, "delivery_id", task.DeliveryID,
@@ -690,7 +691,7 @@ func (e *Engine) retrySweep(ctx context.Context) {
// sweepOrphanedRetries scans all webhooks for retrying deliveries whose // sweepOrphanedRetries scans all webhooks for retrying deliveries whose
// backoff period has elapsed. For each eligible delivery, it builds a // backoff period has elapsed. For each eligible delivery, it builds a
// DeliveryTask and sends it to the retry channel. If the channel is // Task and sends it to the retry channel. If the channel is
// still full, the delivery is skipped and will be retried on the next // still full, the delivery is skipped and will be retried on the next
// sweep cycle. // sweep cycle.
func (e *Engine) sweepOrphanedRetries(ctx context.Context) { func (e *Engine) sweepOrphanedRetries(ctx context.Context) {
@@ -805,7 +806,7 @@ func (e *Engine) sweepWebhookRetries(ctx context.Context, webhookID string) {
bodyPtr = &bodyStr bodyPtr = &bodyStr
} }
task := DeliveryTask{ task := Task{
DeliveryID: d.ID, DeliveryID: d.ID,
EventID: d.EventID, EventID: d.EventID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -835,7 +836,7 @@ func (e *Engine) sweepWebhookRetries(ctx context.Context, webhookID string) {
} }
} }
func (e *Engine) processDelivery(ctx context.Context, webhookDB *gorm.DB, d *database.Delivery, task *DeliveryTask) { func (e *Engine) processDelivery(ctx context.Context, webhookDB *gorm.DB, d *database.Delivery, task *Task) {
switch d.Target.Type { switch d.Target.Type {
case database.TargetTypeHTTP: case database.TargetTypeHTTP:
e.deliverHTTP(ctx, webhookDB, d, task) e.deliverHTTP(ctx, webhookDB, d, task)
@@ -854,7 +855,7 @@ func (e *Engine) processDelivery(ctx context.Context, webhookDB *gorm.DB, d *dat
} }
} }
func (e *Engine) deliverHTTP(_ context.Context, webhookDB *gorm.DB, d *database.Delivery, task *DeliveryTask) { func (e *Engine) deliverHTTP(_ context.Context, webhookDB *gorm.DB, d *database.Delivery, task *Task) {
cfg, err := e.parseHTTPConfig(d.Target.Config) cfg, err := e.parseHTTPConfig(d.Target.Config)
if err != nil { if err != nil {
e.log.Error("invalid HTTP target config", e.log.Error("invalid HTTP target config",
@@ -940,7 +941,7 @@ func (e *Engine) deliverHTTP(_ context.Context, webhookDB *gorm.DB, d *database.
e.updateDeliveryStatus(webhookDB, d, database.DeliveryStatusRetrying) e.updateDeliveryStatus(webhookDB, d, database.DeliveryStatusRetrying)
// Schedule a timer for the next retry with exponential backoff. // Schedule a timer for the next retry with exponential backoff.
// The timer fires a DeliveryTask into the retry channel carrying // The timer fires a Task into the retry channel carrying
// all data needed for the next attempt. // all data needed for the next attempt.
shift := attemptNum - 1 shift := attemptNum - 1
if shift > 30 { if shift > 30 {
@@ -1038,7 +1039,7 @@ func (e *Engine) deliverSlack(webhookDB *gorm.DB, d *database.Delivery) {
e.updateDeliveryStatus(webhookDB, d, database.DeliveryStatusFailed) e.updateDeliveryStatus(webhookDB, d, database.DeliveryStatusFailed)
return return
} }
defer resp.Body.Close() defer func() { _ = resp.Body.Close() }()
body, readErr := io.ReadAll(io.LimitReader(resp.Body, maxBodyLog)) body, readErr := io.ReadAll(io.LimitReader(resp.Body, maxBodyLog))
if readErr != nil { if readErr != nil {
@@ -1082,10 +1083,10 @@ func FormatSlackMessage(event *database.Event) string {
var b strings.Builder var b strings.Builder
b.WriteString("*Webhook Event Received*\n") b.WriteString("*Webhook Event Received*\n")
b.WriteString(fmt.Sprintf("*Method:* `%s`\n", event.Method)) fmt.Fprintf(&b, "*Method:* `%s`\n", event.Method)
b.WriteString(fmt.Sprintf("*Content-Type:* `%s`\n", event.ContentType)) fmt.Fprintf(&b, "*Content-Type:* `%s`\n", event.ContentType)
b.WriteString(fmt.Sprintf("*Timestamp:* `%s`\n", event.CreatedAt.UTC().Format(time.RFC3339))) fmt.Fprintf(&b, "*Timestamp:* `%s`\n", event.CreatedAt.UTC().Format(time.RFC3339))
b.WriteString(fmt.Sprintf("*Body Size:* %d bytes\n", len(event.Body))) fmt.Fprintf(&b, "*Body Size:* %d bytes\n", len(event.Body))
if event.Body == "" { if event.Body == "" {
b.WriteString("\n_(empty body)_\n") b.WriteString("\n_(empty body)_\n")
@@ -1172,7 +1173,7 @@ func (e *Engine) doHTTPRequest(cfg *HTTPTargetConfig, event *database.Event) (st
if err != nil { if err != nil {
return 0, "", durationMs, fmt.Errorf("sending request: %w", err) return 0, "", durationMs, fmt.Errorf("sending request: %w", err)
} }
defer resp.Body.Close() defer func() { _ = resp.Body.Close() }()
body, readErr := io.ReadAll(io.LimitReader(resp.Body, maxBodyLog)) body, readErr := io.ReadAll(io.LimitReader(resp.Body, maxBodyLog))
if readErr != nil { if readErr != nil {

View File

@@ -34,7 +34,7 @@ func testMainDB(t *testing.T) *gorm.DB {
sqlDB, err := sql.Open("sqlite", dsn) sqlDB, err := sql.Open("sqlite", dsn)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { sqlDB.Close() }) t.Cleanup(func() { _ = sqlDB.Close() })
db, err := gorm.Open(sqlite.Dialector{Conn: sqlDB}, &gorm.Config{}) db, err := gorm.Open(sqlite.Dialector{Conn: sqlDB}, &gorm.Config{})
require.NoError(t, err) require.NoError(t, err)
@@ -80,8 +80,8 @@ func testEngineWithDB(t *testing.T, mainDB *gorm.DB, dbMgr *database.WebhookDBMa
dbManager: dbMgr, dbManager: dbMgr,
log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})), log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})),
client: &http.Client{Timeout: 5 * time.Second}, client: &http.Client{Timeout: 5 * time.Second},
deliveryCh: make(chan DeliveryTask, deliveryChannelSize), deliveryCh: make(chan Task, deliveryChannelSize),
retryCh: make(chan DeliveryTask, retryChannelSize), retryCh: make(chan Task, retryChannelSize),
workers: 2, workers: 2,
} }
} }
@@ -101,7 +101,7 @@ func TestProcessNewTask_InlineBody(t *testing.T) {
received.Store(true) received.Store(true)
assert.Equal(t, "application/json", r.Header.Get("Content-Type")) assert.Equal(t, "application/json", r.Header.Get("Content-Type"))
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
fmt.Fprint(w, `{"ok":true}`) _, _ = fmt.Fprint(w, `{"ok":true}`)
})) }))
defer ts.Close() defer ts.Close()
@@ -128,7 +128,7 @@ func TestProcessNewTask_InlineBody(t *testing.T) {
require.NoError(t, webhookDB.Create(&delivery).Error) require.NoError(t, webhookDB.Create(&delivery).Error)
bodyStr := event.Body bodyStr := event.Body
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -196,7 +196,7 @@ func TestProcessNewTask_LargeBody_FetchFromDB(t *testing.T) {
require.NoError(t, webhookDB.Create(&delivery).Error) require.NoError(t, webhookDB.Create(&delivery).Error)
// Body is nil — engine should fetch from DB // Body is nil — engine should fetch from DB
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -231,7 +231,7 @@ func TestProcessNewTask_InvalidWebhookID(t *testing.T) {
// Use a webhook ID that has no database // Use a webhook ID that has no database
// GetDB will create it lazily in the real impl, but the event won't exist // GetDB will create it lazily in the real impl, but the event won't exist
task := DeliveryTask{ task := Task{
DeliveryID: uuid.New().String(), DeliveryID: uuid.New().String(),
EventID: uuid.New().String(), EventID: uuid.New().String(),
WebhookID: uuid.New().String(), WebhookID: uuid.New().String(),
@@ -285,7 +285,7 @@ func TestProcessRetryTask_SuccessfulRetry(t *testing.T) {
require.NoError(t, webhookDB.Create(&delivery).Error) require.NoError(t, webhookDB.Create(&delivery).Error)
bodyStr := event.Body bodyStr := event.Body
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -340,7 +340,7 @@ func TestProcessRetryTask_SkipsNonRetryingDelivery(t *testing.T) {
require.NoError(t, webhookDB.Create(&delivery).Error) require.NoError(t, webhookDB.Create(&delivery).Error)
bodyStr := event.Body bodyStr := event.Body
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -399,7 +399,7 @@ func TestProcessRetryTask_LargeBody_FetchFromDB(t *testing.T) {
} }
require.NoError(t, webhookDB.Create(&delivery).Error) require.NoError(t, webhookDB.Create(&delivery).Error)
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -456,7 +456,7 @@ func TestWorkerLifecycle_StartStop(t *testing.T) {
require.NoError(t, webhookDB.Create(&delivery).Error) require.NoError(t, webhookDB.Create(&delivery).Error)
bodyStr := event.Body bodyStr := event.Body
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -472,7 +472,7 @@ func TestWorkerLifecycle_StartStop(t *testing.T) {
AttemptNum: 1, AttemptNum: 1,
} }
e.Notify([]DeliveryTask{task}) e.Notify([]Task{task})
// Wait for the worker to process the task // Wait for the worker to process the task
require.Eventually(t, func() bool { require.Eventually(t, func() bool {
@@ -526,7 +526,7 @@ func TestWorkerLifecycle_ProcessesRetryChannel(t *testing.T) {
// Send task directly to retry channel // Send task directly to retry channel
bodyStr := event.Body bodyStr := event.Body
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -597,7 +597,7 @@ func TestProcessDelivery_UnknownTargetType(t *testing.T) {
} }
d.ID = delivery.ID d.ID = delivery.ID
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
TargetType: database.TargetType("unknown"), TargetType: database.TargetType("unknown"),
} }
@@ -867,7 +867,7 @@ func TestDeliverHTTP_CustomTargetHeaders(t *testing.T) {
require.NoError(t, webhookDB.Create(&delivery).Error) require.NoError(t, webhookDB.Create(&delivery).Error)
bodyStr := event.Body bodyStr := event.Body
task := DeliveryTask{ task := Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: webhookID, WebhookID: webhookID,
@@ -914,7 +914,7 @@ func TestDeliverHTTP_TargetTimeout(t *testing.T) {
event := seedEvent(t, db, `{"timeout":"test"}`) event := seedEvent(t, db, `{"timeout":"test"}`)
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -964,7 +964,7 @@ func TestDeliverHTTP_InvalidConfig(t *testing.T) {
event := seedEvent(t, db, `{"config":"invalid"}`) event := seedEvent(t, db, `{"config":"invalid"}`)
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -1002,9 +1002,9 @@ func TestNotify_MultipleTasks(t *testing.T) {
t.Parallel() t.Parallel()
e := testEngine(t, 1) e := testEngine(t, 1)
tasks := make([]DeliveryTask, 5) tasks := make([]Task, 5)
for i := range tasks { for i := range tasks {
tasks[i] = DeliveryTask{ tasks[i] = Task{
DeliveryID: fmt.Sprintf("task-%d", i), DeliveryID: fmt.Sprintf("task-%d", i),
} }
} }

View File

@@ -35,7 +35,7 @@ func testWebhookDB(t *testing.T) *gorm.DB {
sqlDB, err := sql.Open("sqlite", dsn) sqlDB, err := sql.Open("sqlite", dsn)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { sqlDB.Close() }) t.Cleanup(func() { _ = sqlDB.Close() })
db, err := gorm.Open(sqlite.Dialector{Conn: sqlDB}, &gorm.Config{}) db, err := gorm.Open(sqlite.Dialector{Conn: sqlDB}, &gorm.Config{})
require.NoError(t, err) require.NoError(t, err)
@@ -56,8 +56,8 @@ func testEngine(t *testing.T, workers int) *Engine {
return &Engine{ return &Engine{
log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})), log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})),
client: &http.Client{Timeout: 5 * time.Second}, client: &http.Client{Timeout: 5 * time.Second},
deliveryCh: make(chan DeliveryTask, deliveryChannelSize), deliveryCh: make(chan Task, deliveryChannelSize),
retryCh: make(chan DeliveryTask, retryChannelSize), retryCh: make(chan Task, retryChannelSize),
workers: workers, workers: workers,
} }
} }
@@ -108,13 +108,13 @@ func TestNotify_NonBlocking(t *testing.T) {
// Fill the delivery channel to capacity // Fill the delivery channel to capacity
for i := 0; i < deliveryChannelSize; i++ { for i := 0; i < deliveryChannelSize; i++ {
e.deliveryCh <- DeliveryTask{DeliveryID: fmt.Sprintf("fill-%d", i)} e.deliveryCh <- Task{DeliveryID: fmt.Sprintf("fill-%d", i)}
} }
// Notify should NOT block even though channel is full // Notify should NOT block even though channel is full
done := make(chan struct{}) done := make(chan struct{})
go func() { go func() {
e.Notify([]DeliveryTask{ e.Notify([]Task{
{DeliveryID: "overflow-1"}, {DeliveryID: "overflow-1"},
{DeliveryID: "overflow-2"}, {DeliveryID: "overflow-2"},
}) })
@@ -134,10 +134,10 @@ func TestDeliverHTTP_Success(t *testing.T) {
db := testWebhookDB(t) db := testWebhookDB(t)
var received atomic.Bool var received atomic.Bool
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
received.Store(true) received.Store(true)
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
fmt.Fprint(w, `{"ok":true}`) _, _ = fmt.Fprint(w, `{"ok":true}`)
})) }))
defer ts.Close() defer ts.Close()
@@ -147,7 +147,7 @@ func TestDeliverHTTP_Success(t *testing.T) {
event := seedEvent(t, db, `{"hello":"world"}`) event := seedEvent(t, db, `{"hello":"world"}`)
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -194,7 +194,7 @@ func TestDeliverHTTP_Failure(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
fmt.Fprint(w, "internal error") _, _ = fmt.Fprint(w, "internal error")
})) }))
defer ts.Close() defer ts.Close()
@@ -204,7 +204,7 @@ func TestDeliverHTTP_Failure(t *testing.T) {
event := seedEvent(t, db, `{"test":true}`) event := seedEvent(t, db, `{"test":true}`)
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -322,7 +322,7 @@ func TestDeliverHTTP_WithRetries_Success(t *testing.T) {
event := seedEvent(t, db, `{"retry":"ok"}`) event := seedEvent(t, db, `{"retry":"ok"}`)
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -376,7 +376,7 @@ func TestDeliverHTTP_MaxRetriesExhausted(t *testing.T) {
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusRetrying) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusRetrying)
maxRetries := 3 maxRetries := 3
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -427,7 +427,7 @@ func TestDeliverHTTP_SchedulesRetryOnFailure(t *testing.T) {
event := seedEvent(t, db, `{"retry":"schedule"}`) event := seedEvent(t, db, `{"retry":"schedule"}`)
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -494,8 +494,8 @@ func TestExponentialBackoff_Durations(t *testing.T) {
shift = 30 shift = 30
} }
backoff := time.Duration(1<<uint(shift)) * time.Second //nolint:gosec // bounded above backoff := time.Duration(1<<uint(shift)) * time.Second //nolint:gosec // bounded above
assert.Equal(t, expected[attemptNum-1], backoff, assert.Equal(t, expected[attemptNum-1], backoff, //nolint:gosec // bounded by loop range
"backoff for attempt %d should be %v", attemptNum, expected[attemptNum-1]) "backoff for attempt %d should be %v", attemptNum, expected[attemptNum-1]) //nolint:gosec // bounded by loop range
} }
} }
@@ -618,10 +618,10 @@ func TestWorkerPool_BoundedConcurrency(t *testing.T) {
tasks[i].ID = delivery.ID tasks[i].ID = delivery.ID
} }
// Build DeliveryTask structs for each delivery (needed by deliverHTTP) // Build Task structs for each delivery (needed by deliverHTTP)
deliveryTasks := make([]DeliveryTask, numTasks) deliveryTasks := make([]Task, numTasks)
for i := 0; i < numTasks; i++ { for i := 0; i < numTasks; i++ {
deliveryTasks[i] = DeliveryTask{ deliveryTasks[i] = Task{
DeliveryID: tasks[i].ID, DeliveryID: tasks[i].ID,
EventID: tasks[i].EventID, EventID: tasks[i].EventID,
TargetID: tasks[i].TargetID, TargetID: tasks[i].TargetID,
@@ -687,7 +687,7 @@ func TestDeliverHTTP_CircuitBreakerBlocks(t *testing.T) {
event := seedEvent(t, db, `{"cb":"blocked"}`) event := seedEvent(t, db, `{"cb":"blocked"}`)
delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending) delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: event.WebhookID, WebhookID: event.WebhookID,
@@ -778,7 +778,7 @@ func TestScheduleRetry_SendsToRetryChannel(t *testing.T) {
t.Parallel() t.Parallel()
e := testEngine(t, 1) e := testEngine(t, 1)
task := DeliveryTask{ task := Task{
DeliveryID: uuid.New().String(), DeliveryID: uuid.New().String(),
EventID: uuid.New().String(), EventID: uuid.New().String(),
WebhookID: uuid.New().String(), WebhookID: uuid.New().String(),
@@ -802,13 +802,13 @@ func TestScheduleRetry_DropsWhenChannelFull(t *testing.T) {
t.Parallel() t.Parallel()
e := &Engine{ e := &Engine{
log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})), log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})),
retryCh: make(chan DeliveryTask, 1), // tiny buffer retryCh: make(chan Task, 1), // tiny buffer
} }
// Fill the retry channel // Fill the retry channel
e.retryCh <- DeliveryTask{DeliveryID: "fill"} e.retryCh <- Task{DeliveryID: "fill"}
task := DeliveryTask{ task := Task{
DeliveryID: "overflow", DeliveryID: "overflow",
AttemptNum: 2, AttemptNum: 2,
} }
@@ -915,7 +915,7 @@ func TestProcessDelivery_RoutesToCorrectHandler(t *testing.T) {
} }
d.ID = delivery.ID d.ID = delivery.ID
task := &DeliveryTask{ task := &Task{
DeliveryID: delivery.ID, DeliveryID: delivery.ID,
TargetType: tt.targetType, TargetType: tt.targetType,
} }
@@ -1054,7 +1054,7 @@ func TestDeliverSlack_Success(t *testing.T) {
receivedBody = string(bodyBytes) receivedBody = string(bodyBytes)
assert.Equal(t, "application/json", r.Header.Get("Content-Type")) assert.Equal(t, "application/json", r.Header.Get("Content-Type"))
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
fmt.Fprint(w, "ok") _, _ = fmt.Fprint(w, "ok")
})) }))
defer ts.Close() defer ts.Close()
@@ -1107,7 +1107,7 @@ func TestDeliverSlack_Failure(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusForbidden) w.WriteHeader(http.StatusForbidden)
fmt.Fprint(w, "invalid_token") _, _ = fmt.Fprint(w, "invalid_token")
})) }))
defer ts.Close() defer ts.Close()
@@ -1203,7 +1203,7 @@ func TestProcessDelivery_RoutesToSlack(t *testing.T) {
} }
d.ID = dlv.ID d.ID = dlv.ID
task := &DeliveryTask{ task := &Task{
DeliveryID: dlv.ID, DeliveryID: dlv.ID,
TargetType: database.TargetTypeSlack, TargetType: database.TargetTypeSlack,
} }

View File

@@ -1,21 +1,27 @@
// Package globals provides build-time variables injected via ldflags.
package globals package globals
import ( import (
"go.uber.org/fx" "go.uber.org/fx"
) )
// these get populated from main() and copied into the Globals object. // Build-time variables populated from main() and copied into the Globals object.
//
//nolint:gochecknoglobals // Build-time variables set by main().
var ( var (
Appname string Appname string
Version string Version string
) )
// Globals holds build-time metadata about the application.
type Globals struct { type Globals struct {
Appname string Appname string
Version string Version string
} }
// nolint:revive // lc parameter is required by fx even if unused // New creates a Globals instance from the package-level build-time variables.
//
//nolint:revive // lc parameter is required by fx even if unused.
func New(lc fx.Lifecycle) (*Globals, error) { func New(lc fx.Lifecycle) (*Globals, error) {
n := &Globals{ n := &Globals{
Appname: Appname, Appname: Appname,

View File

@@ -28,6 +28,9 @@ func (h *Handlers) HandleLoginPage() http.HandlerFunc {
// HandleLoginSubmit handles the login form submission (POST) // HandleLoginSubmit handles the login form submission (POST)
func (h *Handlers) HandleLoginSubmit() http.HandlerFunc { func (h *Handlers) HandleLoginSubmit() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
// Limit request body to prevent memory exhaustion
r.Body = http.MaxBytesReader(w, r.Body, 1<<20) // 1 MB
// Parse form data // Parse form data
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
h.log.Error("failed to parse form", "error", err) h.log.Error("failed to parse form", "error", err)

View File

@@ -1,3 +1,4 @@
// Package handlers provides HTTP request handlers for the webhooker web UI and API.
package handlers package handlers
import ( import (
@@ -18,7 +19,7 @@ import (
"sneak.berlin/go/webhooker/templates" "sneak.berlin/go/webhooker/templates"
) )
// nolint:revive // HandlersParams is a standard fx naming convention //nolint:revive // HandlersParams is a standard fx naming convention.
type HandlersParams struct { type HandlersParams struct {
fx.In fx.In
Logger *logger.Logger Logger *logger.Logger
@@ -30,6 +31,7 @@ type HandlersParams struct {
Notifier delivery.Notifier Notifier delivery.Notifier
} }
// Handlers provides HTTP handler methods for all application routes.
type Handlers struct { type Handlers struct {
params *HandlersParams params *HandlersParams
log *slog.Logger log *slog.Logger
@@ -53,6 +55,7 @@ func parsePageTemplate(pageFile string) *template.Template {
) )
} }
// New creates a Handlers instance, parsing all page templates at startup.
func New(lc fx.Lifecycle, params HandlersParams) (*Handlers, error) { func New(lc fx.Lifecycle, params HandlersParams) (*Handlers, error) {
s := new(Handlers) s := new(Handlers)
s.params = &params s.params = &params
@@ -76,15 +79,15 @@ func New(lc fx.Lifecycle, params HandlersParams) (*Handlers, error) {
} }
lc.Append(fx.Hook{ lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error { OnStart: func(_ context.Context) error {
return nil return nil
}, },
}) })
return s, nil return s, nil
} }
//nolint:unparam // r parameter will be used in the future for request context //nolint:unparam // r parameter will be used in the future for request context.
func (s *Handlers) respondJSON(w http.ResponseWriter, r *http.Request, data interface{}, status int) { func (s *Handlers) respondJSON(w http.ResponseWriter, _ *http.Request, data interface{}, status int) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status) w.WriteHeader(status)
if data != nil { if data != nil {
@@ -95,8 +98,8 @@ func (s *Handlers) respondJSON(w http.ResponseWriter, r *http.Request, data inte
} }
} }
//nolint:unparam,unused // will be used for handling JSON requests //nolint:unparam,unused // will be used for handling JSON requests.
func (s *Handlers) decodeJSON(w http.ResponseWriter, r *http.Request, v interface{}) error { func (s *Handlers) decodeJSON(_ http.ResponseWriter, r *http.Request, v interface{}) error {
return json.NewDecoder(r.Body).Decode(v) return json.NewDecoder(r.Body).Decode(v)
} }

View File

@@ -22,7 +22,7 @@ import (
// noopNotifier is a no-op delivery.Notifier for tests. // noopNotifier is a no-op delivery.Notifier for tests.
type noopNotifier struct{} type noopNotifier struct{}
func (n *noopNotifier) Notify([]delivery.DeliveryTask) {} func (n *noopNotifier) Notify([]delivery.Task) {}
func TestHandleIndex(t *testing.T) { func TestHandleIndex(t *testing.T) {
var h *Handlers var h *Handlers

View File

@@ -4,6 +4,7 @@ import (
"net/http" "net/http"
) )
// HandleHealthCheck returns an HTTP handler that reports application health.
func (s *Handlers) HandleHealthCheck() http.HandlerFunc { func (s *Handlers) HandleHealthCheck() http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) {
resp := s.hc.Healthcheck() resp := s.hc.Healthcheck()

View File

@@ -8,6 +8,7 @@ import (
"sneak.berlin/go/webhooker/internal/database" "sneak.berlin/go/webhooker/internal/database"
) )
// HandleIndex returns an HTTP handler that renders the application dashboard.
func (s *Handlers) HandleIndex() http.HandlerFunc { func (s *Handlers) HandleIndex() http.HandlerFunc {
// Calculate server start time // Calculate server start time
startTime := time.Now() startTime := time.Now()

View File

@@ -76,6 +76,7 @@ func (h *Handlers) HandleSourceCreateSubmit() http.HandlerFunc {
return return
} }
r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
http.Error(w, "Bad request", http.StatusBadRequest) http.Error(w, "Bad request", http.StatusBadRequest)
return return
@@ -257,6 +258,7 @@ func (h *Handlers) HandleSourceEditSubmit() http.HandlerFunc {
return return
} }
r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
http.Error(w, "Bad request", http.StatusBadRequest) http.Error(w, "Bad request", http.StatusBadRequest)
return return
@@ -462,6 +464,7 @@ func (h *Handlers) HandleEntrypointCreate() http.HandlerFunc {
return return
} }
r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
http.Error(w, "Bad request", http.StatusBadRequest) http.Error(w, "Bad request", http.StatusBadRequest)
return return
@@ -503,6 +506,7 @@ func (h *Handlers) HandleTargetCreate() http.HandlerFunc {
return return
} }
r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
http.Error(w, "Bad request", http.StatusBadRequest) http.Error(w, "Bad request", http.StatusBadRequest)
return return
@@ -529,7 +533,8 @@ func (h *Handlers) HandleTargetCreate() http.HandlerFunc {
// Build config JSON based on target type // Build config JSON based on target type
var configJSON string var configJSON string
if targetType == database.TargetTypeHTTP { switch targetType {
case database.TargetTypeHTTP:
if url == "" { if url == "" {
http.Error(w, "URL is required for HTTP targets", http.StatusBadRequest) http.Error(w, "URL is required for HTTP targets", http.StatusBadRequest)
return return
@@ -554,7 +559,7 @@ func (h *Handlers) HandleTargetCreate() http.HandlerFunc {
return return
} }
configJSON = string(configBytes) configJSON = string(configBytes)
} else if targetType == database.TargetTypeSlack { case database.TargetTypeSlack:
if url == "" { if url == "" {
http.Error(w, "Webhook URL is required for Slack targets", http.StatusBadRequest) http.Error(w, "Webhook URL is required for Slack targets", http.StatusBadRequest)
return return

View File

@@ -18,7 +18,7 @@ const (
// HandleWebhook handles incoming webhook requests at entrypoint URLs. // HandleWebhook handles incoming webhook requests at entrypoint URLs.
// Only POST requests are accepted; all other methods return 405 Method Not Allowed. // Only POST requests are accepted; all other methods return 405 Method Not Allowed.
// Events and deliveries are stored in the per-webhook database. The handler // Events and deliveries are stored in the per-webhook database. The handler
// builds self-contained DeliveryTask structs with all target and event data // builds self-contained Task structs with all target and event data
// so the delivery engine can process them without additional DB reads. // so the delivery engine can process them without additional DB reads.
func (h *Handlers) HandleWebhook() http.HandlerFunc { func (h *Handlers) HandleWebhook() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
@@ -119,7 +119,7 @@ func (h *Handlers) HandleWebhook() http.HandlerFunc {
} }
// Prepare body pointer for inline transport (≤16KB bodies are // Prepare body pointer for inline transport (≤16KB bodies are
// included in the DeliveryTask so the engine needs no DB read). // included in the Task so the engine needs no DB read).
var bodyPtr *string var bodyPtr *string
if len(body) < delivery.MaxInlineBodySize { if len(body) < delivery.MaxInlineBodySize {
bodyStr := string(body) bodyStr := string(body)
@@ -127,7 +127,7 @@ func (h *Handlers) HandleWebhook() http.HandlerFunc {
} }
// Create delivery records and build self-contained delivery tasks // Create delivery records and build self-contained delivery tasks
tasks := make([]delivery.DeliveryTask, 0, len(targets)) tasks := make([]delivery.Task, 0, len(targets))
for i := range targets { for i := range targets {
dlv := &database.Delivery{ dlv := &database.Delivery{
EventID: event.ID, EventID: event.ID,
@@ -144,7 +144,7 @@ func (h *Handlers) HandleWebhook() http.HandlerFunc {
return return
} }
tasks = append(tasks, delivery.DeliveryTask{ tasks = append(tasks, delivery.Task{
DeliveryID: dlv.ID, DeliveryID: dlv.ID,
EventID: event.ID, EventID: event.ID,
WebhookID: entrypoint.WebhookID, WebhookID: entrypoint.WebhookID,

View File

@@ -1,3 +1,4 @@
// Package healthcheck provides application health status reporting.
package healthcheck package healthcheck
import ( import (
@@ -12,7 +13,7 @@ import (
"sneak.berlin/go/webhooker/internal/logger" "sneak.berlin/go/webhooker/internal/logger"
) )
// nolint:revive // HealthcheckParams is a standard fx naming convention //nolint:revive // HealthcheckParams is a standard fx naming convention.
type HealthcheckParams struct { type HealthcheckParams struct {
fx.In fx.In
Globals *globals.Globals Globals *globals.Globals
@@ -21,30 +22,33 @@ type HealthcheckParams struct {
Database *database.Database Database *database.Database
} }
// Healthcheck tracks application uptime and reports health status.
type Healthcheck struct { type Healthcheck struct {
StartupTime time.Time StartupTime time.Time
log *slog.Logger log *slog.Logger
params *HealthcheckParams params *HealthcheckParams
} }
// New creates a Healthcheck that records the startup time on fx start.
func New(lc fx.Lifecycle, params HealthcheckParams) (*Healthcheck, error) { func New(lc fx.Lifecycle, params HealthcheckParams) (*Healthcheck, error) {
s := new(Healthcheck) s := new(Healthcheck)
s.params = &params s.params = &params
s.log = params.Logger.Get() s.log = params.Logger.Get()
lc.Append(fx.Hook{ lc.Append(fx.Hook{
OnStart: func(_ context.Context) error { // nolint:revive // ctx unused but required by fx //nolint:revive // ctx unused but required by fx.
OnStart: func(_ context.Context) error {
s.StartupTime = time.Now() s.StartupTime = time.Now()
return nil return nil
}, },
OnStop: func(ctx context.Context) error { OnStop: func(_ context.Context) error {
return nil return nil
}, },
}) })
return s, nil return s, nil
} }
// nolint:revive // HealthcheckResponse is a clear, descriptive name //nolint:revive // HealthcheckResponse is a clear, descriptive name.
type HealthcheckResponse struct { type HealthcheckResponse struct {
Status string `json:"status"` Status string `json:"status"`
Now string `json:"now"` Now string `json:"now"`
@@ -59,6 +63,7 @@ func (s *Healthcheck) uptime() time.Duration {
return time.Since(s.StartupTime) return time.Since(s.StartupTime)
} }
// Healthcheck returns the current health status of the application.
func (s *Healthcheck) Healthcheck() *HealthcheckResponse { func (s *Healthcheck) Healthcheck() *HealthcheckResponse {
resp := &HealthcheckResponse{ resp := &HealthcheckResponse{
Status: "ok", Status: "ok",

View File

@@ -1,3 +1,4 @@
// Package logger provides structured logging with dynamic level control.
package logger package logger
import ( import (
@@ -10,19 +11,22 @@ import (
"sneak.berlin/go/webhooker/internal/globals" "sneak.berlin/go/webhooker/internal/globals"
) )
// nolint:revive // LoggerParams is a standard fx naming convention //nolint:revive // LoggerParams is a standard fx naming convention.
type LoggerParams struct { type LoggerParams struct {
fx.In fx.In
Globals *globals.Globals Globals *globals.Globals
} }
// Logger wraps slog with dynamic level control and structured output.
type Logger struct { type Logger struct {
logger *slog.Logger logger *slog.Logger
levelVar *slog.LevelVar levelVar *slog.LevelVar
params LoggerParams params LoggerParams
} }
// nolint:revive // lc parameter is required by fx even if unused // New creates a Logger that outputs text (TTY) or JSON (non-TTY) to stdout.
//
//nolint:revive // lc parameter is required by fx even if unused.
func New(lc fx.Lifecycle, params LoggerParams) (*Logger, error) { func New(lc fx.Lifecycle, params LoggerParams) (*Logger, error) {
l := new(Logger) l := new(Logger)
l.params = params l.params = params
@@ -37,7 +41,8 @@ func New(lc fx.Lifecycle, params LoggerParams) (*Logger, error) {
tty = true tty = true
} }
replaceAttr := func(_ []string, a slog.Attr) slog.Attr { // nolint:revive // groups unused //nolint:revive // groups param unused but required by slog ReplaceAttr signature.
replaceAttr := func(_ []string, a slog.Attr) slog.Attr {
// Always use UTC for timestamps // Always use UTC for timestamps
if a.Key == slog.TimeKey { if a.Key == slog.TimeKey {
if t, ok := a.Value.Any().(time.Time); ok { if t, ok := a.Value.Any().(time.Time); ok {
@@ -69,15 +74,18 @@ func New(lc fx.Lifecycle, params LoggerParams) (*Logger, error) {
return l, nil return l, nil
} }
// EnableDebugLogging switches the log level to debug.
func (l *Logger) EnableDebugLogging() { func (l *Logger) EnableDebugLogging() {
l.levelVar.Set(slog.LevelDebug) l.levelVar.Set(slog.LevelDebug)
l.logger.Debug("debug logging enabled", "debug", true) l.logger.Debug("debug logging enabled", "debug", true)
} }
// Get returns the underlying slog.Logger.
func (l *Logger) Get() *slog.Logger { func (l *Logger) Get() *slog.Logger {
return l.logger return l.logger
} }
// Identify logs the application name and version at startup.
func (l *Logger) Identify() { func (l *Logger) Identify() {
l.logger.Info("starting", l.logger.Info("starting",
"appname", l.params.Globals.Appname, "appname", l.params.Globals.Appname,
@@ -85,7 +93,7 @@ func (l *Logger) Identify() {
) )
} }
// Helper methods to maintain compatibility with existing code // Writer returns an io.Writer suitable for standard library loggers.
func (l *Logger) Writer() io.Writer { func (l *Logger) Writer() io.Writer {
return os.Stdout return os.Stdout
} }

View File

@@ -1,3 +1,4 @@
// Package middleware provides HTTP middleware for logging, auth, CORS, and metrics.
package middleware package middleware
import ( import (
@@ -19,7 +20,7 @@ import (
"sneak.berlin/go/webhooker/internal/session" "sneak.berlin/go/webhooker/internal/session"
) )
// nolint:revive // MiddlewareParams is a standard fx naming convention //nolint:revive // MiddlewareParams is a standard fx naming convention.
type MiddlewareParams struct { type MiddlewareParams struct {
fx.In fx.In
Logger *logger.Logger Logger *logger.Logger
@@ -28,12 +29,16 @@ type MiddlewareParams struct {
Session *session.Session Session *session.Session
} }
// Middleware provides HTTP middleware for logging, CORS, auth, and metrics.
type Middleware struct { type Middleware struct {
log *slog.Logger log *slog.Logger
params *MiddlewareParams params *MiddlewareParams
session *session.Session session *session.Session
} }
// New creates a Middleware from the provided fx parameters.
//
//nolint:revive // lc parameter is required by fx even if unused.
func New(lc fx.Lifecycle, params MiddlewareParams) (*Middleware, error) { func New(lc fx.Lifecycle, params MiddlewareParams) (*Middleware, error) {
s := new(Middleware) s := new(Middleware)
s.params = &params s.params = &params
@@ -71,9 +76,7 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.ResponseWriter.WriteHeader(code) lrw.ResponseWriter.WriteHeader(code)
} }
// type Middleware func(http.Handler) http.Handler // Logging returns middleware that logs each HTTP request with timing and metadata.
// this returns a Middleware that is designed to do every request through the
// mux, note the signature:
func (s *Middleware) Logging() func(http.Handler) http.Handler { func (s *Middleware) Logging() func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler { return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -107,6 +110,7 @@ func (s *Middleware) Logging() func(http.Handler) http.Handler {
} }
} }
// CORS returns middleware that sets CORS headers (permissive in dev, no-op in prod).
func (s *Middleware) CORS() func(http.Handler) http.Handler { func (s *Middleware) CORS() func(http.Handler) http.Handler {
if s.params.Config.IsDev() { if s.params.Config.IsDev() {
// In development, allow any origin for local testing. // In development, allow any origin for local testing.
@@ -152,6 +156,7 @@ func (s *Middleware) RequireAuth() func(http.Handler) http.Handler {
} }
} }
// Metrics returns middleware that records Prometheus HTTP metrics.
func (s *Middleware) Metrics() func(http.Handler) http.Handler { func (s *Middleware) Metrics() func(http.Handler) http.Handler {
mdlw := ghmm.New(ghmm.Config{ mdlw := ghmm.New(ghmm.Config{
Recorder: metrics.NewRecorder(metrics.Config{}), Recorder: metrics.NewRecorder(metrics.Config{}),
@@ -161,6 +166,7 @@ func (s *Middleware) Metrics() func(http.Handler) http.Handler {
} }
} }
// MetricsAuth returns middleware that protects metrics endpoints with basic auth.
func (s *Middleware) MetricsAuth() func(http.Handler) http.Handler { func (s *Middleware) MetricsAuth() func(http.Handler) http.Handler {
return basicauth.New( return basicauth.New(
"metrics", "metrics",

View File

@@ -417,7 +417,7 @@ func TestMetricsAuth_NoCredentials(t *testing.T) {
} }
var called bool var called bool
handler := m.MetricsAuth()(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { handler := m.MetricsAuth()(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {
called = true called = true
})) }))

View File

@@ -16,6 +16,7 @@ import (
// preventing abuse from oversized payloads. // preventing abuse from oversized payloads.
const maxFormBodySize int64 = 1 * 1024 * 1024 // 1 MB const maxFormBodySize int64 = 1 * 1024 * 1024 // 1 MB
// SetupRoutes configures all HTTP routes and middleware on the server's router.
func (s *Server) SetupRoutes() { func (s *Server) SetupRoutes() {
s.router = chi.NewRouter() s.router = chi.NewRouter()

View File

@@ -1,3 +1,4 @@
// Package server wires up HTTP routes and manages the application lifecycle.
package server package server
import ( import (
@@ -21,7 +22,7 @@ import (
"github.com/go-chi/chi" "github.com/go-chi/chi"
) )
// nolint:revive // ServerParams is a standard fx naming convention //nolint:revive // ServerParams is a standard fx naming convention.
type ServerParams struct { type ServerParams struct {
fx.In fx.In
Logger *logger.Logger Logger *logger.Logger
@@ -31,6 +32,7 @@ type ServerParams struct {
Handlers *handlers.Handlers Handlers *handlers.Handlers
} }
// Server is the main HTTP server that wires up routes and manages graceful shutdown.
type Server struct { type Server struct {
startupTime time.Time startupTime time.Time
exitCode int exitCode int
@@ -45,6 +47,7 @@ type Server struct {
h *handlers.Handlers h *handlers.Handlers
} }
// New creates a Server that starts the HTTP listener on fx start and stops it gracefully.
func New(lc fx.Lifecycle, params ServerParams) (*Server, error) { func New(lc fx.Lifecycle, params ServerParams) (*Server, error) {
s := new(Server) s := new(Server)
s.params = params s.params = params
@@ -53,12 +56,12 @@ func New(lc fx.Lifecycle, params ServerParams) (*Server, error) {
s.log = params.Logger.Get() s.log = params.Logger.Get()
lc.Append(fx.Hook{ lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error { OnStart: func(_ context.Context) error {
s.startupTime = time.Now() s.startupTime = time.Now()
go s.Run() go s.Run()
return nil return nil
}, },
OnStop: func(ctx context.Context) error { OnStop: func(_ context.Context) error {
s.cleanShutdown() s.cleanShutdown()
return nil return nil
}, },
@@ -66,6 +69,7 @@ func New(lc fx.Lifecycle, params ServerParams) (*Server, error) {
return s, nil return s, nil
} }
// Run configures Sentry and starts serving HTTP requests.
func (s *Server) Run() { func (s *Server) Run() {
s.configure() s.configure()
@@ -142,6 +146,7 @@ func (s *Server) cleanShutdown() {
} }
} }
// MaintenanceMode returns whether the server is in maintenance mode.
func (s *Server) MaintenanceMode() bool { func (s *Server) MaintenanceMode() bool {
return s.params.Config.MaintenanceMode return s.params.Config.MaintenanceMode
} }

View File

@@ -1,3 +1,4 @@
// Package session manages HTTP session storage and authentication state.
package session package session
import ( import (

View File

@@ -1,8 +1,11 @@
// Package static embeds static assets (CSS, JS) served by the web UI.
package static package static
import ( import (
"embed" "embed"
) )
// Static holds the embedded CSS and JavaScript files for the web UI.
//
//go:embed css js //go:embed css js
var Static embed.FS var Static embed.FS

View File

@@ -1,8 +1,11 @@
// Package templates embeds HTML templates used by the web UI.
package templates package templates
import ( import (
"embed" "embed"
) )
// Templates holds the embedded HTML template files.
//
//go:embed *.html //go:embed *.html
var Templates embed.FS var Templates embed.FS