refactor: use pinned golangci-lint Docker image for linting
All checks were successful
check / check (push) Successful in 1m41s

Refactor Dockerfile to use a separate lint stage with a pinned
golangci-lint v2.11.3 Docker image instead of installing
golangci-lint via curl in the builder stage. This follows the
pattern used by sneak/pixa.

Changes:
- Dockerfile: separate lint stage using golangci/golangci-lint:v2.11.3
  (Debian-based, pinned by sha256) with a COPY --from=lint dependency
- Bump Go from 1.24 to 1.26.1 (golang:1.26.1-bookworm, pinned)
- Bump golangci-lint from v1.64.8 to v2.11.3
- Migrate .golangci.yml from v1 to v2 format (same linters, format only)
- All Docker images pinned by sha256 digest
- Fix all lint issues from the v2 linter upgrade:
  - Add package comments to all packages
  - Add doc comments to all exported types, functions, and methods
  - Fix unchecked errors (errcheck)
  - Fix unused parameters (revive)
  - Fix gosec warnings (MaxBytesReader for form parsing)
  - Fix staticcheck suggestions (fmt.Fprintf instead of WriteString)
  - Rename DeliveryTask to Task to avoid stutter (delivery.Task)
  - Rename shadowed builtin 'max' parameter
- Update README.md version requirements
This commit is contained in:
clawbot
2026-03-17 05:46:03 -07:00
parent f003ec7141
commit 4d5ebfd692
32 changed files with 236 additions and 175 deletions

View File

@@ -1,3 +1,4 @@
// Package delivery manages asynchronous event delivery to configured targets.
package delivery
import (
@@ -20,7 +21,7 @@ import (
const (
// deliveryChannelSize is the buffer size for the delivery channel.
// New DeliveryTasks from the webhook handler are sent here. Workers
// New Tasks from the webhook handler are sent here. Workers
// drain this channel. Sized large enough that the webhook handler
// should never block under normal load.
deliveryChannelSize = 10000
@@ -41,7 +42,7 @@ const (
retrySweepInterval = 60 * time.Second
// MaxInlineBodySize is the maximum event body size that will be carried
// inline in a DeliveryTask through the channel. Bodies at or above this
// inline in a Task through the channel. Bodies at or above this
// size are left nil and fetched from the per-webhook database on demand.
// This keeps channel buffer memory bounded under high traffic.
MaxInlineBodySize = 16 * 1024
@@ -53,7 +54,7 @@ const (
maxBodyLog = 4096
)
// DeliveryTask contains everything needed to deliver an event to a single
// Task contains everything needed to deliver an event to a single
// target. In the ≤16KB happy path, Body is non-nil and the engine delivers
// without touching any database — it trusts that the webhook handler wrote
// the records correctly. Only after a delivery attempt (success or failure)
@@ -61,7 +62,7 @@ const (
//
// When Body is nil (payload ≥ MaxInlineBodySize), the engine fetches the
// body from the per-webhook database using EventID before delivering.
type DeliveryTask struct {
type Task struct {
DeliveryID string // ID of the Delivery record (for recording results)
EventID string // Event ID (for DB lookup if body is nil)
WebhookID string // Webhook ID (for per-webhook DB access)
@@ -88,7 +89,7 @@ type DeliveryTask struct {
// Notifier is the interface for notifying the delivery engine about new
// deliveries. Implemented by Engine and injected into handlers.
type Notifier interface {
Notify(tasks []DeliveryTask)
Notify(tasks []Task)
}
// HTTPTargetConfig holds configuration for http target types.
@@ -116,7 +117,7 @@ type EngineParams struct {
// Engine processes queued deliveries in the background using a bounded
// worker pool architecture. New deliveries arrive as individual
// DeliveryTask values via a buffered delivery channel from the webhook
// Task values via a buffered delivery channel from the webhook
// handler. Failed deliveries that need retry are scheduled via Go timers
// with exponential backoff; each timer fires into a separate retry
// channel. A fixed number of worker goroutines drain both channels,
@@ -135,8 +136,8 @@ type Engine struct {
client *http.Client
cancel context.CancelFunc
wg sync.WaitGroup
deliveryCh chan DeliveryTask
retryCh chan DeliveryTask
deliveryCh chan Task
retryCh chan Task
workers int
// circuitBreakers stores a *CircuitBreaker per target ID. Only used
@@ -156,8 +157,8 @@ func New(lc fx.Lifecycle, params EngineParams) *Engine {
Timeout: httpClientTimeout,
Transport: NewSSRFSafeTransport(),
},
deliveryCh: make(chan DeliveryTask, deliveryChannelSize),
retryCh: make(chan DeliveryTask, retryChannelSize),
deliveryCh: make(chan Task, deliveryChannelSize),
retryCh: make(chan Task, retryChannelSize),
workers: defaultWorkers,
}
@@ -208,11 +209,11 @@ func (e *Engine) stop() {
// Notify signals the delivery engine that new deliveries are ready.
// Called by the webhook handler after creating delivery records. Each
// DeliveryTask carries all data needed for delivery in the ≤16KB case.
// Task carries all data needed for delivery in the ≤16KB case.
// Tasks are sent individually to the delivery channel. The call is
// non-blocking; if the channel is full, a warning is logged and the
// delivery will be recovered on the next engine restart.
func (e *Engine) Notify(tasks []DeliveryTask) {
func (e *Engine) Notify(tasks []Task) {
for i := range tasks {
select {
case e.deliveryCh <- tasks[i]:
@@ -255,7 +256,7 @@ func (e *Engine) recoverPending(ctx context.Context) {
// channel. It builds the event and target context from the task's inline
// data and executes the delivery. For large bodies (≥ MaxInlineBodySize),
// the body is fetched from the per-webhook database on demand.
func (e *Engine) processNewTask(ctx context.Context, task *DeliveryTask) {
func (e *Engine) processNewTask(ctx context.Context, task *Task) {
webhookDB, err := e.dbManager.GetDB(task.WebhookID)
if err != nil {
e.log.Error("failed to get webhook database",
@@ -316,7 +317,7 @@ func (e *Engine) processNewTask(ctx context.Context, task *DeliveryTask) {
// The task carries all data needed for delivery (same as the initial
// notification). The only DB read is a status check to verify the delivery
// hasn't been cancelled or resolved while the timer was pending.
func (e *Engine) processRetryTask(ctx context.Context, task *DeliveryTask) {
func (e *Engine) processRetryTask(ctx context.Context, task *Task) {
webhookDB, err := e.dbManager.GetDB(task.WebhookID)
if err != nil {
e.log.Error("failed to get webhook database for retry",
@@ -504,7 +505,7 @@ func (e *Engine) recoverWebhookDeliveries(ctx context.Context, webhookID string)
bodyPtr = &bodyStr
}
task := DeliveryTask{
task := Task{
DeliveryID: d.ID,
EventID: d.EventID,
WebhookID: webhookID,
@@ -604,7 +605,7 @@ func (e *Engine) recoverPendingDeliveries(ctx context.Context, webhookDB *gorm.D
bodyPtr = &bodyStr
}
task := DeliveryTask{
task := Task{
DeliveryID: deliveries[i].ID,
EventID: deliveries[i].EventID,
WebhookID: webhookID,
@@ -632,7 +633,7 @@ func (e *Engine) recoverPendingDeliveries(ctx context.Context, webhookDB *gorm.D
}
// scheduleRetry creates a Go timer that fires after the given delay and
// sends the full DeliveryTask to the engine's retry channel. The task
// sends the full Task to the engine's retry channel. The task
// carries all data needed for the retry attempt, so when it fires, a
// worker can deliver without reading event or target data from the DB.
//
@@ -640,7 +641,7 @@ func (e *Engine) recoverPendingDeliveries(ctx context.Context, webhookDB *gorm.D
// dropped. The delivery remains in `retrying` status in the database
// and will be picked up by the periodic retry sweep (DB-mediated
// fallback path). No goroutines are blocked or re-armed.
func (e *Engine) scheduleRetry(task DeliveryTask, delay time.Duration) {
func (e *Engine) scheduleRetry(task Task, delay time.Duration) {
e.log.Debug("scheduling delivery retry",
"webhook_id", task.WebhookID,
"delivery_id", task.DeliveryID,
@@ -690,7 +691,7 @@ func (e *Engine) retrySweep(ctx context.Context) {
// sweepOrphanedRetries scans all webhooks for retrying deliveries whose
// backoff period has elapsed. For each eligible delivery, it builds a
// DeliveryTask and sends it to the retry channel. If the channel is
// Task and sends it to the retry channel. If the channel is
// still full, the delivery is skipped and will be retried on the next
// sweep cycle.
func (e *Engine) sweepOrphanedRetries(ctx context.Context) {
@@ -805,7 +806,7 @@ func (e *Engine) sweepWebhookRetries(ctx context.Context, webhookID string) {
bodyPtr = &bodyStr
}
task := DeliveryTask{
task := Task{
DeliveryID: d.ID,
EventID: d.EventID,
WebhookID: webhookID,
@@ -835,7 +836,7 @@ func (e *Engine) sweepWebhookRetries(ctx context.Context, webhookID string) {
}
}
func (e *Engine) processDelivery(ctx context.Context, webhookDB *gorm.DB, d *database.Delivery, task *DeliveryTask) {
func (e *Engine) processDelivery(ctx context.Context, webhookDB *gorm.DB, d *database.Delivery, task *Task) {
switch d.Target.Type {
case database.TargetTypeHTTP:
e.deliverHTTP(ctx, webhookDB, d, task)
@@ -854,7 +855,7 @@ func (e *Engine) processDelivery(ctx context.Context, webhookDB *gorm.DB, d *dat
}
}
func (e *Engine) deliverHTTP(_ context.Context, webhookDB *gorm.DB, d *database.Delivery, task *DeliveryTask) {
func (e *Engine) deliverHTTP(_ context.Context, webhookDB *gorm.DB, d *database.Delivery, task *Task) {
cfg, err := e.parseHTTPConfig(d.Target.Config)
if err != nil {
e.log.Error("invalid HTTP target config",
@@ -940,7 +941,7 @@ func (e *Engine) deliverHTTP(_ context.Context, webhookDB *gorm.DB, d *database.
e.updateDeliveryStatus(webhookDB, d, database.DeliveryStatusRetrying)
// Schedule a timer for the next retry with exponential backoff.
// The timer fires a DeliveryTask into the retry channel carrying
// The timer fires a Task into the retry channel carrying
// all data needed for the next attempt.
shift := attemptNum - 1
if shift > 30 {
@@ -1038,7 +1039,7 @@ func (e *Engine) deliverSlack(webhookDB *gorm.DB, d *database.Delivery) {
e.updateDeliveryStatus(webhookDB, d, database.DeliveryStatusFailed)
return
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
body, readErr := io.ReadAll(io.LimitReader(resp.Body, maxBodyLog))
if readErr != nil {
@@ -1082,10 +1083,10 @@ func FormatSlackMessage(event *database.Event) string {
var b strings.Builder
b.WriteString("*Webhook Event Received*\n")
b.WriteString(fmt.Sprintf("*Method:* `%s`\n", event.Method))
b.WriteString(fmt.Sprintf("*Content-Type:* `%s`\n", event.ContentType))
b.WriteString(fmt.Sprintf("*Timestamp:* `%s`\n", event.CreatedAt.UTC().Format(time.RFC3339)))
b.WriteString(fmt.Sprintf("*Body Size:* %d bytes\n", len(event.Body)))
fmt.Fprintf(&b, "*Method:* `%s`\n", event.Method)
fmt.Fprintf(&b, "*Content-Type:* `%s`\n", event.ContentType)
fmt.Fprintf(&b, "*Timestamp:* `%s`\n", event.CreatedAt.UTC().Format(time.RFC3339))
fmt.Fprintf(&b, "*Body Size:* %d bytes\n", len(event.Body))
if event.Body == "" {
b.WriteString("\n_(empty body)_\n")
@@ -1172,7 +1173,7 @@ func (e *Engine) doHTTPRequest(cfg *HTTPTargetConfig, event *database.Event) (st
if err != nil {
return 0, "", durationMs, fmt.Errorf("sending request: %w", err)
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
body, readErr := io.ReadAll(io.LimitReader(resp.Body, maxBodyLog))
if readErr != nil {