refactor: use pinned golangci-lint Docker image for linting

Refactor Dockerfile to use a separate lint stage with a pinned
golangci-lint v2.11.3 Docker image instead of installing
golangci-lint via curl in the builder stage. This follows the
pattern used by sneak/pixa.

Changes:
- Dockerfile: separate lint stage using golangci/golangci-lint:v2.11.3
  (Debian-based, pinned by sha256) with a COPY --from=lint dependency
  (see the sketch after this list)
- Bump Go from 1.24 to 1.26.1 (golang:1.26.1-bookworm, pinned)
- Bump golangci-lint from v1.64.8 to v2.11.3
- Migrate .golangci.yml from v1 to v2 format (same linters, format only)
- All Docker images pinned by sha256 digest
- Fix all lint issues from the v2 linter upgrade:
  - Add package comments to all packages
  - Add doc comments to all exported types, functions, and methods
  - Fix unchecked errors (errcheck)
  - Fix unused parameters (revive)
  - Fix gosec warnings (MaxBytesReader for form parsing)
  - Fix staticcheck suggestions (fmt.Fprintf instead of WriteString)
  - Rename DeliveryTask to Task to avoid stutter (delivery.Task)
  - Rename shadowed builtin 'max' parameter
- Update README.md version requirements
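
The lint-stage layout described in the first bullet has roughly the following shape. This is a minimal sketch, not the repository's actual Dockerfile: the sha256 digests are placeholders, and the source paths and build target are hypothetical.

FROM golangci/golangci-lint:v2.11.3@sha256:<digest> AS lint
WORKDIR /src
COPY . .
# Run the linter in its own stage so the lint toolchain never reaches the builder.
RUN golangci-lint run ./...

FROM golang:1.26.1-bookworm@sha256:<digest> AS builder
WORKDIR /src
COPY . .
# BuildKit skips stages nothing depends on, so copying an artifact from the
# lint stage forces linting to run (and pass) before the build can succeed.
COPY --from=lint /src/.golangci.yml /tmp/lint-ok
RUN CGO_ENABLED=0 go build -o /out/app ./cmd/app

Per the bullets above, the commit pins both images by sha256 digest rather than by tag alone.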
Author: clawbot
Date: 2026-03-17 05:46:03 -07:00
parent f003ec7141
commit 4d5ebfd692
32 changed files with 236 additions and 175 deletions


@@ -35,7 +35,7 @@ func testWebhookDB(t *testing.T) *gorm.DB {
     sqlDB, err := sql.Open("sqlite", dsn)
     require.NoError(t, err)
-    t.Cleanup(func() { sqlDB.Close() })
+    t.Cleanup(func() { _ = sqlDB.Close() })
     db, err := gorm.Open(sqlite.Dialector{Conn: sqlDB}, &gorm.Config{})
     require.NoError(t, err)
@@ -56,8 +56,8 @@ func testEngine(t *testing.T, workers int) *Engine {
     return &Engine{
         log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})),
         client: &http.Client{Timeout: 5 * time.Second},
-        deliveryCh: make(chan DeliveryTask, deliveryChannelSize),
-        retryCh: make(chan DeliveryTask, retryChannelSize),
+        deliveryCh: make(chan Task, deliveryChannelSize),
+        retryCh: make(chan Task, retryChannelSize),
         workers: workers,
     }
 }
@@ -108,13 +108,13 @@ func TestNotify_NonBlocking(t *testing.T) {
     // Fill the delivery channel to capacity
     for i := 0; i < deliveryChannelSize; i++ {
-        e.deliveryCh <- DeliveryTask{DeliveryID: fmt.Sprintf("fill-%d", i)}
+        e.deliveryCh <- Task{DeliveryID: fmt.Sprintf("fill-%d", i)}
     }
     // Notify should NOT block even though channel is full
     done := make(chan struct{})
     go func() {
-        e.Notify([]DeliveryTask{
+        e.Notify([]Task{
             {DeliveryID: "overflow-1"},
             {DeliveryID: "overflow-2"},
         })
@@ -134,10 +134,10 @@ func TestDeliverHTTP_Success(t *testing.T) {
     db := testWebhookDB(t)
     var received atomic.Bool
-    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
         received.Store(true)
         w.WriteHeader(http.StatusOK)
-        fmt.Fprint(w, `{"ok":true}`)
+        _, _ = fmt.Fprint(w, `{"ok":true}`)
     }))
     defer ts.Close()
@@ -147,7 +147,7 @@ func TestDeliverHTTP_Success(t *testing.T) {
     event := seedEvent(t, db, `{"hello":"world"}`)
     delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
-    task := &DeliveryTask{
+    task := &Task{
         DeliveryID: delivery.ID,
         EventID: event.ID,
         WebhookID: event.WebhookID,
@@ -194,7 +194,7 @@ func TestDeliverHTTP_Failure(t *testing.T) {
     ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
         w.WriteHeader(http.StatusInternalServerError)
-        fmt.Fprint(w, "internal error")
+        _, _ = fmt.Fprint(w, "internal error")
     }))
     defer ts.Close()
@@ -204,7 +204,7 @@ func TestDeliverHTTP_Failure(t *testing.T) {
     event := seedEvent(t, db, `{"test":true}`)
     delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
-    task := &DeliveryTask{
+    task := &Task{
         DeliveryID: delivery.ID,
         EventID: event.ID,
         WebhookID: event.WebhookID,
@@ -322,7 +322,7 @@ func TestDeliverHTTP_WithRetries_Success(t *testing.T) {
     event := seedEvent(t, db, `{"retry":"ok"}`)
     delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
-    task := &DeliveryTask{
+    task := &Task{
         DeliveryID: delivery.ID,
         EventID: event.ID,
         WebhookID: event.WebhookID,
@@ -376,7 +376,7 @@ func TestDeliverHTTP_MaxRetriesExhausted(t *testing.T) {
     delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusRetrying)
     maxRetries := 3
-    task := &DeliveryTask{
+    task := &Task{
         DeliveryID: delivery.ID,
         EventID: event.ID,
         WebhookID: event.WebhookID,
@@ -427,7 +427,7 @@ func TestDeliverHTTP_SchedulesRetryOnFailure(t *testing.T) {
     event := seedEvent(t, db, `{"retry":"schedule"}`)
     delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
-    task := &DeliveryTask{
+    task := &Task{
         DeliveryID: delivery.ID,
         EventID: event.ID,
         WebhookID: event.WebhookID,
@@ -494,8 +494,8 @@ func TestExponentialBackoff_Durations(t *testing.T) {
             shift = 30
         }
         backoff := time.Duration(1<<uint(shift)) * time.Second //nolint:gosec // bounded above
-        assert.Equal(t, expected[attemptNum-1], backoff,
-            "backoff for attempt %d should be %v", attemptNum, expected[attemptNum-1])
+        assert.Equal(t, expected[attemptNum-1], backoff, //nolint:gosec // bounded by loop range
+            "backoff for attempt %d should be %v", attemptNum, expected[attemptNum-1]) //nolint:gosec // bounded by loop range
     }
 }
@@ -618,10 +618,10 @@ func TestWorkerPool_BoundedConcurrency(t *testing.T) {
         tasks[i].ID = delivery.ID
     }
-    // Build DeliveryTask structs for each delivery (needed by deliverHTTP)
-    deliveryTasks := make([]DeliveryTask, numTasks)
+    // Build Task structs for each delivery (needed by deliverHTTP)
+    deliveryTasks := make([]Task, numTasks)
     for i := 0; i < numTasks; i++ {
-        deliveryTasks[i] = DeliveryTask{
+        deliveryTasks[i] = Task{
             DeliveryID: tasks[i].ID,
             EventID: tasks[i].EventID,
             TargetID: tasks[i].TargetID,
@@ -687,7 +687,7 @@ func TestDeliverHTTP_CircuitBreakerBlocks(t *testing.T) {
     event := seedEvent(t, db, `{"cb":"blocked"}`)
     delivery := seedDelivery(t, db, event.ID, targetID, database.DeliveryStatusPending)
-    task := &DeliveryTask{
+    task := &Task{
         DeliveryID: delivery.ID,
         EventID: event.ID,
         WebhookID: event.WebhookID,
@@ -778,7 +778,7 @@ func TestScheduleRetry_SendsToRetryChannel(t *testing.T) {
     t.Parallel()
     e := testEngine(t, 1)
-    task := DeliveryTask{
+    task := Task{
         DeliveryID: uuid.New().String(),
         EventID: uuid.New().String(),
         WebhookID: uuid.New().String(),
@@ -802,13 +802,13 @@ func TestScheduleRetry_DropsWhenChannelFull(t *testing.T) {
     t.Parallel()
     e := &Engine{
         log: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})),
-        retryCh: make(chan DeliveryTask, 1), // tiny buffer
+        retryCh: make(chan Task, 1), // tiny buffer
     }
     // Fill the retry channel
-    e.retryCh <- DeliveryTask{DeliveryID: "fill"}
+    e.retryCh <- Task{DeliveryID: "fill"}
-    task := DeliveryTask{
+    task := Task{
         DeliveryID: "overflow",
         AttemptNum: 2,
     }
@@ -915,7 +915,7 @@ func TestProcessDelivery_RoutesToCorrectHandler(t *testing.T) {
         }
         d.ID = delivery.ID
-        task := &DeliveryTask{
+        task := &Task{
             DeliveryID: delivery.ID,
             TargetType: tt.targetType,
         }
@@ -1054,7 +1054,7 @@ func TestDeliverSlack_Success(t *testing.T) {
         receivedBody = string(bodyBytes)
         assert.Equal(t, "application/json", r.Header.Get("Content-Type"))
         w.WriteHeader(http.StatusOK)
-        fmt.Fprint(w, "ok")
+        _, _ = fmt.Fprint(w, "ok")
     }))
     defer ts.Close()
@@ -1107,7 +1107,7 @@ func TestDeliverSlack_Failure(t *testing.T) {
     ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
         w.WriteHeader(http.StatusForbidden)
-        fmt.Fprint(w, "invalid_token")
+        _, _ = fmt.Fprint(w, "invalid_token")
     }))
     defer ts.Close()
@@ -1203,7 +1203,7 @@ func TestProcessDelivery_RoutesToSlack(t *testing.T) {
         }
         d.ID = dlv.ID
-        task := &DeliveryTask{
+        task := &Task{
             DeliveryID: dlv.ID,
             TargetType: database.TargetTypeSlack,
         }