Compare commits
d36e511032...feat/upaas (1 commit)

Commit: 35af9c99d5

@@ -6,3 +6,4 @@
node_modules
bin/
data/
deploy/

@@ -75,4 +75,7 @@ WORKDIR /var/lib/pixa

EXPOSE 8080

HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
    CMD wget -q --spider http://localhost:8080/.well-known/healthcheck.json

ENTRYPOINT ["/usr/local/bin/pixad", "--config", "/etc/pixa/config.yml"]

README.md (+11)

@@ -125,6 +125,17 @@ See `config.example.yml` for all options with defaults.
- **Metrics**: Prometheus
- **Logging**: stdlib slog

## Deployment

Pixa is deployed via
[µPaaS](https://git.eeqj.de/sneak/upaas) on `fsn1app1`
(paas.datavi.be). Pushes to `main` trigger automatic builds and
deployments. The Dockerfile includes a `HEALTHCHECK` that probes
`/.well-known/healthcheck.json`.

See [deploy/README.md](deploy/README.md) for the full µPaaS app
configuration, volume mounts, and production setup instructions.

## TODO

See [TODO.md](TODO.md) for the full prioritized task list.

deploy/README.md (new file, +78)

@@ -0,0 +1,78 @@
# Pixa Deployment via µPaaS

Pixa is deployed on `fsn1app1` via
[µPaaS](https://git.eeqj.de/sneak/upaas) (paas.datavi.be).

## µPaaS App Configuration

Create the app in the µPaaS web UI with these settings:

| Setting | Value |
| --- | --- |
| **App name** | `pixa` |
| **Repo URL** | `git@git.eeqj.de:sneak/pixa.git` |
| **Branch** | `main` |
| **Dockerfile path** | `Dockerfile` |

### Environment Variables

| Variable | Description | Required |
| --- | --- | --- |
| `PORT` | HTTP listen port (default: 8080) | No |

Configuration is provided via the config file baked into the Docker
image at `/etc/pixa/config.yml`. To override it, mount a custom
config file as a volume (see below).
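
Pixa reads this file at startup via the `--config /etc/pixa/config.yml` flag
in the image's `ENTRYPOINT`. As a rough sketch of the consuming side — the
struct and function names are hypothetical and `gopkg.in/yaml.v3` is an
assumed dependency; the real loader lives in Pixa's source:

```go
package config

import (
    "fmt"
    "os"

    "gopkg.in/yaml.v3" // assumed YAML library; Pixa's real loader may differ
)

// Config mirrors the keys in config.example.yml (see the production
// example later in this document). Names here are illustrative only.
type Config struct {
    Port            int      `yaml:"port"`
    Debug           bool     `yaml:"debug"`
    MaintenanceMode bool     `yaml:"maintenance_mode"`
    StateDir        string   `yaml:"state_dir"`
    SigningKey      string   `yaml:"signing_key"`
    WhitelistHosts  []string `yaml:"whitelist_hosts"`
    AllowHTTP       bool     `yaml:"allow_http"`
}

// Load reads and parses the YAML config at path, e.g. the
// /etc/pixa/config.yml mounted into the container.
func Load(path string) (*Config, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("failed to read config %s: %w", path, err)
    }

    var cfg Config
    if err := yaml.Unmarshal(data, &cfg); err != nil {
        return nil, fmt.Errorf("failed to parse config %s: %w", path, err)
    }

    return &cfg, nil
}
```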

### Volumes

| Host Path | Container Path | Description |
| --- | --- | --- |
| `/srv/pixa/data` | `/var/lib/pixa` | SQLite database and image cache |
| `/srv/pixa/config.yml` | `/etc/pixa/config.yml` | Production config (signing key, whitelist, etc.) |

### Ports

| Host Port | Container Port | Protocol |
| --- | --- | --- |
| (assigned) | 8080 | TCP |

### Docker Network

Attach to the shared reverse-proxy network if using Caddy/Traefik
for TLS termination.

## Production Configuration

Copy `config.example.yml` from the repo root and customize it for
production:

```yaml
port: 8080
debug: false
maintenance_mode: false
state_dir: /var/lib/pixa
signing_key: "<generate with: openssl rand -base64 32>"
whitelist_hosts:
  - s3.sneak.cloud
  - static.sneak.cloud
  - sneak.berlin
allow_http: false
```

**Important:** Generate a unique `signing_key` for production. Never
use the default placeholder value.
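
If `openssl` is unavailable on the machine preparing the config, an
equivalent one-off Go program (a sketch, not part of the Pixa tree)
produces the same kind of key:

```go
package main

import (
    "crypto/rand"
    "encoding/base64"
    "fmt"
)

func main() {
    // 32 bytes from the CSPRNG, base64-encoded —
    // the same output shape as `openssl rand -base64 32`.
    key := make([]byte, 32)
    if _, err := rand.Read(key); err != nil {
        panic(err)
    }

    fmt.Println(base64.StdEncoding.EncodeToString(key))
}
```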

## Health Check

The Dockerfile includes a `HEALTHCHECK` instruction that probes
`/.well-known/healthcheck.json` every 30 seconds. µPaaS verifies
container health 60 seconds after deployment.
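
Any HTTP 2xx response satisfies the probe, since `wget -q --spider` checks
only the status code. For illustration, a handler for such an endpoint can
be as small as the following sketch (hypothetical names and response body;
Pixa's actual handler may differ):

```go
package main

import "net/http"

// healthcheckHandler answers the Dockerfile HEALTHCHECK probe.
func healthcheckHandler(w http.ResponseWriter, r *http.Request) {
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(http.StatusOK)
    _, _ = w.Write([]byte(`{"status":"ok"}`))
}

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/.well-known/healthcheck.json", healthcheckHandler)

    // Listen on the port the container EXPOSEs (8080 by default).
    _ = http.ListenAndServe(":8080", mux)
}
```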

## Deployment Flow

1. Push to `main` triggers the Gitea webhook
2. µPaaS clones the repo and runs `docker build .`
3. The Dockerfile runs `make check` (format, lint, test) during build
4. On success, µPaaS stops the old container and starts the new one
5. After 60 seconds, µPaaS checks container health

@@ -35,41 +35,6 @@ type Database struct {
    config *config.Config
}

// ParseMigrationVersion extracts the numeric version prefix from a migration
// filename. Filenames must follow the pattern "<version>.sql" or
// "<version>_<description>.sql", where version is a zero-padded numeric
// string (e.g. "001", "002"). Returns the version string and an error if
// the filename does not match the expected pattern.
func ParseMigrationVersion(filename string) (string, error) {
    name := strings.TrimSuffix(filename, filepath.Ext(filename))
    if name == "" {
        return "", fmt.Errorf("invalid migration filename %q: empty name", filename)
    }

    // Split on underscore to separate version from description.
    // If there's no underscore, the entire stem is the version.
    version := name
    if idx := strings.IndexByte(name, '_'); idx >= 0 {
        version = name[:idx]
    }

    if version == "" {
        return "", fmt.Errorf("invalid migration filename %q: empty version prefix", filename)
    }

    // Validate the version is purely numeric.
    for _, ch := range version {
        if ch < '0' || ch > '9' {
            return "", fmt.Errorf(
                "invalid migration filename %q: version %q contains non-numeric character %q",
                filename, version, string(ch),
            )
        }
    }

    return version, nil
}

// New creates a new Database instance.
func New(lc fx.Lifecycle, params Params) (*Database, error) {
    s := &Database{

@@ -119,33 +84,96 @@ func (s *Database) connect(ctx context.Context) error {
    s.db = db
    s.log.Info("database connected")

    return ApplyMigrations(ctx, s.db, s.log)
    return s.runMigrations(ctx)
}

// collectMigrations reads the embedded schema directory and returns
// migration filenames sorted lexicographically.
func collectMigrations() ([]string, error) {
    entries, err := schemaFS.ReadDir("schema")
func (s *Database) runMigrations(ctx context.Context) error {
    // Create migrations tracking table
    _, err := s.db.ExecContext(ctx, `
        CREATE TABLE IF NOT EXISTS schema_migrations (
            version TEXT PRIMARY KEY,
            applied_at DATETIME DEFAULT CURRENT_TIMESTAMP
        )
    `)
    if err != nil {
        return nil, fmt.Errorf("failed to read schema directory: %w", err)
        return fmt.Errorf("failed to create migrations table: %w", err)
    }

    var migrations []string
    // Get list of migration files
    entries, err := schemaFS.ReadDir("schema")
    if err != nil {
        return fmt.Errorf("failed to read schema directory: %w", err)
    }

    // Sort migration files by name (001.sql, 002.sql, etc.)
    var migrations []string
    for _, entry := range entries {
        if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".sql") {
            migrations = append(migrations, entry.Name())
        }
    }

    sort.Strings(migrations)

    return migrations, nil
    // Apply each migration that hasn't been applied yet
    for _, migration := range migrations {
        version := strings.TrimSuffix(migration, filepath.Ext(migration))

        // Check if already applied
        var count int
        err := s.db.QueryRowContext(ctx,
            "SELECT COUNT(*) FROM schema_migrations WHERE version = ?",
            version,
        ).Scan(&count)
        if err != nil {
            return fmt.Errorf("failed to check migration status: %w", err)
        }

        if count > 0 {
            s.log.Debug("migration already applied", "version", version)

            continue
        }

        // Read and apply migration
        content, err := schemaFS.ReadFile(filepath.Join("schema", migration))
        if err != nil {
            return fmt.Errorf("failed to read migration %s: %w", migration, err)
        }

        s.log.Info("applying migration", "version", version)

        _, err = s.db.ExecContext(ctx, string(content))
        if err != nil {
            return fmt.Errorf("failed to apply migration %s: %w", migration, err)
        }

        // Record migration as applied
        _, err = s.db.ExecContext(ctx,
            "INSERT INTO schema_migrations (version) VALUES (?)",
            version,
        )
        if err != nil {
            return fmt.Errorf("failed to record migration %s: %w", migration, err)
        }

        s.log.Info("migration applied successfully", "version", version)
    }

    return nil
}

// ensureMigrationsTable creates the schema_migrations tracking table if
// it does not already exist.
func ensureMigrationsTable(ctx context.Context, db *sql.DB) error {
// DB returns the underlying sql.DB.
func (s *Database) DB() *sql.DB {
    return s.db
}

// ApplyMigrations applies all migrations to the given database.
// This is useful for testing where you want to use the real schema
// without the full fx lifecycle.
func ApplyMigrations(db *sql.DB) error {
    ctx := context.Background()

    // Create migrations tracking table
    _, err := db.ExecContext(ctx, `
        CREATE TABLE IF NOT EXISTS schema_migrations (
            version TEXT PRIMARY KEY,

@@ -156,32 +184,27 @@ func ensureMigrationsTable(ctx context.Context, db *sql.DB) error {
        return fmt.Errorf("failed to create migrations table: %w", err)
    }

    return nil
}

// ApplyMigrations applies all pending migrations to db. An optional logger
// may be provided for informational output; pass nil for silent operation.
// This is exported so tests can apply the real schema without the full fx
// lifecycle.
func ApplyMigrations(ctx context.Context, db *sql.DB, log *slog.Logger) error {
    if err := ensureMigrationsTable(ctx, db); err != nil {
        return err
    }

    migrations, err := collectMigrations()
    // Get list of migration files
    entries, err := schemaFS.ReadDir("schema")
    if err != nil {
        return err
        return fmt.Errorf("failed to read schema directory: %w", err)
    }

    for _, migration := range migrations {
        version, parseErr := ParseMigrationVersion(migration)
        if parseErr != nil {
            return parseErr
    // Sort migration files by name (001.sql, 002.sql, etc.)
    var migrations []string
    for _, entry := range entries {
        if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".sql") {
            migrations = append(migrations, entry.Name())
        }
    }
    sort.Strings(migrations)

        // Check if already applied.
    // Apply each migration that hasn't been applied yet
    for _, migration := range migrations {
        version := strings.TrimSuffix(migration, filepath.Ext(migration))

        // Check if already applied
        var count int

        err := db.QueryRowContext(ctx,
            "SELECT COUNT(*) FROM schema_migrations WHERE version = ?",
            version,

@@ -191,46 +214,29 @@ func ApplyMigrations(ctx context.Context, db *sql.DB, log *slog.Logger) error {
        }

        if count > 0 {
            if log != nil {
                log.Debug("migration already applied", "version", version)
            }

            continue
        }

        // Read and apply migration.
        content, readErr := schemaFS.ReadFile(filepath.Join("schema", migration))
        if readErr != nil {
            return fmt.Errorf("failed to read migration %s: %w", migration, readErr)
        // Read and apply migration
        content, err := schemaFS.ReadFile(filepath.Join("schema", migration))
        if err != nil {
            return fmt.Errorf("failed to read migration %s: %w", migration, err)
        }

        if log != nil {
            log.Info("applying migration", "version", version)
        _, err = db.ExecContext(ctx, string(content))
        if err != nil {
            return fmt.Errorf("failed to apply migration %s: %w", migration, err)
        }

        _, execErr := db.ExecContext(ctx, string(content))
        if execErr != nil {
            return fmt.Errorf("failed to apply migration %s: %w", migration, execErr)
        }

        // Record migration as applied.
        _, recErr := db.ExecContext(ctx,
        // Record migration as applied
        _, err = db.ExecContext(ctx,
            "INSERT INTO schema_migrations (version) VALUES (?)",
            version,
        )
        if recErr != nil {
            return fmt.Errorf("failed to record migration %s: %w", migration, recErr)
        }

        if log != nil {
            log.Info("migration applied successfully", "version", version)
        if err != nil {
            return fmt.Errorf("failed to record migration %s: %w", migration, err)
        }
    }

    return nil
}

// DB returns the underlying sql.DB.
func (s *Database) DB() *sql.DB {
    return s.db
}

@@ -1,155 +0,0 @@
package database

import (
    "context"
    "database/sql"
    "testing"

    _ "modernc.org/sqlite" // SQLite driver registration
)

func TestParseMigrationVersion(t *testing.T) {
    tests := []struct {
        name     string
        filename string
        want     string
        wantErr  bool
    }{
        {
            name:     "version only",
            filename: "001.sql",
            want:     "001",
        },
        {
            name:     "version with description",
            filename: "001_initial_schema.sql",
            want:     "001",
        },
        {
            name:     "multi-digit version",
            filename: "042_add_indexes.sql",
            want:     "042",
        },
        {
            name:     "long version number",
            filename: "00001_long_prefix.sql",
            want:     "00001",
        },
        {
            name:     "description with multiple underscores",
            filename: "003_add_user_auth_tables.sql",
            want:     "003",
        },
        {
            name:     "empty filename",
            filename: ".sql",
            wantErr:  true,
        },
        {
            name:     "leading underscore",
            filename: "_description.sql",
            wantErr:  true,
        },
        {
            name:     "non-numeric version",
            filename: "abc_migration.sql",
            wantErr:  true,
        },
        {
            name:     "mixed alphanumeric version",
            filename: "001a_migration.sql",
            wantErr:  true,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := ParseMigrationVersion(tt.filename)
            if tt.wantErr {
                if err == nil {
                    t.Errorf("ParseMigrationVersion(%q) expected error, got %q", tt.filename, got)
                }

                return
            }

            if err != nil {
                t.Errorf("ParseMigrationVersion(%q) unexpected error: %v", tt.filename, err)

                return
            }

            if got != tt.want {
                t.Errorf("ParseMigrationVersion(%q) = %q, want %q", tt.filename, got, tt.want)
            }
        })
    }
}

func TestApplyMigrations(t *testing.T) {
    db, err := sql.Open("sqlite", ":memory:")
    if err != nil {
        t.Fatalf("failed to open in-memory database: %v", err)
    }
    defer db.Close()

    // Apply migrations should succeed.
    if err := ApplyMigrations(context.Background(), db, nil); err != nil {
        t.Fatalf("ApplyMigrations failed: %v", err)
    }

    // Verify the schema_migrations table recorded the version.
    var version string

    err = db.QueryRowContext(context.Background(),
        "SELECT version FROM schema_migrations LIMIT 1",
    ).Scan(&version)
    if err != nil {
        t.Fatalf("failed to query schema_migrations: %v", err)
    }

    if version != "001" {
        t.Errorf("expected version %q, got %q", "001", version)
    }

    // Verify a table from the migration exists (source_content).
    var tableName string

    err = db.QueryRowContext(context.Background(),
        "SELECT name FROM sqlite_master WHERE type='table' AND name='source_content'",
    ).Scan(&tableName)
    if err != nil {
        t.Fatalf("expected source_content table to exist: %v", err)
    }
}

func TestApplyMigrationsIdempotent(t *testing.T) {
    db, err := sql.Open("sqlite", ":memory:")
    if err != nil {
        t.Fatalf("failed to open in-memory database: %v", err)
    }
    defer db.Close()

    // Apply twice should succeed (idempotent).
    if err := ApplyMigrations(context.Background(), db, nil); err != nil {
        t.Fatalf("first ApplyMigrations failed: %v", err)
    }

    if err := ApplyMigrations(context.Background(), db, nil); err != nil {
        t.Fatalf("second ApplyMigrations failed: %v", err)
    }

    // Should still have exactly one migration recorded.
    var count int

    err = db.QueryRowContext(context.Background(),
        "SELECT COUNT(*) FROM schema_migrations",
    ).Scan(&count)
    if err != nil {
        t.Fatalf("failed to count schema_migrations: %v", err)
    }

    if count != 1 {
        t.Errorf("expected 1 migration record, got %d", count)
    }
}

@@ -82,7 +82,7 @@ func setupTestDB(t *testing.T) *sql.DB {
        t.Fatalf("failed to open test db: %v", err)
    }

    if err := database.ApplyMigrations(context.Background(), db, nil); err != nil {
    if err := database.ApplyMigrations(db); err != nil {
        t.Fatalf("failed to apply migrations: %v", err)
    }

@@ -26,45 +26,20 @@ func initVips() {
// Images larger than this are rejected to prevent DoS via decompression bombs.
const MaxInputDimension = 8192

// DefaultMaxInputBytes is the default maximum input size in bytes (50 MiB).
// This matches the default upstream fetcher limit.
const DefaultMaxInputBytes = 50 << 20

// ErrInputTooLarge is returned when input image dimensions exceed MaxInputDimension.
var ErrInputTooLarge = errors.New("input image dimensions exceed maximum")

// ErrInputDataTooLarge is returned when the raw input data exceeds the configured byte limit.
var ErrInputDataTooLarge = errors.New("input data exceeds maximum allowed size")

// ErrUnsupportedOutputFormat is returned when the requested output format is not supported.
var ErrUnsupportedOutputFormat = errors.New("unsupported output format")

// ImageProcessor implements the Processor interface using libvips via govips.
type ImageProcessor struct {
    maxInputBytes int64
}
type ImageProcessor struct{}

// Params holds configuration for creating an ImageProcessor.
// Zero values use sensible defaults (MaxInputBytes defaults to DefaultMaxInputBytes).
type Params struct {
    // MaxInputBytes is the maximum allowed input size in bytes.
    // If <= 0, DefaultMaxInputBytes is used.
    MaxInputBytes int64
}

// New creates a new image processor with the given parameters.
// A zero-value Params{} uses sensible defaults.
func New(params Params) *ImageProcessor {
// NewImageProcessor creates a new image processor.
func NewImageProcessor() *ImageProcessor {
    initVips()

    maxInputBytes := params.MaxInputBytes
    if maxInputBytes <= 0 {
        maxInputBytes = DefaultMaxInputBytes
    }

    return &ImageProcessor{
        maxInputBytes: maxInputBytes,
    }
    return &ImageProcessor{}
}

// Process transforms an image according to the request.

@@ -73,20 +48,12 @@ func (p *ImageProcessor) Process(
    input io.Reader,
    req *ImageRequest,
) (*ProcessResult, error) {
    // Read input with a size limit to prevent unbounded memory consumption.
    // We read at most maxInputBytes+1 so we can detect if the input exceeds
    // the limit without consuming additional memory.
    limited := io.LimitReader(input, p.maxInputBytes+1)

    data, err := io.ReadAll(limited)
    // Read input
    data, err := io.ReadAll(input)
    if err != nil {
        return nil, fmt.Errorf("failed to read input: %w", err)
    }

    if int64(len(data)) > p.maxInputBytes {
        return nil, ErrInputDataTooLarge
    }

    // Decode image
    img, err := vips.NewImageFromBuffer(data)
    if err != nil {

@@ -71,7 +71,7 @@ func createTestPNG(t *testing.T, width, height int) []byte {
}

func TestImageProcessor_ResizeJPEG(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    input := createTestJPEG(t, 800, 600)

@@ -118,7 +118,7 @@ func TestImageProcessor_ResizeJPEG(t *testing.T) {
}

func TestImageProcessor_ConvertToPNG(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    input := createTestJPEG(t, 200, 150)

@@ -151,7 +151,7 @@ func TestImageProcessor_ConvertToPNG(t *testing.T) {
}

func TestImageProcessor_OriginalSize(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    input := createTestJPEG(t, 640, 480)

@@ -179,7 +179,7 @@ func TestImageProcessor_OriginalSize(t *testing.T) {
}

func TestImageProcessor_FitContain(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    // 800x400 image (2:1 aspect) into 400x400 box with contain

@@ -206,7 +206,7 @@ func TestImageProcessor_FitContain(t *testing.T) {
}

func TestImageProcessor_ProportionalScale_WidthOnly(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    // 800x600 image, request width=400 height=0

@@ -236,7 +236,7 @@ func TestImageProcessor_ProportionalScale_WidthOnly(t *testing.T) {
}

func TestImageProcessor_ProportionalScale_HeightOnly(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    // 800x600 image, request width=0 height=300

@@ -266,7 +266,7 @@ func TestImageProcessor_ProportionalScale_HeightOnly(t *testing.T) {
}

func TestImageProcessor_ProcessPNG(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    input := createTestPNG(t, 400, 300)

@@ -298,7 +298,7 @@ func TestImageProcessor_ImplementsInterface(t *testing.T) {
}

func TestImageProcessor_SupportedFormats(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()

    inputFormats := proc.SupportedInputFormats()
    if len(inputFormats) == 0 {

@@ -312,7 +312,7 @@ func TestImageProcessor_SupportedFormats(t *testing.T) {
}

func TestImageProcessor_RejectsOversizedInput(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    // Create an image that exceeds MaxInputDimension (e.g., 10000x100)

@@ -337,7 +337,7 @@ func TestImageProcessor_RejectsOversizedInput(t *testing.T) {
}

func TestImageProcessor_RejectsOversizedInputHeight(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    // Create an image with oversized height

@@ -361,7 +361,7 @@ func TestImageProcessor_RejectsOversizedInputHeight(t *testing.T) {
}

func TestImageProcessor_AcceptsMaxDimensionInput(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    // Create an image at exactly MaxInputDimension - should be accepted

@@ -383,7 +383,7 @@ func TestImageProcessor_AcceptsMaxDimensionInput(t *testing.T) {
}

func TestImageProcessor_EncodeWebP(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    input := createTestJPEG(t, 200, 150)

@@ -426,7 +426,7 @@ func TestImageProcessor_EncodeWebP(t *testing.T) {
}

func TestImageProcessor_DecodeAVIF(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    // Load test AVIF file

@@ -465,73 +465,8 @@ func TestImageProcessor_DecodeAVIF(t *testing.T) {
    }
}

func TestImageProcessor_RejectsOversizedInputData(t *testing.T) {
    // Create a processor with a very small byte limit
    const limit = 1024
    proc := New(Params{MaxInputBytes: limit})
    ctx := context.Background()

    // Create a valid JPEG that exceeds the byte limit
    input := createTestJPEG(t, 800, 600) // will be well over 1 KiB
    if int64(len(input)) <= limit {
        t.Fatalf("test JPEG must exceed %d bytes, got %d", limit, len(input))
    }

    req := &ImageRequest{
        Size:    Size{Width: 100, Height: 75},
        Format:  FormatJPEG,
        Quality: 85,
        FitMode: FitCover,
    }

    _, err := proc.Process(ctx, bytes.NewReader(input), req)
    if err == nil {
        t.Fatal("Process() should reject input exceeding maxInputBytes")
    }

    if err != ErrInputDataTooLarge {
        t.Errorf("Process() error = %v, want ErrInputDataTooLarge", err)
    }
}

func TestImageProcessor_AcceptsInputWithinLimit(t *testing.T) {
    // Create a small image and set limit well above its size
    input := createTestJPEG(t, 10, 10)
    limit := int64(len(input)) * 10 // 10× headroom

    proc := New(Params{MaxInputBytes: limit})
    ctx := context.Background()

    req := &ImageRequest{
        Size:    Size{Width: 10, Height: 10},
        Format:  FormatJPEG,
        Quality: 85,
        FitMode: FitCover,
    }

    result, err := proc.Process(ctx, bytes.NewReader(input), req)
    if err != nil {
        t.Fatalf("Process() error = %v, want nil", err)
    }
    defer result.Content.Close()
}

func TestImageProcessor_DefaultMaxInputBytes(t *testing.T) {
    // Passing 0 should use the default
    proc := New(Params{})
    if proc.maxInputBytes != DefaultMaxInputBytes {
        t.Errorf("maxInputBytes = %d, want %d", proc.maxInputBytes, DefaultMaxInputBytes)
    }

    // Passing negative should also use the default
    proc = New(Params{MaxInputBytes: -1})
    if proc.maxInputBytes != DefaultMaxInputBytes {
        t.Errorf("maxInputBytes = %d, want %d", proc.maxInputBytes, DefaultMaxInputBytes)
    }
}

func TestImageProcessor_EncodeAVIF(t *testing.T) {
    proc := New(Params{})
    proc := NewImageProcessor()
    ctx := context.Background()

    input := createTestJPEG(t, 200, 150)

@@ -15,14 +15,13 @@ import (

// Service implements the ImageCache interface, orchestrating cache, fetcher, and processor.
type Service struct {
    cache           *Cache
    fetcher         Fetcher
    processor       Processor
    signer          *Signer
    whitelist       *HostWhitelist
    log             *slog.Logger
    allowHTTP       bool
    maxResponseSize int64
    cache     *Cache
    fetcher   Fetcher
    processor Processor
    signer    *Signer
    whitelist *HostWhitelist
    log       *slog.Logger
    allowHTTP bool
}

// ServiceConfig holds configuration for the image service.

@@ -51,17 +50,15 @@ func NewService(cfg *ServiceConfig) (*Service, error) {
        return nil, errors.New("signing key is required")
    }

    // Resolve fetcher config for defaults
    fetcherCfg := cfg.FetcherConfig
    if fetcherCfg == nil {
        fetcherCfg = DefaultFetcherConfig()
    }

    // Use custom fetcher if provided, otherwise create HTTP fetcher
    var fetcher Fetcher
    if cfg.Fetcher != nil {
        fetcher = cfg.Fetcher
    } else {
        fetcherCfg := cfg.FetcherConfig
        if fetcherCfg == nil {
            fetcherCfg = DefaultFetcherConfig()
        }
        fetcher = NewHTTPFetcher(fetcherCfg)
    }

@@ -77,17 +74,14 @@ func NewService(cfg *ServiceConfig) (*Service, error) {
        allowHTTP = cfg.FetcherConfig.AllowHTTP
    }

    maxResponseSize := fetcherCfg.MaxResponseSize

    return &Service{
        cache:           cfg.Cache,
        fetcher:         fetcher,
        processor:       New(Params{MaxInputBytes: maxResponseSize}),
        signer:          signer,
        whitelist:       NewHostWhitelist(cfg.Whitelist),
        log:             log,
        allowHTTP:       allowHTTP,
        maxResponseSize: maxResponseSize,
        cache:     cfg.Cache,
        fetcher:   fetcher,
        processor: NewImageProcessor(),
        signer:    signer,
        whitelist: NewHostWhitelist(cfg.Whitelist),
        log:       log,
        allowHTTP: allowHTTP,
    }, nil
}

@@ -152,40 +146,6 @@ func (s *Service) Get(ctx context.Context, req *ImageRequest) (*ImageResponse, e
    return response, nil
}

// loadCachedSource attempts to load source content from cache, returning nil
// if the cached data is unavailable or exceeds maxResponseSize.
func (s *Service) loadCachedSource(contentHash ContentHash) []byte {
    reader, err := s.cache.GetSourceContent(contentHash)
    if err != nil {
        s.log.Warn("failed to load cached source, fetching", "error", err)

        return nil
    }

    // Bound the read to maxResponseSize to prevent unbounded memory use
    // from unexpectedly large cached files.
    limited := io.LimitReader(reader, s.maxResponseSize+1)
    data, err := io.ReadAll(limited)
    _ = reader.Close()

    if err != nil {
        s.log.Warn("failed to read cached source, fetching", "error", err)

        return nil
    }

    if int64(len(data)) > s.maxResponseSize {
        s.log.Warn("cached source exceeds max response size, discarding",
            "hash", contentHash,
            "max_bytes", s.maxResponseSize,
        )

        return nil
    }

    return data
}

// processFromSourceOrFetch processes an image, using cached source content if available.
func (s *Service) processFromSourceOrFetch(
    ctx context.Context,

@@ -202,8 +162,22 @@ func (s *Service) processFromSourceOrFetch(
    var fetchBytes int64

    if contentHash != "" {
        // We have cached source - load it
        s.log.Debug("using cached source", "hash", contentHash)
        sourceData = s.loadCachedSource(contentHash)

        reader, err := s.cache.GetSourceContent(contentHash)
        if err != nil {
            s.log.Warn("failed to load cached source, fetching", "error", err)
            // Fall through to fetch
        } else {
            sourceData, err = io.ReadAll(reader)
            _ = reader.Close()

            if err != nil {
                s.log.Warn("failed to read cached source, fetching", "error", err)
                // Fall through to fetch
            }
        }
    }

    // Fetch from upstream if we don't have source data or it's empty

@@ -16,7 +16,7 @@ func setupStatsTestDB(t *testing.T) *sql.DB {
    if err != nil {
        t.Fatal(err)
    }
    if err := database.ApplyMigrations(context.Background(), db, nil); err != nil {
    if err := database.ApplyMigrations(db); err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() { db.Close() })

@@ -2,7 +2,6 @@ package imgcache

import (
    "bytes"
    "context"
    "database/sql"
    "image"
    "image/color"

@@ -194,7 +193,7 @@ func setupServiceTestDB(t *testing.T) *sql.DB {
    }

    // Use the real production schema via migrations
    if err := database.ApplyMigrations(context.Background(), db, nil); err != nil {
    if err := database.ApplyMigrations(db); err != nil {
        t.Fatalf("failed to apply migrations: %v", err)
    }