Compare commits
1 Commits
schema-con
...
58460b502b
| Author | SHA1 | Date |
|---|---|---|
| | 58460b502b | |
180
BUGS.md
180
BUGS.md
@@ -1,180 +0,0 @@
|
|||||||
# Bugs in µPaaS
|
|
||||||
|
|
||||||
## 1. Potential Race Condition in Log Writing
|
|
||||||
|
|
||||||
### Description
|
|
||||||
In the deployment service, when a deployment fails, the `failDeployment` function calls `writeLogsToFile` which may be called concurrently with the async log writer's flush operations. This could lead to partial or corrupted log files.
|
|
||||||
|
|
||||||
### Location
|
|
||||||
`internal/service/deploy/deploy.go:1169` in `failDeployment` function
|
|
||||||
|
|
||||||
### Proposed Fix
|
|
||||||
1. Add synchronization to ensure only one log write operation occurs at a time
|
|
||||||
2. Modify the `deploymentLogWriter` to track completion status and prevent concurrent writes
|
|
||||||
3. Add a wait mechanism in `failDeployment` to ensure any ongoing flush operations complete before writing logs to file
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Add a mutex to deploymentLogWriter
|
|
||||||
type deploymentLogWriter struct {
|
|
||||||
// existing fields...
|
|
||||||
mu sync.Mutex
|
|
||||||
writeMu sync.Mutex // Add this for file writing synchronization
|
|
||||||
done chan struct{}
|
|
||||||
flushed sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
// In writeLogsToFile, ensure exclusive access
|
|
||||||
func (svc *Service) writeLogsToFile(app *models.App, deployment *models.Deployment) {
|
|
||||||
svc.writeMu.Lock() // Add this mutex to Service struct
|
|
||||||
defer svc.writeMu.Unlock()
|
|
||||||
// existing code...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## 2. Incomplete Error Handling in Container Operations
|
|
||||||
|
|
||||||
### Description
|
|
||||||
In the Docker client's `performClone` function, if `createGitContainer` fails, the SSH key file created earlier is not cleaned up, causing a potential security risk.
|
|
||||||
|
|
||||||
### Location
|
|
||||||
`internal/docker/client.go:597` in `performClone` function
|
|
||||||
|
|
||||||
### Proposed Fix
|
|
||||||
Add proper cleanup using `defer` immediately after creating the SSH key file:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// After writing SSH key file (line 578)
|
|
||||||
keyFileCreated := false
|
|
||||||
err = os.WriteFile(cfg.keyFile, []byte(cfg.sshPrivateKey), sshKeyPermissions)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to write SSH key: %w", err)
|
|
||||||
}
|
|
||||||
keyFileCreated = true
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if keyFileCreated {
|
|
||||||
removeErr := os.Remove(cfg.keyFile)
|
|
||||||
if removeErr != nil {
|
|
||||||
c.log.Error("failed to remove SSH key file", "error", removeErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
```
|
|
||||||
|
|
||||||
## 3. Missing Context Cancellation Check During Build
|
|
||||||
|
|
||||||
### Description
|
|
||||||
In the deployment service's `streamBuildOutput` function, long-running Docker build operations may not properly respond to context cancellation, causing deployments to hang even when cancelled.
|
|
||||||
|
|
||||||
### Location
|
|
||||||
`internal/docker/client.go:542` in `streamBuildOutput` function
|
|
||||||
|
|
||||||
### Proposed Fix
|
|
||||||
Add context checking in the scanner loop:
|
|
||||||
|
|
||||||
```go
|
|
||||||
for scanner.Scan() {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
line := scanner.Bytes()
|
|
||||||
// existing code...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## 4. Inconsistent Container Removal in Error Cases
|
|
||||||
|
|
||||||
### Description
|
|
||||||
When deployment fails during container creation, the already-created container is not removed, leading to orphaned containers that consume resources.
|
|
||||||
|
|
||||||
### Location
|
|
||||||
`internal/service/deploy/deploy.go:969` in `createAndStartContainer` function
|
|
||||||
|
|
||||||
### Proposed Fix
|
|
||||||
Add cleanup of created container on start failure:
|
|
||||||
|
|
||||||
```go
|
|
||||||
containerID, err := svc.docker.CreateContainer(ctx, containerOpts)
|
|
||||||
if err != nil {
|
|
||||||
svc.notify.NotifyDeployFailed(ctx, app, deployment, err)
|
|
||||||
svc.failDeployment(ctx, app, deployment, fmt.Errorf("failed to create container: %w", err))
|
|
||||||
return "", fmt.Errorf("failed to create container: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add cleanup defer for error cases
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
// If we have a container ID but returning an error, clean it up
|
|
||||||
_ = svc.docker.RemoveContainer(context.Background(), containerID, true)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
startErr := svc.docker.StartContainer(ctx, containerID)
|
|
||||||
if startErr != nil {
|
|
||||||
svc.notify.NotifyDeployFailed(ctx, app, deployment, startErr)
|
|
||||||
svc.failDeployment(ctx, app, deployment, fmt.Errorf("failed to start container: %w", startErr))
|
|
||||||
err = startErr // Set err so defer cleanup runs
|
|
||||||
return "", fmt.Errorf("failed to start container: %w", startErr)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## 5. Potential Data Race in Active Deployments Tracking
|
|
||||||
|
|
||||||
### Description
|
|
||||||
The `activeDeploys` sync.Map in the deployment service may have race conditions when multiple concurrent deployments try to access the same app's deployment state.
|
|
||||||
|
|
||||||
### Location
|
|
||||||
`internal/service/deploy/deploy.go:226` and related functions
|
|
||||||
|
|
||||||
### Proposed Fix
|
|
||||||
Add proper locking around active deploy operations:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Add a mutex for active deploy operations
|
|
||||||
type Service struct {
|
|
||||||
// existing fields...
|
|
||||||
activeDeployMu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// In Deploy function
|
|
||||||
func (svc *Service) Deploy(ctx context.Context, app *models.App, webhookEventID *int64, cancelExisting bool) error {
|
|
||||||
svc.activeDeployMu.Lock()
|
|
||||||
if cancelExisting {
|
|
||||||
svc.cancelActiveDeploy(app.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to acquire per-app deployment lock
|
|
||||||
if !svc.tryLockApp(app.ID) {
|
|
||||||
svc.activeDeployMu.Unlock()
|
|
||||||
svc.log.Warn("deployment already in progress", "app", app.Name)
|
|
||||||
return ErrDeploymentInProgress
|
|
||||||
}
|
|
||||||
svc.activeDeployMu.Unlock()
|
|
||||||
|
|
||||||
defer svc.unlockApp(app.ID)
|
|
||||||
// rest of function...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## 6. Incomplete Error Propagation in Git Clone
|
|
||||||
|
|
||||||
### Description
|
|
||||||
In the Docker client's `runGitClone` function, if `ContainerLogs` fails, the error is silently ignored, which could hide important debugging information.
|
|
||||||
|
|
||||||
### Location
|
|
||||||
`internal/docker/client.go:679` in `runGitClone` function
|
|
||||||
|
|
||||||
### Proposed Fix
|
|
||||||
Handle the ContainerLogs error properly:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Always capture logs for the result
|
|
||||||
logs, logErr := c.ContainerLogs(ctx, containerID, "100")
|
|
||||||
if logErr != nil {
|
|
||||||
c.log.Warn("failed to get git clone logs", "error", logErr)
|
|
||||||
logs = "Failed to retrieve logs: " + logErr.Error()
|
|
||||||
}
|
|
||||||
```
|
|
||||||
68
CLAUDE.md
68
CLAUDE.md
@@ -1,68 +0,0 @@
|
|||||||
# Repository Rules
|
|
||||||
|
|
||||||
Last Updated: 2026-01-08
|
|
||||||
|
|
||||||
These rules MUST be followed at all times; this is very important.
|
|
||||||
|
|
||||||
* Never use `git add -A` - add specific changes to a deliberate commit. A
|
|
||||||
commit should contain one change. After each change, make a commit with a
|
|
||||||
good one-line summary.
|
|
||||||
|
|
||||||
* NEVER modify the linter config without asking first.
|
|
||||||
|
|
||||||
* NEVER modify tests to exclude special cases or otherwise get them to pass
|
|
||||||
without asking first. In almost all cases, the code should be changed,
|
|
||||||
NOT the tests. If you think the test needs to be changed, make your case
|
|
||||||
for that and ask for permission to proceed, then stop. You need explicit
|
|
||||||
user approval to modify existing tests. (You do not need user approval
|
|
||||||
for writing NEW tests.)
|
|
||||||
|
|
||||||
* When linting, assume the linter config is CORRECT, and that each item
|
|
||||||
output by the linter is something that legitimately needs fixing in the
|
|
||||||
code.
|
|
||||||
|
|
||||||
* When running tests, use `make test`.
|
|
||||||
|
|
||||||
* Before commits, run `make check`. This runs `make lint`, `make test`,
|
|
||||||
and `make check-fmt`. Any issues discovered MUST be resolved before
|
|
||||||
committing unless explicitly told otherwise.
|
|
||||||
|
|
||||||
* When fixing a bug, write a failing test for the bug FIRST. Add
|
|
||||||
appropriate logging to the test to ensure it is written correctly. Commit
|
|
||||||
that. Then go about fixing the bug until the test passes (without
|
|
||||||
modifying the test further). Then commit that.
|
|
||||||
|
|
||||||
* When adding a new feature, do the same - implement a test first (TDD). It
|
|
||||||
doesn't have to be super complex. Commit the test, then commit the
|
|
||||||
feature.
|
|
||||||
|
|
||||||
* When adding a new feature, use a feature branch. When the feature is
|
|
||||||
completely finished and the code is up to standards (passes `make check`)
|
|
||||||
then and only then can the feature branch be merged into `main` and the
|
|
||||||
branch deleted.
|
|
||||||
|
|
||||||
* Write godoc documentation comments for all exported types and functions as
|
|
||||||
you go along.
|
|
||||||
|
|
||||||
* ALWAYS be consistent in naming. If you name something one thing in one
|
|
||||||
place, name it the EXACT SAME THING in another place.
|
|
||||||
|
|
||||||
* Be descriptive and specific in naming. `wl` is bad;
|
|
||||||
`SourceHostWhitelist` is good. `ConnsPerHost` is bad;
|
|
||||||
`MaxConnectionsPerHost` is good.
|
|
||||||
|
|
||||||
* This is not prototype or teaching code - this is designed for production.
|
|
||||||
Any security issues (such as denial of service) or other web
|
|
||||||
vulnerabilities are P1 bugs and must be added to TODO.md at the top.
|
|
||||||
|
|
||||||
* As this is production code, no stubbing of implementations unless
|
|
||||||
specifically instructed. We need working implementations.
|
|
||||||
|
|
||||||
* Avoid vendoring deps unless specifically instructed to. NEVER commit
|
|
||||||
the vendor directory, NEVER commit compiled binaries. If these
|
|
||||||
directories or files exist, add them to .gitignore (and commit the
|
|
||||||
.gitignore) if they are not already in there. Keep the entire git
|
|
||||||
repository (with history) small - under 20MiB, unless you specifically
|
|
||||||
must commit larger files (e.g. test fixture example media files). Only
|
|
||||||
OUR source code and immediately supporting files (such as test examples)
|
|
||||||
goes into the repo/history.
|
|
||||||
@@ -51,7 +51,7 @@ type Config struct {
|
|||||||
MaintenanceMode bool
|
MaintenanceMode bool
|
||||||
MetricsUsername string
|
MetricsUsername string
|
||||||
MetricsPassword string
|
MetricsPassword string
|
||||||
SessionSecret string
|
SessionSecret string //nolint:gosec // not a hardcoded credential, loaded from env/file
|
||||||
params *Params
|
params *Params
|
||||||
log *slog.Logger
|
log *slog.Logger
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
-- Initialize migrations table for tracking applied migrations
|
|
||||||
CREATE TABLE IF NOT EXISTS migrations (
|
|
||||||
id INTEGER PRIMARY KEY,
|
|
||||||
name TEXT NOT NULL UNIQUE,
|
|
||||||
applied_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
|
||||||
);
|
|
||||||
@@ -1,8 +1,7 @@
|
|||||||
-- Complete schema for upaas (consolidated)
|
-- Initial schema for upaas
|
||||||
-- This represents the final state of all migrations applied
|
|
||||||
|
|
||||||
-- Users table (single admin user)
|
-- Users table (single admin user)
|
||||||
CREATE TABLE IF NOT EXISTS users (
|
CREATE TABLE users (
|
||||||
id INTEGER PRIMARY KEY,
|
id INTEGER PRIMARY KEY,
|
||||||
username TEXT UNIQUE NOT NULL,
|
username TEXT UNIQUE NOT NULL,
|
||||||
password_hash TEXT NOT NULL,
|
password_hash TEXT NOT NULL,
|
||||||
@@ -10,7 +9,7 @@ CREATE TABLE IF NOT EXISTS users (
|
|||||||
);
|
);
|
||||||
|
|
||||||
-- Apps table
|
-- Apps table
|
||||||
CREATE TABLE IF NOT EXISTS apps (
|
CREATE TABLE apps (
|
||||||
id TEXT PRIMARY KEY,
|
id TEXT PRIMARY KEY,
|
||||||
name TEXT UNIQUE NOT NULL,
|
name TEXT UNIQUE NOT NULL,
|
||||||
repo_url TEXT NOT NULL,
|
repo_url TEXT NOT NULL,
|
||||||
@@ -19,19 +18,18 @@ CREATE TABLE IF NOT EXISTS apps (
|
|||||||
webhook_secret TEXT NOT NULL,
|
webhook_secret TEXT NOT NULL,
|
||||||
ssh_private_key TEXT NOT NULL,
|
ssh_private_key TEXT NOT NULL,
|
||||||
ssh_public_key TEXT NOT NULL,
|
ssh_public_key TEXT NOT NULL,
|
||||||
|
container_id TEXT,
|
||||||
image_id TEXT,
|
image_id TEXT,
|
||||||
previous_image_id TEXT,
|
|
||||||
status TEXT DEFAULT 'pending',
|
status TEXT DEFAULT 'pending',
|
||||||
docker_network TEXT,
|
docker_network TEXT,
|
||||||
ntfy_topic TEXT,
|
ntfy_topic TEXT,
|
||||||
slack_webhook TEXT,
|
slack_webhook TEXT,
|
||||||
webhook_secret_hash TEXT NOT NULL DEFAULT '',
|
|
||||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||||
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||||
);
|
);
|
||||||
|
|
||||||
-- App environment variables
|
-- App environment variables
|
||||||
CREATE TABLE IF NOT EXISTS app_env_vars (
|
CREATE TABLE app_env_vars (
|
||||||
id INTEGER PRIMARY KEY,
|
id INTEGER PRIMARY KEY,
|
||||||
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
||||||
key TEXT NOT NULL,
|
key TEXT NOT NULL,
|
||||||
@@ -40,7 +38,7 @@ CREATE TABLE IF NOT EXISTS app_env_vars (
|
|||||||
);
|
);
|
||||||
|
|
||||||
-- App labels
|
-- App labels
|
||||||
CREATE TABLE IF NOT EXISTS app_labels (
|
CREATE TABLE app_labels (
|
||||||
id INTEGER PRIMARY KEY,
|
id INTEGER PRIMARY KEY,
|
||||||
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
||||||
key TEXT NOT NULL,
|
key TEXT NOT NULL,
|
||||||
@@ -49,7 +47,7 @@ CREATE TABLE IF NOT EXISTS app_labels (
|
|||||||
);
|
);
|
||||||
|
|
||||||
-- App volume mounts
|
-- App volume mounts
|
||||||
CREATE TABLE IF NOT EXISTS app_volumes (
|
CREATE TABLE app_volumes (
|
||||||
id INTEGER PRIMARY KEY,
|
id INTEGER PRIMARY KEY,
|
||||||
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
||||||
host_path TEXT NOT NULL,
|
host_path TEXT NOT NULL,
|
||||||
@@ -57,24 +55,13 @@ CREATE TABLE IF NOT EXISTS app_volumes (
|
|||||||
readonly INTEGER DEFAULT 0
|
readonly INTEGER DEFAULT 0
|
||||||
);
|
);
|
||||||
|
|
||||||
-- App port mappings
|
|
||||||
CREATE TABLE IF NOT EXISTS app_ports (
|
|
||||||
id INTEGER PRIMARY KEY,
|
|
||||||
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
|
||||||
host_port INTEGER NOT NULL,
|
|
||||||
container_port INTEGER NOT NULL,
|
|
||||||
protocol TEXT NOT NULL DEFAULT 'tcp' CHECK(protocol IN ('tcp', 'udp')),
|
|
||||||
UNIQUE(host_port, protocol)
|
|
||||||
);
|
|
||||||
|
|
||||||
-- Webhook events log
|
-- Webhook events log
|
||||||
CREATE TABLE IF NOT EXISTS webhook_events (
|
CREATE TABLE webhook_events (
|
||||||
id INTEGER PRIMARY KEY,
|
id INTEGER PRIMARY KEY,
|
||||||
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
||||||
event_type TEXT NOT NULL,
|
event_type TEXT NOT NULL,
|
||||||
branch TEXT NOT NULL,
|
branch TEXT NOT NULL,
|
||||||
commit_sha TEXT,
|
commit_sha TEXT,
|
||||||
commit_url TEXT,
|
|
||||||
payload TEXT,
|
payload TEXT,
|
||||||
matched INTEGER NOT NULL,
|
matched INTEGER NOT NULL,
|
||||||
processed INTEGER DEFAULT 0,
|
processed INTEGER DEFAULT 0,
|
||||||
@@ -82,13 +69,13 @@ CREATE TABLE IF NOT EXISTS webhook_events (
|
|||||||
);
|
);
|
||||||
|
|
||||||
-- Deployments log
|
-- Deployments log
|
||||||
CREATE TABLE IF NOT EXISTS deployments (
|
CREATE TABLE deployments (
|
||||||
id INTEGER PRIMARY KEY,
|
id INTEGER PRIMARY KEY,
|
||||||
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
||||||
webhook_event_id INTEGER REFERENCES webhook_events(id),
|
webhook_event_id INTEGER REFERENCES webhook_events(id),
|
||||||
commit_sha TEXT,
|
commit_sha TEXT,
|
||||||
commit_url TEXT,
|
|
||||||
image_id TEXT,
|
image_id TEXT,
|
||||||
|
container_id TEXT,
|
||||||
status TEXT NOT NULL,
|
status TEXT NOT NULL,
|
||||||
logs TEXT,
|
logs TEXT,
|
||||||
started_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
started_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||||
@@ -96,14 +83,12 @@ CREATE TABLE IF NOT EXISTS deployments (
|
|||||||
);
|
);
|
||||||
|
|
||||||
-- Indexes
|
-- Indexes
|
||||||
CREATE INDEX IF NOT EXISTS idx_apps_status ON apps(status);
|
CREATE INDEX idx_apps_status ON apps(status);
|
||||||
CREATE INDEX IF NOT EXISTS idx_apps_webhook_secret ON apps(webhook_secret);
|
CREATE INDEX idx_apps_webhook_secret ON apps(webhook_secret);
|
||||||
CREATE INDEX IF NOT EXISTS idx_apps_webhook_secret_hash ON apps(webhook_secret_hash);
|
CREATE INDEX idx_app_env_vars_app_id ON app_env_vars(app_id);
|
||||||
CREATE INDEX IF NOT EXISTS idx_app_env_vars_app_id ON app_env_vars(app_id);
|
CREATE INDEX idx_app_labels_app_id ON app_labels(app_id);
|
||||||
CREATE INDEX IF NOT EXISTS idx_app_labels_app_id ON app_labels(app_id);
|
CREATE INDEX idx_app_volumes_app_id ON app_volumes(app_id);
|
||||||
CREATE INDEX IF NOT EXISTS idx_app_volumes_app_id ON app_volumes(app_id);
|
CREATE INDEX idx_webhook_events_app_id ON webhook_events(app_id);
|
||||||
CREATE INDEX IF NOT EXISTS idx_app_ports_app_id ON app_ports(app_id);
|
CREATE INDEX idx_webhook_events_created_at ON webhook_events(created_at);
|
||||||
CREATE INDEX IF NOT EXISTS idx_webhook_events_app_id ON webhook_events(app_id);
|
CREATE INDEX idx_deployments_app_id ON deployments(app_id);
|
||||||
CREATE INDEX IF NOT EXISTS idx_webhook_events_created_at ON webhook_events(created_at);
|
CREATE INDEX idx_deployments_started_at ON deployments(started_at);
|
||||||
CREATE INDEX IF NOT EXISTS idx_deployments_app_id ON deployments(app_id);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_deployments_started_at ON deployments(started_at);
|
|
||||||
44
internal/database/migrations/002_remove_container_id.sql
Normal file
44
internal/database/migrations/002_remove_container_id.sql
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
-- Remove container_id from apps table
|
||||||
|
-- Container is now looked up via Docker label (upaas.id) instead of stored in database
|
||||||
|
|
||||||
|
-- SQLite doesn't support DROP COLUMN before version 3.35.0 (2021-03-12)
|
||||||
|
-- Use table rebuild for broader compatibility
|
||||||
|
|
||||||
|
-- Create new table without container_id
|
||||||
|
CREATE TABLE apps_new (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
name TEXT UNIQUE NOT NULL,
|
||||||
|
repo_url TEXT NOT NULL,
|
||||||
|
branch TEXT NOT NULL DEFAULT 'main',
|
||||||
|
dockerfile_path TEXT DEFAULT 'Dockerfile',
|
||||||
|
webhook_secret TEXT NOT NULL,
|
||||||
|
ssh_private_key TEXT NOT NULL,
|
||||||
|
ssh_public_key TEXT NOT NULL,
|
||||||
|
image_id TEXT,
|
||||||
|
status TEXT DEFAULT 'pending',
|
||||||
|
docker_network TEXT,
|
||||||
|
ntfy_topic TEXT,
|
||||||
|
slack_webhook TEXT,
|
||||||
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Copy data (excluding container_id)
|
||||||
|
INSERT INTO apps_new (
|
||||||
|
id, name, repo_url, branch, dockerfile_path, webhook_secret,
|
||||||
|
ssh_private_key, ssh_public_key, image_id, status,
|
||||||
|
docker_network, ntfy_topic, slack_webhook, created_at, updated_at
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
id, name, repo_url, branch, dockerfile_path, webhook_secret,
|
||||||
|
ssh_private_key, ssh_public_key, image_id, status,
|
||||||
|
docker_network, ntfy_topic, slack_webhook, created_at, updated_at
|
||||||
|
FROM apps;
|
||||||
|
|
||||||
|
-- Drop old table and rename new one
|
||||||
|
DROP TABLE apps;
|
||||||
|
ALTER TABLE apps_new RENAME TO apps;
|
||||||
|
|
||||||
|
-- Recreate indexes
|
||||||
|
CREATE INDEX idx_apps_status ON apps(status);
|
||||||
|
CREATE INDEX idx_apps_webhook_secret ON apps(webhook_secret);
|
||||||
12
internal/database/migrations/003_add_ports.sql
Normal file
12
internal/database/migrations/003_add_ports.sql
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
-- Add port mappings for apps
|
||||||
|
|
||||||
|
CREATE TABLE app_ports (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
app_id TEXT NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
||||||
|
host_port INTEGER NOT NULL,
|
||||||
|
container_port INTEGER NOT NULL,
|
||||||
|
protocol TEXT NOT NULL DEFAULT 'tcp' CHECK(protocol IN ('tcp', 'udp')),
|
||||||
|
UNIQUE(host_port, protocol)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX idx_app_ports_app_id ON app_ports(app_id);
|
||||||
3
internal/database/migrations/004_add_commit_url.sql
Normal file
3
internal/database/migrations/004_add_commit_url.sql
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
-- Add commit_url column to webhook_events and deployments tables
|
||||||
|
ALTER TABLE webhook_events ADD COLUMN commit_url TEXT;
|
||||||
|
ALTER TABLE deployments ADD COLUMN commit_url TEXT;
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
-- Add webhook_secret_hash column for constant-time secret lookup
|
||||||
|
ALTER TABLE apps ADD COLUMN webhook_secret_hash TEXT NOT NULL DEFAULT '';
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
-- Add previous_image_id to apps for deployment rollback support
|
||||||
|
ALTER TABLE apps ADD COLUMN previous_image_id TEXT;
|
||||||
@@ -76,7 +76,7 @@ func deploymentToAPI(d *models.Deployment) apiDeploymentResponse {
|
|||||||
func (h *Handlers) HandleAPILoginPOST() http.HandlerFunc {
|
func (h *Handlers) HandleAPILoginPOST() http.HandlerFunc {
|
||||||
type loginRequest struct {
|
type loginRequest struct {
|
||||||
Username string `json:"username"`
|
Username string `json:"username"`
|
||||||
Password string `json:"password"`
|
Password string `json:"password"` //nolint:gosec // request field, not a hardcoded credential
|
||||||
}
|
}
|
||||||
|
|
||||||
type loginResponse struct {
|
type loginResponse struct {
|
||||||
|
|||||||
@@ -499,7 +499,7 @@ func (h *Handlers) HandleAppLogs() http.HandlerFunc {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _ = writer.Write([]byte(logs))
|
_, _ = writer.Write([]byte(logs)) //nolint:gosec // logs are from trusted container output, not user input
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -582,7 +582,7 @@ func (h *Handlers) HandleDeploymentLogDownload() http.HandlerFunc {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check if file exists
|
// Check if file exists
|
||||||
_, err := os.Stat(logPath)
|
_, err := os.Stat(logPath) //nolint:gosec // logPath is constructed by deploy service, not from user input
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
http.NotFound(writer, request)
|
http.NotFound(writer, request)
|
||||||
|
|
||||||
|
|||||||
@@ -235,9 +235,9 @@ func (m *Middleware) CSRF() func(http.Handler) http.Handler {
|
|||||||
// loginRateLimit configures the login rate limiter.
|
// loginRateLimit configures the login rate limiter.
|
||||||
const (
|
const (
|
||||||
loginRateLimit = rate.Limit(5.0 / 60.0) // 5 requests per 60 seconds
|
loginRateLimit = rate.Limit(5.0 / 60.0) // 5 requests per 60 seconds
|
||||||
loginBurst = 5 // allow burst of 5
|
loginBurst = 5 // allow burst of 5
|
||||||
limiterExpiry = 10 * time.Minute // evict entries not seen in 10 minutes
|
limiterExpiry = 10 * time.Minute // evict entries not seen in 10 minutes
|
||||||
limiterCleanupEvery = 1 * time.Minute // sweep interval
|
limiterCleanupEvery = 1 * time.Minute // sweep interval
|
||||||
)
|
)
|
||||||
|
|
||||||
// ipLimiterEntry stores a rate limiter with its last-seen timestamp.
|
// ipLimiterEntry stores a rate limiter with its last-seen timestamp.
|
||||||
@@ -249,8 +249,8 @@ type ipLimiterEntry struct {
|
|||||||
// ipLimiter tracks per-IP rate limiters for login attempts with automatic
|
// ipLimiter tracks per-IP rate limiters for login attempts with automatic
|
||||||
// eviction of stale entries to prevent unbounded memory growth.
|
// eviction of stale entries to prevent unbounded memory growth.
|
||||||
type ipLimiter struct {
|
type ipLimiter struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
limiters map[string]*ipLimiterEntry
|
limiters map[string]*ipLimiterEntry
|
||||||
lastSweep time.Time
|
lastSweep time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -32,23 +32,23 @@ const (
|
|||||||
type App struct {
|
type App struct {
|
||||||
db *database.Database
|
db *database.Database
|
||||||
|
|
||||||
ID string
|
ID string
|
||||||
Name string
|
Name string
|
||||||
RepoURL string
|
RepoURL string
|
||||||
Branch string
|
Branch string
|
||||||
DockerfilePath string
|
DockerfilePath string
|
||||||
WebhookSecret string
|
WebhookSecret string
|
||||||
WebhookSecretHash string
|
WebhookSecretHash string
|
||||||
SSHPrivateKey string
|
SSHPrivateKey string
|
||||||
SSHPublicKey string
|
SSHPublicKey string
|
||||||
ImageID sql.NullString
|
ImageID sql.NullString
|
||||||
PreviousImageID sql.NullString
|
PreviousImageID sql.NullString
|
||||||
Status AppStatus
|
Status AppStatus
|
||||||
DockerNetwork sql.NullString
|
DockerNetwork sql.NullString
|
||||||
NtfyTopic sql.NullString
|
NtfyTopic sql.NullString
|
||||||
SlackWebhook sql.NullString
|
SlackWebhook sql.NullString
|
||||||
CreatedAt time.Time
|
CreatedAt time.Time
|
||||||
UpdatedAt time.Time
|
UpdatedAt time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewApp creates a new App with a database reference.
|
// NewApp creates a new App with a database reference.
|
||||||
|
|||||||
@@ -54,51 +54,51 @@ func (s *Server) SetupRoutes() {
|
|||||||
r.Group(func(r chi.Router) {
|
r.Group(func(r chi.Router) {
|
||||||
r.Use(s.mw.SessionAuth())
|
r.Use(s.mw.SessionAuth())
|
||||||
|
|
||||||
// Dashboard
|
// Dashboard
|
||||||
r.Get("/", s.handlers.HandleDashboard())
|
r.Get("/", s.handlers.HandleDashboard())
|
||||||
|
|
||||||
// Logout
|
// Logout
|
||||||
r.Post("/logout", s.handlers.HandleLogout())
|
r.Post("/logout", s.handlers.HandleLogout())
|
||||||
|
|
||||||
// App routes
|
// App routes
|
||||||
r.Get("/apps/new", s.handlers.HandleAppNew())
|
r.Get("/apps/new", s.handlers.HandleAppNew())
|
||||||
r.Post("/apps", s.handlers.HandleAppCreate())
|
r.Post("/apps", s.handlers.HandleAppCreate())
|
||||||
r.Get("/apps/{id}", s.handlers.HandleAppDetail())
|
r.Get("/apps/{id}", s.handlers.HandleAppDetail())
|
||||||
r.Get("/apps/{id}/edit", s.handlers.HandleAppEdit())
|
r.Get("/apps/{id}/edit", s.handlers.HandleAppEdit())
|
||||||
r.Post("/apps/{id}", s.handlers.HandleAppUpdate())
|
r.Post("/apps/{id}", s.handlers.HandleAppUpdate())
|
||||||
r.Post("/apps/{id}/delete", s.handlers.HandleAppDelete())
|
r.Post("/apps/{id}/delete", s.handlers.HandleAppDelete())
|
||||||
r.Post("/apps/{id}/deploy", s.handlers.HandleAppDeploy())
|
r.Post("/apps/{id}/deploy", s.handlers.HandleAppDeploy())
|
||||||
r.Post("/apps/{id}/deployments/cancel", s.handlers.HandleCancelDeploy())
|
r.Post("/apps/{id}/deployments/cancel", s.handlers.HandleCancelDeploy())
|
||||||
r.Get("/apps/{id}/deployments", s.handlers.HandleAppDeployments())
|
r.Get("/apps/{id}/deployments", s.handlers.HandleAppDeployments())
|
||||||
r.Get("/apps/{id}/deployments/{deploymentID}/logs", s.handlers.HandleDeploymentLogsAPI())
|
r.Get("/apps/{id}/deployments/{deploymentID}/logs", s.handlers.HandleDeploymentLogsAPI())
|
||||||
r.Get("/apps/{id}/deployments/{deploymentID}/download", s.handlers.HandleDeploymentLogDownload())
|
r.Get("/apps/{id}/deployments/{deploymentID}/download", s.handlers.HandleDeploymentLogDownload())
|
||||||
r.Get("/apps/{id}/logs", s.handlers.HandleAppLogs())
|
r.Get("/apps/{id}/logs", s.handlers.HandleAppLogs())
|
||||||
r.Get("/apps/{id}/container-logs", s.handlers.HandleContainerLogsAPI())
|
r.Get("/apps/{id}/container-logs", s.handlers.HandleContainerLogsAPI())
|
||||||
r.Get("/apps/{id}/status", s.handlers.HandleAppStatusAPI())
|
r.Get("/apps/{id}/status", s.handlers.HandleAppStatusAPI())
|
||||||
r.Get("/apps/{id}/recent-deployments", s.handlers.HandleRecentDeploymentsAPI())
|
r.Get("/apps/{id}/recent-deployments", s.handlers.HandleRecentDeploymentsAPI())
|
||||||
r.Post("/apps/{id}/rollback", s.handlers.HandleAppRollback())
|
r.Post("/apps/{id}/rollback", s.handlers.HandleAppRollback())
|
||||||
r.Post("/apps/{id}/restart", s.handlers.HandleAppRestart())
|
r.Post("/apps/{id}/restart", s.handlers.HandleAppRestart())
|
||||||
r.Post("/apps/{id}/stop", s.handlers.HandleAppStop())
|
r.Post("/apps/{id}/stop", s.handlers.HandleAppStop())
|
||||||
r.Post("/apps/{id}/start", s.handlers.HandleAppStart())
|
r.Post("/apps/{id}/start", s.handlers.HandleAppStart())
|
||||||
|
|
||||||
// Environment variables
|
// Environment variables
|
||||||
r.Post("/apps/{id}/env-vars", s.handlers.HandleEnvVarAdd())
|
r.Post("/apps/{id}/env-vars", s.handlers.HandleEnvVarAdd())
|
||||||
r.Post("/apps/{id}/env-vars/{varID}/edit", s.handlers.HandleEnvVarEdit())
|
r.Post("/apps/{id}/env-vars/{varID}/edit", s.handlers.HandleEnvVarEdit())
|
||||||
r.Post("/apps/{id}/env-vars/{varID}/delete", s.handlers.HandleEnvVarDelete())
|
r.Post("/apps/{id}/env-vars/{varID}/delete", s.handlers.HandleEnvVarDelete())
|
||||||
|
|
||||||
// Labels
|
// Labels
|
||||||
r.Post("/apps/{id}/labels", s.handlers.HandleLabelAdd())
|
r.Post("/apps/{id}/labels", s.handlers.HandleLabelAdd())
|
||||||
r.Post("/apps/{id}/labels/{labelID}/edit", s.handlers.HandleLabelEdit())
|
r.Post("/apps/{id}/labels/{labelID}/edit", s.handlers.HandleLabelEdit())
|
||||||
r.Post("/apps/{id}/labels/{labelID}/delete", s.handlers.HandleLabelDelete())
|
r.Post("/apps/{id}/labels/{labelID}/delete", s.handlers.HandleLabelDelete())
|
||||||
|
|
||||||
// Volumes
|
// Volumes
|
||||||
r.Post("/apps/{id}/volumes", s.handlers.HandleVolumeAdd())
|
r.Post("/apps/{id}/volumes", s.handlers.HandleVolumeAdd())
|
||||||
r.Post("/apps/{id}/volumes/{volumeID}/edit", s.handlers.HandleVolumeEdit())
|
r.Post("/apps/{id}/volumes/{volumeID}/edit", s.handlers.HandleVolumeEdit())
|
||||||
r.Post("/apps/{id}/volumes/{volumeID}/delete", s.handlers.HandleVolumeDelete())
|
r.Post("/apps/{id}/volumes/{volumeID}/delete", s.handlers.HandleVolumeDelete())
|
||||||
|
|
||||||
// Ports
|
// Ports
|
||||||
r.Post("/apps/{id}/ports", s.handlers.HandlePortAdd())
|
r.Post("/apps/{id}/ports", s.handlers.HandlePortAdd())
|
||||||
r.Post("/apps/{id}/ports/{portID}/delete", s.handlers.HandlePortDelete())
|
r.Post("/apps/{id}/ports/{portID}/delete", s.handlers.HandlePortDelete())
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@@ -82,7 +82,7 @@ type deploymentLogWriter struct {
|
|||||||
lineBuffer bytes.Buffer // buffer for incomplete lines
|
lineBuffer bytes.Buffer // buffer for incomplete lines
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
flushed sync.WaitGroup // waits for flush goroutine to finish
|
flushed sync.WaitGroup // waits for flush goroutine to finish
|
||||||
flushCtx context.Context //nolint:containedctx // needed for async flush goroutine
|
flushCtx context.Context //nolint:containedctx // needed for async flush goroutine
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -260,7 +260,7 @@ func (svc *Service) sendNtfy(
|
|||||||
request.Header.Set("Title", title)
|
request.Header.Set("Title", title)
|
||||||
request.Header.Set("Priority", svc.ntfyPriority(priority))
|
request.Header.Set("Priority", svc.ntfyPriority(priority))
|
||||||
|
|
||||||
resp, err := svc.client.Do(request)
|
resp, err := svc.client.Do(request) //nolint:gosec // URL constructed from trusted config, not user input
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to send ntfy request: %w", err)
|
return fmt.Errorf("failed to send ntfy request: %w", err)
|
||||||
}
|
}
|
||||||
@@ -352,7 +352,7 @@ func (svc *Service) sendSlack(
|
|||||||
|
|
||||||
request.Header.Set("Content-Type", "application/json")
|
request.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
resp, err := svc.client.Do(request)
|
resp, err := svc.client.Do(request) //nolint:gosec // URL from trusted webhook config
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to send slack request: %w", err)
|
return fmt.Errorf("failed to send slack request: %w", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import (
|
|||||||
|
|
||||||
// KeyPair contains an SSH key pair.
|
// KeyPair contains an SSH key pair.
|
||||||
type KeyPair struct {
|
type KeyPair struct {
|
||||||
PrivateKey string
|
PrivateKey string //nolint:gosec // field name describes SSH key material, not a hardcoded secret
|
||||||
PublicKey string
|
PublicKey string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user