Add deployment improvements and UI enhancements

- Clone specific commit SHA from webhook instead of just branch HEAD
- Log webhook payload in deployment logs
- Add build/deploy timing to ntfy and Slack notifications
- Implement container rollback on deploy failure
- Remove old container only after successful deployment
- Show relative times in deployment history (hover for full date)
- Update port mappings UI with labeled text inputs
- Add footer with version info, license, and repo link
- Format deploy key comment as upaas_DATE_appname
This commit is contained in:
2025-12-30 15:05:26 +07:00
parent bc275f7b9c
commit b3ac3c60c2
15 changed files with 1111 additions and 141 deletions

View File

@@ -2,13 +2,16 @@
package deploy
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
"sync"
"time"
"go.uber.org/fx"
@@ -28,14 +31,151 @@ const (
upaasLabelCount = 1
// buildsDirPermissions is the permission mode for the builds directory.
buildsDirPermissions = 0o750
// buildTimeout is the maximum duration for the build phase.
buildTimeout = 30 * time.Minute
// deployTimeout is the maximum duration for the deploy phase (container swap).
deployTimeout = 5 * time.Minute
)
// Sentinel errors for deployment failures. Callers should match these with
// errors.Is to distinguish failure modes (they may arrive wrapped via %w).
var (
	// ErrContainerUnhealthy indicates the container failed health check.
	ErrContainerUnhealthy = errors.New("container unhealthy after 60 seconds")
	// ErrDeploymentInProgress indicates another deployment is already running.
	ErrDeploymentInProgress = errors.New("deployment already in progress for this app")
	// ErrBuildTimeout indicates the build phase exceeded the timeout.
	ErrBuildTimeout = errors.New("build timeout exceeded")
	// ErrDeployTimeout indicates the deploy phase exceeded the timeout.
	ErrDeployTimeout = errors.New("deploy timeout exceeded")
)
// logFlushInterval is how often to flush buffered logs to the database.
const logFlushInterval = time.Second
// dockerLogMessage represents a Docker build log message: one JSON object
// per line of build output. Only the fields this writer consumes are decoded.
type dockerLogMessage struct {
	Stream string `json:"stream"` // incremental build output text
	Error  string `json:"error"`  // non-empty when a build step failed
}
// deploymentLogWriter implements io.Writer and periodically flushes buffered
// output to the deployment's persisted logs.
// It parses Docker JSON log format and extracts the stream content;
// non-JSON lines are forwarded as-is.
type deploymentLogWriter struct {
	deployment *models.Deployment // record receiving AppendLog calls
	buffer     bytes.Buffer       // parsed output waiting for the next flush
	lineBuffer bytes.Buffer       // buffer for incomplete lines between Writes
	mu         sync.Mutex         // guards buffer and lineBuffer
	done       chan struct{}      // closed by Close to stop the flush loop
	flushCtx   context.Context    //nolint:containedctx // needed for async flush goroutine
}
// newDeploymentLogWriter constructs a log writer for the given deployment and
// starts its background flush goroutine. The caller must invoke Close to stop
// that goroutine and flush any remaining buffered output.
func newDeploymentLogWriter(ctx context.Context, deployment *models.Deployment) *deploymentLogWriter {
	writer := &deploymentLogWriter{
		deployment: deployment,
		flushCtx:   ctx,
		done:       make(chan struct{}),
	}
	go writer.runFlushLoop()
	return writer
}
// Write implements io.Writer for Docker build output.
// Incoming bytes accumulate until at least one newline-terminated line is
// available; each complete line is then parsed as a Docker JSON log message
// (see processLine). A trailing partial line stays buffered for the next
// call. Write always reports full consumption and never returns an error.
func (w *deploymentLogWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()

	w.lineBuffer.Write(p)

	for {
		pending := w.lineBuffer.Bytes()
		nl := bytes.IndexByte(pending, '\n')
		if nl < 0 {
			// Only an incomplete line remains; keep it for the next Write.
			break
		}
		// Copy the line out before consuming it, since Next invalidates
		// the slice returned by Bytes.
		line := append([]byte(nil), pending[:nl]...)
		w.lineBuffer.Next(nl + 1)
		w.processLine(line)
	}
	return len(p), nil
}
// Close stops the flush loop and performs a final flush.
// The final flush runs asynchronously in the flush goroutine after it
// observes the closed channel. Close must be called exactly once; a second
// call panics (double close of the done channel).
func (w *deploymentLogWriter) Close() {
	close(w.done)
}
// runFlushLoop periodically persists buffered log output until Close is
// called, at which point it performs one last flush and exits.
func (w *deploymentLogWriter) runFlushLoop() {
	ticker := time.NewTicker(logFlushInterval)
	defer ticker.Stop()

	for {
		select {
		case <-w.done:
			// Final flush so output written since the last tick is not lost.
			w.doFlush()
			return
		case <-ticker.C:
			w.doFlush()
		}
	}
}
// doFlush atomically takes the buffered output and, when non-empty, appends
// it to the deployment log. The AppendLog error is deliberately ignored:
// log persistence is best-effort and must not interrupt a deployment.
func (w *deploymentLogWriter) doFlush() {
	w.mu.Lock()
	pending := w.buffer.String()
	w.buffer.Reset()
	w.mu.Unlock()

	if pending == "" {
		return
	}
	_ = w.deployment.AppendLog(w.flushCtx, pending)
}
// processLine interprets one line of Docker build output. Valid JSON lines
// contribute their error and/or stream text to the flush buffer; anything
// else is forwarded verbatim. Empty lines are dropped.
func (w *deploymentLogWriter) processLine(line []byte) {
	if len(line) == 0 {
		return
	}

	var msg dockerLogMessage
	if json.Unmarshal(line, &msg) != nil {
		// Not Docker JSON — pass the raw line through unchanged.
		w.buffer.Write(line)
		w.buffer.WriteByte('\n')
		return
	}

	if msg.Error != "" {
		w.buffer.WriteString("ERROR: " + msg.Error + "\n")
	}
	if msg.Stream != "" {
		w.buffer.WriteString(msg.Stream)
	}
}
// ServiceParams contains dependencies for Service.
type ServiceParams struct {
fx.In
@@ -49,24 +189,35 @@ type ServiceParams struct {
// Service provides deployment functionality.
type Service struct {
log *slog.Logger
db *database.Database
docker *docker.Client
notify *notify.Service
config *config.Config
params *ServiceParams
log *slog.Logger
db *database.Database
docker *docker.Client
notify *notify.Service
config *config.Config
params *ServiceParams
appLocks sync.Map // map[string]*sync.Mutex - per-app deployment locks
}
// New creates a new deploy Service.
func New(_ fx.Lifecycle, params ServiceParams) (*Service, error) {
return &Service{
func New(lc fx.Lifecycle, params ServiceParams) (*Service, error) {
svc := &Service{
log: params.Logger.Get(),
db: params.Database,
docker: params.Docker,
notify: params.Notify,
config: params.Config,
params: &params,
}, nil
}
if lc != nil {
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
return svc.cleanupStuckDeployments(ctx)
},
})
}
return svc, nil
}
// GetBuildDir returns the build directory path for an app.
@@ -80,11 +231,24 @@ func (svc *Service) Deploy(
app *models.App,
webhookEventID *int64,
) error {
deployment, err := svc.createDeploymentRecord(ctx, app, webhookEventID)
// Try to acquire per-app deployment lock
if !svc.tryLockApp(app.ID) {
svc.log.Warn("deployment already in progress", "app", app.Name)
return ErrDeploymentInProgress
}
defer svc.unlockApp(app.ID)
// Fetch webhook event and create deployment record
webhookEvent := svc.fetchWebhookEvent(ctx, webhookEventID)
deployment, err := svc.createDeploymentRecord(ctx, app, webhookEventID, webhookEvent)
if err != nil {
return err
}
svc.logWebhookPayload(ctx, deployment, webhookEvent)
err = svc.updateAppStatusBuilding(ctx, app)
if err != nil {
return err
@@ -92,21 +256,16 @@ func (svc *Service) Deploy(
svc.notify.NotifyBuildStart(ctx, app, deployment)
imageID, err := svc.buildImage(ctx, app, deployment)
// Build phase with timeout
imageID, err := svc.buildImageWithTimeout(ctx, app, deployment)
if err != nil {
return err
}
svc.notify.NotifyBuildSuccess(ctx, app, deployment)
err = svc.updateDeploymentDeploying(ctx, deployment)
if err != nil {
return err
}
svc.removeOldContainer(ctx, app, deployment)
_, err = svc.createAndStartContainer(ctx, app, deployment, imageID)
// Deploy phase with timeout
err = svc.deployContainerWithTimeout(ctx, app, deployment, imageID)
if err != nil {
return err
}
@@ -123,10 +282,162 @@ func (svc *Service) Deploy(
return nil
}
// buildImageWithTimeout runs the build phase under buildTimeout.
// On timeout the deployment is marked failed (using the parent ctx, which is
// still live) and ErrBuildTimeout is returned wrapped; other build errors are
// passed through unchanged.
//
// NOTE(review): if the parent ctx itself hits a deadline, buildCtx.Err() is
// also DeadlineExceeded and the failure is reported as a build timeout —
// confirm that classification is acceptable for callers.
func (svc *Service) buildImageWithTimeout(
	ctx context.Context,
	app *models.App,
	deployment *models.Deployment,
) (string, error) {
	buildCtx, cancel := context.WithTimeout(ctx, buildTimeout)
	defer cancel()

	imageID, err := svc.buildImage(buildCtx, app, deployment)
	if err == nil {
		return imageID, nil
	}

	if errors.Is(buildCtx.Err(), context.DeadlineExceeded) {
		timeoutErr := fmt.Errorf("%w after %v", ErrBuildTimeout, buildTimeout)
		svc.failDeployment(ctx, app, deployment, timeoutErr)
		return "", timeoutErr
	}
	return "", err
}
// deployContainerWithTimeout runs the deploy phase (container swap) under
// deployTimeout. The old container is stopped but kept so a failed swap can
// roll back to it; it is removed only after the new container started
// successfully. Timeout failures mark the deployment failed and return
// ErrDeployTimeout wrapped.
func (svc *Service) deployContainerWithTimeout(
	ctx context.Context,
	app *models.App,
	deployment *models.Deployment,
	imageID string,
) error {
	deployCtx, cancel := context.WithTimeout(ctx, deployTimeout)
	defer cancel()

	if err := svc.updateDeploymentDeploying(deployCtx, deployment); err != nil {
		return err
	}

	// Stop the old container but do not remove it yet — it is the rollback
	// target if the new container fails to start.
	oldContainerID := svc.stopOldContainer(deployCtx, app, deployment)

	_, err := svc.createAndStartContainer(deployCtx, app, deployment, imageID)
	if err != nil {
		if oldContainerID != "" {
			// Roll back on the parent ctx: deployCtx may already be expired.
			svc.rollbackContainer(ctx, oldContainerID, deployment)
		}
		if errors.Is(deployCtx.Err(), context.DeadlineExceeded) {
			timeoutErr := fmt.Errorf("%w after %v", ErrDeployTimeout, deployTimeout)
			svc.failDeployment(ctx, app, deployment, timeoutErr)
			return timeoutErr
		}
		return err
	}

	// New container is up; the stopped old one is no longer needed.
	if oldContainerID != "" {
		svc.removeContainer(ctx, oldContainerID, deployment)
	}
	return nil
}
// cleanupStuckDeployments marks any deployments still in building/deploying
// as failed. It runs at startup (wired via the fx OnStart hook) to recover
// records orphaned by a previous process that died mid-deployment.
func (svc *Service) cleanupStuckDeployments(ctx context.Context) error {
	const markFailed = `
	UPDATE deployments
	SET status = ?, finished_at = ?, logs = COALESCE(logs, '') || ?
	WHERE status IN (?, ?)
	`
	interruptNote := "\n[System] Deployment marked as failed: process was interrupted\n"

	_, err := svc.db.DB().ExecContext(
		ctx,
		markFailed,
		models.DeploymentStatusFailed,
		time.Now(),
		interruptNote,
		models.DeploymentStatusBuilding,
		models.DeploymentStatusDeploying,
	)
	if err != nil {
		svc.log.Error("failed to cleanup stuck deployments", "error", err)
		return fmt.Errorf("failed to cleanup stuck deployments: %w", err)
	}

	svc.log.Info("cleaned up stuck deployments")
	return nil
}
// getAppLock returns the per-app deployment mutex for appID, creating it on
// first use. Concurrent callers racing on the same new appID all receive the
// single mutex that LoadOrStore settled on.
//
// The previous version kept a "graceful" fallback for a failed type
// assertion that unconditionally Store'd a brand-new mutex. That fallback
// could replace a mutex another goroutine currently held, silently breaking
// per-app mutual exclusion. Since only *sync.Mutex values are ever stored in
// appLocks (right here), the assertion cannot fail and is made direct; a
// panic on corruption is preferable to a lost lock.
func (svc *Service) getAppLock(appID string) *sync.Mutex {
	lock, _ := svc.appLocks.LoadOrStore(appID, &sync.Mutex{})
	return lock.(*sync.Mutex)
}
// tryLockApp attempts to take the per-app deployment lock without blocking.
// It returns false when a deployment already holds the lock for appID.
func (svc *Service) tryLockApp(appID string) bool {
	return svc.getAppLock(appID).TryLock()
}
// unlockApp releases the per-app deployment lock taken by tryLockApp.
// Calling it without holding the lock is a fatal runtime error
// (sync.Mutex: unlock of unlocked mutex).
func (svc *Service) unlockApp(appID string) {
	svc.getAppLock(appID).Unlock()
}
// fetchWebhookEvent loads the webhook event for webhookEventID. It returns
// nil when no ID was supplied or when the lookup fails — the failure is
// logged rather than propagated, so deployments proceed without webhook
// metadata.
func (svc *Service) fetchWebhookEvent(ctx context.Context, webhookEventID *int64) *models.WebhookEvent {
	if webhookEventID == nil {
		return nil
	}

	event, lookupErr := models.FindWebhookEvent(ctx, svc.db, *webhookEventID)
	if lookupErr != nil {
		svc.log.Warn("failed to fetch webhook event", "error", lookupErr)
		return nil
	}
	return event
}
// logWebhookPayload records the raw webhook payload in the deployment log so
// the triggering event can be inspected later. It is a no-op when there is
// no event or the event carries no payload; the append is best-effort.
func (svc *Service) logWebhookPayload(
	ctx context.Context,
	deployment *models.Deployment,
	webhookEvent *models.WebhookEvent,
) {
	if webhookEvent == nil {
		return
	}
	if !webhookEvent.Payload.Valid {
		return
	}
	_ = deployment.AppendLog(ctx, "Webhook received:\n"+webhookEvent.Payload.String+"\n")
}
func (svc *Service) createDeploymentRecord(
ctx context.Context,
app *models.App,
webhookEventID *int64,
webhookEvent *models.WebhookEvent,
) (*models.Deployment, error) {
deployment := models.NewDeployment(svc.db)
deployment.AppID = app.ID
@@ -138,6 +449,11 @@ func (svc *Service) createDeploymentRecord(
}
}
// Set commit SHA from webhook event
if webhookEvent != nil && webhookEvent.CommitSHA.Valid {
deployment.CommitSHA = webhookEvent.CommitSHA
}
deployment.Status = models.DeploymentStatusBuilding
saveErr := deployment.Save(ctx)
@@ -167,7 +483,7 @@ func (svc *Service) buildImage(
app *models.App,
deployment *models.Deployment,
) (string, error) {
tempDir, cleanup, err := svc.cloneRepository(ctx, app, deployment)
workDir, cleanup, err := svc.cloneRepository(ctx, app, deployment)
if err != nil {
return "", err
}
@@ -176,10 +492,17 @@ func (svc *Service) buildImage(
imageTag := "upaas/" + app.Name + ":latest"
// Create log writer that flushes build output to deployment logs every second
logWriter := newDeploymentLogWriter(ctx, deployment)
defer logWriter.Close()
// BuildImage creates a tar archive from the local filesystem,
// so it needs the container path where files exist, not the host path.
imageID, err := svc.docker.BuildImage(ctx, docker.BuildImageOptions{
ContextDir: tempDir,
ContextDir: workDir,
DockerfilePath: app.DockerfilePath,
Tags: []string{imageTag},
LogWriter: logWriter,
})
if err != nil {
svc.notify.NotifyBuildFailed(ctx, app, deployment, err)
@@ -206,7 +529,9 @@ func (svc *Service) cloneRepository(
) (string, func(), error) {
// Use a subdirectory of DataDir for builds since it's mounted from the host
// and accessible to Docker for bind mounts (unlike /tmp inside the container).
// Structure: builds/<appname>/<deployment-id>-<random>
// Structure: builds/<appname>/<deployment-id>-<random>/
// deploy_key <- SSH key for cloning
// work/ <- cloned repository
appBuildsDir := filepath.Join(svc.config.DataDir, "builds", app.Name)
err := os.MkdirAll(appBuildsDir, buildsDirPermissions)
@@ -216,16 +541,37 @@ func (svc *Service) cloneRepository(
return "", nil, fmt.Errorf("failed to create builds dir: %w", err)
}
tempDir, err := os.MkdirTemp(appBuildsDir, fmt.Sprintf("%d-*", deployment.ID))
buildDir, err := os.MkdirTemp(appBuildsDir, fmt.Sprintf("%d-*", deployment.ID))
if err != nil {
svc.failDeployment(ctx, app, deployment, fmt.Errorf("failed to create temp dir: %w", err))
return "", nil, fmt.Errorf("failed to create temp dir: %w", err)
}
cleanup := func() { _ = os.RemoveAll(tempDir) }
cleanup := func() { _ = os.RemoveAll(buildDir) }
cloneErr := svc.docker.CloneRepo(ctx, app.RepoURL, app.Branch, app.SSHPrivateKey, tempDir)
// Calculate the host path for Docker bind mounts.
// When upaas runs in a container, DataDir is the container path but Docker
// needs the host path. HostDataDir provides the host-side equivalent.
// CloneRepo needs both: container path for writing files, host path for Docker mounts.
hostBuildDir := svc.containerToHostPath(buildDir)
// Get commit SHA from deployment if available
var commitSHA string
if deployment.CommitSHA.Valid {
commitSHA = deployment.CommitSHA.String
}
cloneResult, cloneErr := svc.docker.CloneRepo(
ctx,
app.RepoURL,
app.Branch,
commitSHA,
app.SSHPrivateKey,
buildDir,
hostBuildDir,
)
if cloneErr != nil {
cleanup()
svc.failDeployment(ctx, app, deployment, fmt.Errorf("failed to clone repo: %w", cloneErr))
@@ -233,9 +579,38 @@ func (svc *Service) cloneRepository(
return "", nil, fmt.Errorf("failed to clone repo: %w", cloneErr)
}
_ = deployment.AppendLog(ctx, "Repository cloned successfully")
// Log clone success with git output
if commitSHA != "" {
_ = deployment.AppendLog(ctx, "Repository cloned at commit "+commitSHA)
} else {
_ = deployment.AppendLog(ctx, "Repository cloned (branch: "+app.Branch+")")
}
return tempDir, cleanup, nil
if cloneResult != nil && cloneResult.Output != "" {
_ = deployment.AppendLog(ctx, cloneResult.Output)
}
// Return the 'work' subdirectory where the repo was cloned
workDir := filepath.Join(buildDir, "work")
return workDir, cleanup, nil
}
// containerToHostPath converts a container-local path under DataDir to the
// equivalent host-side path under HostDataDir. This is needed when upaas runs
// inside a container but must hand paths to the Docker daemon, which resolves
// bind mounts on the host. Paths that cannot be mapped are returned unchanged.
func (svc *Service) containerToHostPath(containerPath string) string {
	// Identical roots mean no translation is needed (not containerized, or
	// DataDir mounted at the same location on both sides).
	if svc.config.HostDataDir == svc.config.DataDir {
		return containerPath
	}

	relPath, err := filepath.Rel(svc.config.DataDir, containerPath)
	if err != nil {
		// Fall back to original path if we can't compute a relative path.
		return containerPath
	}

	// filepath.Rel succeeds even when containerPath lies OUTSIDE DataDir,
	// yielding a "../"-prefixed result; joining that onto HostDataDir would
	// point at an unrelated host location. Keep the original path instead.
	// relPath == "." (containerPath is DataDir itself) maps cleanly, but
	// IsLocal(".") is false, so it is allowed through explicitly.
	if relPath != "." && !filepath.IsLocal(relPath) {
		return containerPath
	}

	return filepath.Join(svc.config.HostDataDir, relPath)
}
func (svc *Service) updateDeploymentDeploying(
@@ -252,24 +627,68 @@ func (svc *Service) updateDeploymentDeploying(
return nil
}
func (svc *Service) removeOldContainer(
// stopOldContainer stops the old container but keeps it for potential rollback.
// Returns the container ID if found, empty string otherwise.
func (svc *Service) stopOldContainer(
ctx context.Context,
app *models.App,
deployment *models.Deployment,
) {
) string {
containerInfo, err := svc.docker.FindContainerByAppID(ctx, app.ID)
if err != nil || containerInfo == nil {
return ""
}
svc.log.Info("stopping old container", "id", containerInfo.ID)
if containerInfo.Running {
stopErr := svc.docker.StopContainer(ctx, containerInfo.ID)
if stopErr != nil {
svc.log.Warn("failed to stop old container", "error", stopErr)
}
}
_ = deployment.AppendLog(ctx, "Old container stopped: "+containerInfo.ID[:12])
return containerInfo.ID
}
// rollbackContainer restarts the old container after a failed deployment.
func (svc *Service) rollbackContainer(
ctx context.Context,
containerID string,
deployment *models.Deployment,
) {
svc.log.Info("rolling back to old container", "id", containerID)
_ = deployment.AppendLog(ctx, "Rolling back to previous container: "+containerID[:12])
startErr := svc.docker.StartContainer(ctx, containerID)
if startErr != nil {
svc.log.Error("failed to restart old container during rollback", "error", startErr)
_ = deployment.AppendLog(ctx, "ERROR: Failed to rollback: "+startErr.Error())
return
}
svc.log.Info("removing old container", "id", containerInfo.ID)
_ = deployment.AppendLog(ctx, "Rollback successful - previous container restarted")
}
removeErr := svc.docker.RemoveContainer(ctx, containerInfo.ID, true)
// removeContainer removes a container after successful deployment.
func (svc *Service) removeContainer(
ctx context.Context,
containerID string,
deployment *models.Deployment,
) {
svc.log.Info("removing old container", "id", containerID)
removeErr := svc.docker.RemoveContainer(ctx, containerID, true)
if removeErr != nil {
svc.log.Warn("failed to remove old container", "error", removeErr)
return
}
_ = deployment.AppendLog(ctx, "Old container removed")
_ = deployment.AppendLog(ctx, "Old container removed: "+containerID[:12])
}
func (svc *Service) createAndStartContainer(

View File

@@ -27,6 +27,12 @@ const (
httpStatusClientError = 400
)
// Display constants.
const (
	// shortCommitLength is the number of commit-SHA characters shown in
	// notification messages.
	shortCommitLength = 12
	// secondsPerMinute converts between minutes and seconds.
	secondsPerMinute = 60
)
// Sentinel errors for notification failures.
var (
// ErrNtfyFailed indicates the ntfy notification request failed.
@@ -64,10 +70,16 @@ func New(_ fx.Lifecycle, params ServiceParams) (*Service, error) {
func (svc *Service) NotifyBuildStart(
ctx context.Context,
app *models.App,
_ *models.Deployment,
deployment *models.Deployment,
) {
title := "Build started: " + app.Name
message := "Building from branch " + app.Branch
if deployment.CommitSHA.Valid {
shortSHA := deployment.CommitSHA.String[:minInt(shortCommitLength, len(deployment.CommitSHA.String))]
message += " at " + shortSHA
}
svc.sendNotifications(ctx, app, title, message, "info")
}
@@ -75,10 +87,12 @@ func (svc *Service) NotifyBuildStart(
func (svc *Service) NotifyBuildSuccess(
ctx context.Context,
app *models.App,
_ *models.Deployment,
deployment *models.Deployment,
) {
duration := time.Since(deployment.StartedAt)
title := "Build success: " + app.Name
message := "Image built successfully from branch " + app.Branch
message := "Image built successfully in " + formatDuration(duration)
svc.sendNotifications(ctx, app, title, message, "success")
}
@@ -86,11 +100,13 @@ func (svc *Service) NotifyBuildSuccess(
func (svc *Service) NotifyBuildFailed(
ctx context.Context,
app *models.App,
_ *models.Deployment,
deployment *models.Deployment,
buildErr error,
) {
duration := time.Since(deployment.StartedAt)
title := "Build failed: " + app.Name
message := "Build failed: " + buildErr.Error()
message := "Build failed after " + formatDuration(duration) + ": " + buildErr.Error()
svc.sendNotifications(ctx, app, title, message, "error")
}
@@ -98,10 +114,17 @@ func (svc *Service) NotifyBuildFailed(
func (svc *Service) NotifyDeploySuccess(
ctx context.Context,
app *models.App,
_ *models.Deployment,
deployment *models.Deployment,
) {
duration := time.Since(deployment.StartedAt)
title := "Deploy success: " + app.Name
message := "Successfully deployed from branch " + app.Branch
message := "Successfully deployed in " + formatDuration(duration)
if deployment.CommitSHA.Valid {
shortSHA := deployment.CommitSHA.String[:minInt(shortCommitLength, len(deployment.CommitSHA.String))]
message += " (commit " + shortSHA + ")"
}
svc.sendNotifications(ctx, app, title, message, "success")
}
@@ -109,14 +132,37 @@ func (svc *Service) NotifyDeploySuccess(
func (svc *Service) NotifyDeployFailed(
ctx context.Context,
app *models.App,
_ *models.Deployment,
deployment *models.Deployment,
deployErr error,
) {
duration := time.Since(deployment.StartedAt)
title := "Deploy failed: " + app.Name
message := "Deployment failed: " + deployErr.Error()
message := "Deployment failed after " + formatDuration(duration) + ": " + deployErr.Error()
svc.sendNotifications(ctx, app, title, message, "error")
}
// formatDuration renders a duration for notification messages: "Ns" for
// durations under one minute, otherwise "Mm Ss".
func formatDuration(d time.Duration) string {
	const secsPerMin = 60

	totalSeconds := int(d.Seconds())
	if d < time.Minute {
		return fmt.Sprintf("%ds", totalSeconds)
	}
	return fmt.Sprintf("%dm %ds", totalSeconds/secsPerMin, totalSeconds%secsPerMin)
}
// minInt returns the smaller of two integers.
//
// It delegates to the built-in min (Go 1.21+), which the module toolchain
// appears to require already; the named wrapper is kept so existing callers
// are unchanged.
func minInt(a, b int) int {
	return min(a, b)
}
func (svc *Service) sendNotifications(
ctx context.Context,
app *models.App,
@@ -153,7 +199,7 @@ func (svc *Service) sendNotifications(
// even if the parent context is cancelled.
notifyCtx := context.WithoutCancel(ctx)
slackErr := svc.sendSlack(notifyCtx, slackWebhook, title, message)
slackErr := svc.sendSlack(notifyCtx, slackWebhook, title, message, priority)
if slackErr != nil {
svc.log.Error(
"failed to send slack notification",
@@ -213,6 +259,19 @@ func (svc *Service) ntfyPriority(priority string) string {
}
}
// slackColor maps a notification priority to the Slack attachment color bar.
// Unknown priorities fall back to gray.
func (svc *Service) slackColor(priority string) string {
	colors := map[string]string{
		"error":   "#dc3545", // red
		"success": "#28a745", // green
		"info":    "#17a2b8", // blue
	}
	if color, ok := colors[priority]; ok {
		return color
	}
	return "#6c757d" // gray
}
// SlackPayload represents a Slack webhook payload.
type SlackPayload struct {
Text string `json:"text"`
@@ -228,7 +287,7 @@ type SlackAttachment struct {
func (svc *Service) sendSlack(
ctx context.Context,
webhookURL, title, message string,
webhookURL, title, message, priority string,
) error {
svc.log.Debug(
"sending slack notification",
@@ -239,7 +298,7 @@ func (svc *Service) sendSlack(
payload := SlackPayload{
Attachments: []SlackAttachment{
{
Color: "#36a64f",
Color: svc.slackColor(priority),
Title: title,
Text: message,
},