Fix real-time build log streaming and scroll behavior

- Use line-by-line reading for Docker build output instead of io.Copy
  to ensure each log line is written immediately without buffering
- Add isNearBottom() helper to check scroll position before auto-scroll
- Only auto-scroll logs if user was already near bottom (better UX)
- Use requestAnimationFrame for smoother scroll-to-bottom animation
This commit is contained in:
Jeffrey Paul 2025-12-31 14:44:15 -08:00
parent f1cc7d65a6
commit d2f2747ae6
2 changed files with 80 additions and 22 deletions

View File

@@ -2,6 +2,7 @@
package docker
import (
"bufio"
"context"
"errors"
"fmt"
@@ -489,15 +490,10 @@ func (c *Client) performBuild(
}
}()
// Read build output - write to stdout and optional log writer
var output io.Writer = os.Stdout
if opts.LogWriter != nil {
output = io.MultiWriter(os.Stdout, opts.LogWriter)
}
_, err = io.Copy(output, resp.Body)
// Stream build output line by line for real-time log updates
err = c.streamBuildOutput(resp.Body, opts.LogWriter)
if err != nil {
return "", fmt.Errorf("failed to read build output: %w", err)
return "", err
}
// Get image ID
@@ -513,6 +509,41 @@ func (c *Client) performBuild(
return "", nil
}
// scannerInitialBufferSize is the initial buffer size, in bytes, for the
// build log scanner; the scanner grows it on demand up to scannerMaxBufferSize.
const scannerInitialBufferSize = 64 * 1024 // 64KB

// scannerMaxBufferSize is the max buffer size for a single build log line.
// Docker build output can carry very long lines (base64-encoded layer data);
// a line longer than this makes bufio.Scanner stop with bufio.ErrTooLong.
const scannerMaxBufferSize = 1024 * 1024 // 1MB
// streamBuildOutput reads Docker build output line by line and writes each
// line to stdout and, when provided, to logWriter. Docker sends
// newline-delimited JSON, so emitting one line per read keeps log viewers
// updated in real time instead of waiting on an intermediate buffer.
//
// A bufio.Reader is used rather than bufio.Scanner: a single oversized line
// (base64-encoded layer data can exceed any fixed cap) grows the read buffer
// as needed instead of aborting the whole stream with bufio.ErrTooLong.
func (c *Client) streamBuildOutput(body io.Reader, logWriter io.Writer) error {
	reader := bufio.NewReaderSize(body, scannerInitialBufferSize)
	newline := []byte{'\n'}
	for {
		raw, readErr := reader.ReadBytes('\n')
		if len(raw) > 0 {
			// Strip the trailing LF (and a preceding CR, if any) so every
			// line is emitted with a uniform single '\n' terminator.
			end := len(raw)
			if raw[end-1] == '\n' {
				end--
			}
			if end > 0 && raw[end-1] == '\r' {
				end--
			}
			line := raw[:end]
			// Write errors are deliberately ignored: log forwarding must
			// never fail the build itself.
			_, _ = os.Stdout.Write(line)
			_, _ = os.Stdout.Write(newline)
			if logWriter != nil {
				_, _ = logWriter.Write(line)
				_, _ = logWriter.Write(newline)
			}
		}
		if readErr != nil {
			if errors.Is(readErr, io.EOF) {
				// Normal end of stream; a final unterminated line (if any)
				// was already written above.
				return nil
			}
			return fmt.Errorf("failed to read build output: %w", readErr)
		}
	}
}
func (c *Client) performClone(ctx context.Context, cfg *cloneConfig) (*CloneResult, error) {
// Create work directory for clone destination
err := os.MkdirAll(cfg.containerDir, workDirPermissions)

View File

@@ -59,12 +59,23 @@ document.addEventListener("alpine:init", () => {
return status === "building" || status === "deploying";
},
/**
* Check if element is scrolled near the bottom (within threshold)
*/
isNearBottom(el, threshold = 100) {
if (!el) return true;
return el.scrollHeight - el.scrollTop - el.clientHeight < threshold;
},
/**
* Scroll an element to the bottom
*/
scrollToBottom(el) {
if (el) {
// Use requestAnimationFrame for smoother scrolling after DOM update
requestAnimationFrame(() => {
el.scrollTop = el.scrollHeight;
});
}
},
@@ -210,14 +221,18 @@ document.addEventListener("alpine:init", () => {
async fetchContainerLogs() {
try {
const wrapper = this.$refs.containerLogsWrapper;
const wasNearBottom =
Alpine.store("utils").isNearBottom(wrapper);
const res = await fetch(`/apps/${this.appId}/container-logs`);
const data = await res.json();
this.containerLogs = data.logs || "No logs available";
this.containerStatus = data.status;
if (wasNearBottom) {
this.$nextTick(() => {
const wrapper = this.$refs.containerLogsWrapper;
if (wrapper) Alpine.store("utils").scrollToBottom(wrapper);
Alpine.store("utils").scrollToBottom(wrapper);
});
}
} catch (err) {
this.containerLogs = "Failed to fetch logs";
}
@@ -226,16 +241,20 @@ document.addEventListener("alpine:init", () => {
async fetchBuildLogs() {
if (!this.currentDeploymentId) return;
try {
const wrapper = this.$refs.buildLogsWrapper;
const wasNearBottom =
Alpine.store("utils").isNearBottom(wrapper);
const res = await fetch(
`/apps/${this.appId}/deployments/${this.currentDeploymentId}/logs`,
);
const data = await res.json();
this.buildLogs = data.logs || "No build logs available";
this.buildStatus = data.status;
if (wasNearBottom) {
this.$nextTick(() => {
const wrapper = this.$refs.buildLogsWrapper;
if (wrapper) Alpine.store("utils").scrollToBottom(wrapper);
Alpine.store("utils").scrollToBottom(wrapper);
});
}
} catch (err) {
this.buildLogs = "Failed to fetch logs";
}
@@ -364,16 +383,22 @@ document.addEventListener("alpine:init", () => {
async fetchLiveLogs() {
if (!this.currentDeploymentId || !this.isDeploying) return;
try {
const wrapper = this.$refs.liveLogsWrapper;
const wasNearBottom =
Alpine.store("utils").isNearBottom(wrapper);
const res = await fetch(
`/apps/${this.appId}/deployments/${this.currentDeploymentId}/logs`,
);
const data = await res.json();
this.liveLogs = data.logs || "Waiting for logs...";
this.liveStatus = data.status;
if (wasNearBottom) {
this.$nextTick(() => {
const wrapper = this.$refs.liveLogsWrapper;
if (wrapper) Alpine.store("utils").scrollToBottom(wrapper);
Alpine.store("utils").scrollToBottom(wrapper);
});
}
// Update matching deployment card if present
const card = document.querySelector(
@@ -382,11 +407,13 @@ document.addEventListener("alpine:init", () => {
if (card) {
const logsContent = card.querySelector(".logs-content");
const logsWrapper = card.querySelector(".logs-wrapper");
const cardWasNearBottom =
Alpine.store("utils").isNearBottom(logsWrapper);
const statusBadge =
card.querySelector(".deployment-status");
if (logsContent)
logsContent.textContent = data.logs || "Loading...";
if (logsWrapper)
if (logsWrapper && cardWasNearBottom)
Alpine.store("utils").scrollToBottom(logsWrapper);
if (statusBadge) {
statusBadge.className =