Compare commits: 6522ccea75...feature/re (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | ee161fb3a3 | |
Deleted file (name not shown in this view; the contents suggest .dockerignore):

@@ -1,8 +0,0 @@
-.git
-.gitea
-*.md
-LICENSE
-vaultik
-coverage.out
-coverage.html
-.DS_Store
Deleted file (a Gitea Actions workflow; name not shown in this view):

@@ -1,14 +0,0 @@
-name: check
-on:
-  push:
-    branches: [main]
-  pull_request:
-    branches: [main]
-jobs:
-  check:
-    runs-on: ubuntu-latest
-    steps:
-      # actions/checkout v4, 2024-09-16
-      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5
-      - name: Build and check
-        run: docker build .
Dockerfile (61 lines removed)

@@ -1,61 +0,0 @@
-# Lint stage
-# golangci/golangci-lint:v2.11.3-alpine, 2026-03-17
-FROM golangci/golangci-lint:v2.11.3-alpine@sha256:b1c3de5862ad0a95b4e45a993b0f00415835d687e4f12c845c7493b86c13414e AS lint
-
-RUN apk add --no-cache make build-base
-
-WORKDIR /src
-
-# Copy go mod files first for better layer caching
-COPY go.mod go.sum ./
-RUN go mod download
-
-# Copy source code
-COPY . .
-
-# Run formatting check and linter
-RUN make fmt-check
-RUN make lint
-
-# Build stage
-# golang:1.26.1-alpine, 2026-03-17
-FROM golang:1.26.1-alpine@sha256:2389ebfa5b7f43eeafbd6be0c3700cc46690ef842ad962f6c5bd6be49ed82039 AS builder
-
-# Depend on lint stage passing
-COPY --from=lint /src/go.sum /dev/null
-
-ARG VERSION=dev
-
-# Install build dependencies for CGO (mattn/go-sqlite3) and sqlite3 CLI (tests)
-RUN apk add --no-cache make build-base sqlite
-
-WORKDIR /src
-
-# Copy go mod files first for better layer caching
-COPY go.mod go.sum ./
-RUN go mod download
-
-# Copy source code
-COPY . .
-
-# Run tests
-RUN make test
-
-# Build with CGO enabled (required for mattn/go-sqlite3)
-RUN CGO_ENABLED=1 go build -ldflags "-X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=${VERSION}' -X 'git.eeqj.de/sneak/vaultik/internal/globals.Commit=$(git rev-parse HEAD 2>/dev/null || echo unknown)'" -o /vaultik ./cmd/vaultik
-
-# Runtime stage
-# alpine:3.21, 2026-02-25
-FROM alpine:3.21@sha256:c3f8e73fdb79deaebaa2037150150191b9dcbfba68b4a46d70103204c53f4709
-
-RUN apk add --no-cache ca-certificates sqlite
-
-# Copy binary from builder
-COPY --from=builder /vaultik /usr/local/bin/vaultik
-
-# Create non-root user
-RUN adduser -D -H -s /sbin/nologin vaultik
-
-USER vaultik
-
-ENTRYPOINT ["/usr/local/bin/vaultik"]
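A note on the deleted Dockerfile: the line COPY --from=lint /src/go.sum /dev/null copies a throwaway artifact out of the lint stage purely to make the builder stage depend on it, as its own comment says. BuildKit only builds the stages a target actually references, so without that copy the lint stage would be skipped entirely and a lint failure could not fail the image build.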
Makefile (40 lines changed)

@@ -1,4 +1,4 @@
-.PHONY: test fmt lint fmt-check check build clean all docker hooks
+.PHONY: test fmt lint build clean all

 # Version number
 VERSION := 0.0.1
@@ -14,12 +14,21 @@ LDFLAGS := -X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=$(VERSION)' \
 all: vaultik

 # Run tests
-test:
-	go test -race -timeout 30s ./...
+test: lint fmt-check
+	@echo "Running tests..."
+	@if ! go test -v -timeout 10s ./... 2>&1; then \
+		echo ""; \
+		echo "TEST FAILURES DETECTED"; \
+		echo "Run 'go test -v ./internal/database' to see database test details"; \
+		exit 1; \
+	fi

-# Check if code is formatted (read-only)
+# Check if code is formatted
 fmt-check:
-	@test -z "$$(gofmt -l .)" || (echo "Files not formatted:" && gofmt -l . && exit 1)
+	@if [ -n "$$(go fmt ./...)" ]; then \
+		echo "Error: Code is not formatted. Run 'make fmt' to fix."; \
+		exit 1; \
+	fi

 # Format code
 fmt:
@@ -27,7 +36,7 @@ fmt:

 # Run linter
 lint:
-	golangci-lint run ./...
+	golangci-lint run

 # Build binary
 vaultik: internal/*/*.go cmd/vaultik/*.go
@@ -38,6 +47,11 @@ clean:
 	rm -f vaultik
 	go clean

+# Install dependencies
+deps:
+	go mod download
+	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
 # Run tests with coverage
 test-coverage:
 	go test -v -coverprofile=coverage.out ./...
@@ -53,17 +67,3 @@ local:

 install: vaultik
 	cp ./vaultik $(HOME)/bin/
-
-# Run all checks (formatting, linting, tests) without modifying files
-check: fmt-check lint test
-
-# Build Docker image
-docker:
-	docker build -t vaultik .
-
-# Install pre-commit hook
-hooks:
-	@printf '#!/bin/sh\nset -e\n' > .git/hooks/pre-commit
-	@printf 'go mod tidy\ngo fmt ./...\ngit diff --exit-code -- go.mod go.sum || { echo "go mod tidy changed files; please stage and retry"; exit 1; }\n' >> .git/hooks/pre-commit
-	@printf 'make check\n' >> .git/hooks/pre-commit
-	@chmod +x .git/hooks/pre-commit
go.mod (2 lines changed)

@@ -1,6 +1,6 @@
 module git.eeqj.de/sneak/vaultik

-go 1.26.1
+go 1.24.4

 require (
 	filippo.io/age v1.2.1
Deleted file (package blobgen tests; name not shown in this view):

@@ -1,64 +0,0 @@
-package blobgen
-
-import (
-	"bytes"
-	"crypto/rand"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-// testRecipient is a static age recipient for tests.
-const testRecipient = "age1cplgrwj77ta54dnmydvvmzn64ltk83ankxl5sww04mrtmu62kv3s89gmvv"
-
-// TestCompressStreamNoDoubleClose is a regression test for issue #28.
-// It verifies that CompressStream does not panic or return an error due to
-// double-closing the underlying blobgen.Writer. Before the fix in PR #33,
-// the explicit Close() on the happy path combined with defer Close() would
-// cause a double close.
-func TestCompressStreamNoDoubleClose(t *testing.T) {
-	input := []byte("regression test data for issue #28 double-close fix")
-	var buf bytes.Buffer
-
-	written, hash, err := CompressStream(&buf, bytes.NewReader(input), 3, []string{testRecipient})
-	require.NoError(t, err, "CompressStream should not return an error")
-	assert.True(t, written > 0, "expected bytes written > 0")
-	assert.NotEmpty(t, hash, "expected non-empty hash")
-	assert.True(t, buf.Len() > 0, "expected non-empty output")
-}
-
-// TestCompressStreamLargeInput exercises CompressStream with a larger payload
-// to ensure no double-close issues surface under heavier I/O.
-func TestCompressStreamLargeInput(t *testing.T) {
-	data := make([]byte, 512*1024) // 512 KB
-	_, err := rand.Read(data)
-	require.NoError(t, err)
-
-	var buf bytes.Buffer
-	written, hash, err := CompressStream(&buf, bytes.NewReader(data), 3, []string{testRecipient})
-	require.NoError(t, err)
-	assert.True(t, written > 0)
-	assert.NotEmpty(t, hash)
-}
-
-// TestCompressStreamEmptyInput verifies CompressStream handles empty input
-// without double-close issues.
-func TestCompressStreamEmptyInput(t *testing.T) {
-	var buf bytes.Buffer
-	_, hash, err := CompressStream(&buf, strings.NewReader(""), 3, []string{testRecipient})
-	require.NoError(t, err)
-	assert.NotEmpty(t, hash)
-}
-
-// TestCompressDataNoDoubleClose mirrors the stream test for CompressData,
-// ensuring the explicit Close + error-path Close pattern is also safe.
-func TestCompressDataNoDoubleClose(t *testing.T) {
-	input := []byte("CompressData regression test for double-close")
-	result, err := CompressData(input, 3, []string{testRecipient})
-	require.NoError(t, err)
-	assert.True(t, result.CompressedSize > 0)
-	assert.True(t, result.UncompressedSize == int64(len(input)))
-	assert.NotEmpty(t, result.SHA256)
-}
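The deleted tests above were regression coverage for a double Close() on blobgen's writer (issue #28): an explicit Close() on the happy path plus a deferred Close() closed the stream twice. A minimal sketch of a close-once guard that makes this pattern safe; the type and names here are hypothetical, not vaultik's actual Writer:

package blobgen

import (
	"io"
	"sync"
)

// onceCloser wraps an io.Closer so that repeated Close calls are safe.
// Hypothetical sketch; vaultik's real Writer is not shown in this diff.
type onceCloser struct {
	c    io.Closer
	once sync.Once
	err  error
}

// Close forwards to the wrapped Closer exactly once; an explicit Close
// followed by a deferred Close both observe the same result.
func (o *onceCloser) Close() error {
	o.once.Do(func() { o.err = o.c.Close() })
	return o.err
}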
Deleted file (package vaultik blob fetch helpers; name not shown in this view):

@@ -1,55 +0,0 @@
-package vaultik
-
-import (
-	"context"
-	"fmt"
-	"io"
-
-	"filippo.io/age"
-	"git.eeqj.de/sneak/vaultik/internal/blobgen"
-)
-
-// FetchAndDecryptBlobResult holds the result of fetching and decrypting a blob.
-type FetchAndDecryptBlobResult struct {
-	Data []byte
-}
-
-// FetchAndDecryptBlob downloads a blob, decrypts it, and returns the plaintext data.
-func (v *Vaultik) FetchAndDecryptBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) (*FetchAndDecryptBlobResult, error) {
-	rc, _, err := v.FetchBlob(ctx, blobHash, expectedSize)
-	if err != nil {
-		return nil, err
-	}
-	defer func() { _ = rc.Close() }()
-
-	reader, err := blobgen.NewReader(rc, identity)
-	if err != nil {
-		return nil, fmt.Errorf("creating blob reader: %w", err)
-	}
-	defer func() { _ = reader.Close() }()
-
-	data, err := io.ReadAll(reader)
-	if err != nil {
-		return nil, fmt.Errorf("reading blob data: %w", err)
-	}
-
-	return &FetchAndDecryptBlobResult{Data: data}, nil
-}
-
-// FetchBlob downloads a blob and returns a reader for the encrypted data.
-func (v *Vaultik) FetchBlob(ctx context.Context, blobHash string, expectedSize int64) (io.ReadCloser, int64, error) {
-	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
-
-	rc, err := v.Storage.Get(ctx, blobPath)
-	if err != nil {
-		return nil, 0, fmt.Errorf("downloading blob %s: %w", blobHash[:16], err)
-	}
-
-	info, err := v.Storage.Stat(ctx, blobPath)
-	if err != nil {
-		_ = rc.Close()
-		return nil, 0, fmt.Errorf("stat blob %s: %w", blobHash[:16], err)
-	}
-
-	return rc, info.Size, nil
-}
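Both the deleted FetchBlob above and the re-added copies later in this diff shard blob keys as blobs/<first two hex chars>/<next two>/<full hash>. With a hex digest this fans objects out across 256x256 directories, so no single prefix accumulates millions of keys. A standalone sketch of the scheme:

package main

import "fmt"

// blobPath reproduces the sharding used throughout this diff.
func blobPath(blobHash string) string {
	return fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
}

func main() {
	fmt.Println(blobPath("deadbeef1234"))
	// prints: blobs/de/ad/deadbeef1234
}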
Changed file (blob disk cache; name not shown in this view):

@@ -7,6 +7,9 @@ import (
 	"sync"
 )

+// defaultMaxBlobCacheBytes is the default maximum size of the disk blob cache (10 GB).
+const defaultMaxBlobCacheBytes = 10 << 30 // 10 GiB
+
 // blobDiskCacheEntry tracks a cached blob on disk.
 type blobDiskCacheEntry struct {
 	key string
Changed file (prune code; name not shown in this view):

@@ -35,7 +35,6 @@ func (v *Vaultik) PruneBlobs(opts *PruneOptions) error {
 	log.Info("Listing remote snapshots")
 	objectCh := v.Storage.ListStream(v.ctx, "metadata/")

-	seen := make(map[string]bool)
 	var snapshotIDs []string
 	for object := range objectCh {
 		if object.Err != nil {
@@ -48,8 +47,15 @@ func (v *Vaultik) PruneBlobs(opts *PruneOptions) error {
 		// Check if this is a directory by looking for trailing slash
 		if strings.HasSuffix(object.Key, "/") || strings.Contains(object.Key, "/manifest.json.zst") {
 			snapshotID := parts[1]
-			if !seen[snapshotID] {
-				seen[snapshotID] = true
+			// Only add unique snapshot IDs
+			found := false
+			for _, id := range snapshotIDs {
+				if id == snapshotID {
+					found = true
+					break
+				}
+			}
+			if !found {
 				snapshotIDs = append(snapshotIDs, snapshotID)
 			}
 		}
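For context on the hunk above: the removed code deduplicated snapshot IDs with a map, giving O(1) membership checks, while the replacement rescans the accumulated slice for every object, which is O(n) per check and O(n^2) overall. The map-based pattern, as a standalone sketch:

package main

import "fmt"

// uniqueIDs removes duplicates while preserving order, using a map for
// constant-time membership checks (the pattern the hunk above removes).
func uniqueIDs(ids []string) []string {
	seen := make(map[string]bool)
	var out []string
	for _, id := range ids {
		if !seen[id] {
			seen[id] = true
			out = append(out, id)
		}
	}
	return out
}

func main() {
	fmt.Println(uniqueIDs([]string{"a", "b", "a", "c", "b"})) // [a b c]
}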
Changed file (restore code; name not shown in this view):

@@ -22,13 +22,6 @@ import (
 	"golang.org/x/term"
 )

-const (
-	// progressBarWidth is the character width of the progress bar display.
-	progressBarWidth = 40
-	// progressBarThrottle is the minimum interval between progress bar redraws.
-	progressBarThrottle = 100 * time.Millisecond
-)
-
 // RestoreOptions contains options for the restore operation
 type RestoreOptions struct {
 	SnapshotID string
@@ -116,47 +109,75 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {

 	// Step 5: Restore files
 	result := &RestoreResult{}
-	blobCache, err := newBlobDiskCache(4 * v.Config.BlobSizeLimit.Int64())
+	blobCache, err := newBlobDiskCache(defaultMaxBlobCacheBytes)
 	if err != nil {
 		return fmt.Errorf("creating blob cache: %w", err)
 	}
 	defer func() { _ = blobCache.Close() }()

 	// Calculate total bytes for progress bar
-	var totalBytesExpected int64
+	var totalBytes int64
 	for _, file := range files {
-		totalBytesExpected += file.Size
+		totalBytes += file.Size
 	}

-	// Create progress bar if output is a terminal
-	bar := v.newProgressBar("Restoring", totalBytesExpected)
+	_, _ = fmt.Fprintf(v.Stdout, "Restoring %d files (%s)...\n",
+		len(files),
+		humanize.Bytes(uint64(totalBytes)),
+	)

-	for i, file := range files {
+	// Create progress bar if stderr is a terminal
+	isTTY := isTerminal(v.Stderr)
+	var bar *progressbar.ProgressBar
+	if isTTY {
+		bar = progressbar.NewOptions64(
+			totalBytes,
+			progressbar.OptionSetDescription("Restoring"),
+			progressbar.OptionSetWriter(v.Stderr),
+			progressbar.OptionShowBytes(true),
+			progressbar.OptionShowCount(),
+			progressbar.OptionSetWidth(40),
+			progressbar.OptionThrottle(100*time.Millisecond),
+			progressbar.OptionOnCompletion(func() {
+				v.printlnStderr()
+			}),
+			progressbar.OptionSetRenderBlankState(true),
+		)
+	}
+
+	filesProcessed := 0
+	for _, file := range files {
 		if v.ctx.Err() != nil {
 			return v.ctx.Err()
 		}

 		if err := v.restoreFile(v.ctx, repos, file, opts.TargetDir, identity, chunkToBlobMap, blobCache, result); err != nil {
 			log.Error("Failed to restore file", "path", file.Path, "error", err)
-			result.FilesFailed++
-			result.FailedFiles = append(result.FailedFiles, file.Path.String())
+			filesProcessed++
 			// Update progress bar even on failure
 			if bar != nil {
 				_ = bar.Add64(file.Size)
 			}
+			// Periodic structured log for non-terminal contexts (headless/CI)
+			if !isTTY && filesProcessed%100 == 0 {
+				log.Info("Restore progress",
+					"files", fmt.Sprintf("%d/%d", filesProcessed, len(files)),
+					"bytes_restored", humanize.Bytes(uint64(result.BytesRestored)),
+				)
+			}
 			continue
 		}

+		filesProcessed++
 		// Update progress bar
 		if bar != nil {
 			_ = bar.Add64(file.Size)
 		}
-		// Progress logging (for non-terminal or structured logs)
-		if (i+1)%100 == 0 || i+1 == len(files) {
+		// Periodic structured log for non-terminal contexts (headless/CI)
+		if !isTTY && (filesProcessed%100 == 0 || filesProcessed == len(files)) {
 			log.Info("Restore progress",
-				"files", fmt.Sprintf("%d/%d", i+1, len(files)),
-				"bytes", humanize.Bytes(uint64(result.BytesRestored)),
+				"files", fmt.Sprintf("%d/%d", filesProcessed, len(files)),
+				"bytes_restored", humanize.Bytes(uint64(result.BytesRestored)),
 			)
 		}
 	}
@@ -181,13 +202,6 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
 		result.Duration.Round(time.Second),
 	)

-	if result.FilesFailed > 0 {
-		_, _ = fmt.Fprintf(v.Stdout, "\nWARNING: %d file(s) failed to restore:\n", result.FilesFailed)
-		for _, path := range result.FailedFiles {
-			_, _ = fmt.Fprintf(v.Stdout, " - %s\n", path)
-		}
-	}
-
 	// Run verification if requested
 	if opts.Verify {
 		if err := v.verifyRestoredFiles(v.ctx, repos, files, opts.TargetDir, result); err != nil {
@@ -208,10 +222,6 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
 		)
 	}

-	if result.FilesFailed > 0 {
-		return fmt.Errorf("%d file(s) failed to restore", result.FilesFailed)
-	}
-
 	return nil
 }

@@ -520,6 +530,53 @@ func (v *Vaultik) restoreRegularFile(
 	return nil
 }

+// BlobFetchResult holds the result of fetching and decrypting a blob.
+type BlobFetchResult struct {
+	Data           []byte
+	CompressedSize int64
+}
+
+// FetchAndDecryptBlob downloads a blob from storage, decrypts and decompresses it.
+func (v *Vaultik) FetchAndDecryptBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) (*BlobFetchResult, error) {
+	// Construct blob path with sharding
+	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
+
+	reader, err := v.Storage.Get(ctx, blobPath)
+	if err != nil {
+		return nil, fmt.Errorf("downloading blob: %w", err)
+	}
+	defer func() { _ = reader.Close() }()
+
+	// Read encrypted data
+	encryptedData, err := io.ReadAll(reader)
+	if err != nil {
+		return nil, fmt.Errorf("reading blob data: %w", err)
+	}
+
+	// Decrypt and decompress
+	blobReader, err := blobgen.NewReader(bytes.NewReader(encryptedData), identity)
+	if err != nil {
+		return nil, fmt.Errorf("creating decryption reader: %w", err)
+	}
+	defer func() { _ = blobReader.Close() }()
+
+	data, err := io.ReadAll(blobReader)
+	if err != nil {
+		return nil, fmt.Errorf("decrypting blob: %w", err)
+	}
+
+	log.Debug("Downloaded and decrypted blob",
+		"hash", blobHash[:16],
+		"encrypted_size", humanize.Bytes(uint64(len(encryptedData))),
+		"decrypted_size", humanize.Bytes(uint64(len(data))),
+	)
+
+	return &BlobFetchResult{
+		Data:           data,
+		CompressedSize: int64(len(encryptedData)),
+	}, nil
+}
+
 // downloadBlob downloads and decrypts a blob
 func (v *Vaultik) downloadBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) ([]byte, error) {
 	result, err := v.FetchAndDecryptBlob(ctx, blobHash, expectedSize, identity)
@@ -564,7 +621,22 @@ func (v *Vaultik) verifyRestoredFiles(
 	)

 	// Create progress bar if output is a terminal
-	bar := v.newProgressBar("Verifying", totalBytes)
+	var bar *progressbar.ProgressBar
+	if isTerminal(v.Stderr) {
+		bar = progressbar.NewOptions64(
+			totalBytes,
+			progressbar.OptionSetDescription("Verifying"),
+			progressbar.OptionSetWriter(v.Stderr),
+			progressbar.OptionShowBytes(true),
+			progressbar.OptionShowCount(),
+			progressbar.OptionSetWidth(40),
+			progressbar.OptionThrottle(100*time.Millisecond),
+			progressbar.OptionOnCompletion(func() {
+				v.printfStderr("\n")
+			}),
+			progressbar.OptionSetRenderBlankState(true),
+		)
+	}

 	// Verify each file
 	for _, file := range regularFiles {
@@ -658,37 +730,11 @@ func (v *Vaultik) verifyFile(
 	return bytesVerified, nil
 }

-// newProgressBar creates a terminal-aware progress bar with standard options.
-// It returns nil if stdout is not a terminal.
-func (v *Vaultik) newProgressBar(description string, total int64) *progressbar.ProgressBar {
-	if !v.isTerminal() {
-		return nil
-	}
-	return progressbar.NewOptions64(
-		total,
-		progressbar.OptionSetDescription(description),
-		progressbar.OptionSetWriter(v.Stderr),
-		progressbar.OptionShowBytes(true),
-		progressbar.OptionShowCount(),
-		progressbar.OptionSetWidth(progressBarWidth),
-		progressbar.OptionThrottle(progressBarThrottle),
-		progressbar.OptionOnCompletion(func() {
-			v.printfStderr("\n")
-		}),
-		progressbar.OptionSetRenderBlankState(true),
-	)
-}
-
-// isTerminal returns true if stdout is a terminal.
-// It checks whether v.Stdout implements Fd() (i.e. is an *os.File),
-// and falls back to false for non-file writers (e.g. in tests).
-func (v *Vaultik) isTerminal() bool {
-	type fder interface {
-		Fd() uintptr
-	}
-	f, ok := v.Stdout.(fder)
-	if !ok {
-		return false
-	}
-	return term.IsTerminal(int(f.Fd()))
-}
+// isTerminal returns true if the given writer is connected to a terminal.
+// Returns false if the writer does not expose a file descriptor (e.g. in tests).
+func isTerminal(w io.Writer) bool {
+	if f, ok := w.(*os.File); ok {
+		return term.IsTerminal(int(f.Fd()))
+	}
+	return false
+}
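The restore changes above follow a common CLI pattern: render an interactive progress bar only when stderr is a terminal, and fall back to periodic structured log lines otherwise (headless or CI runs). A compact sketch of just the selection logic, independent of the progressbar package:

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/term"
)

// isTerminal mirrors the helper added in the diff: a writer counts as a
// terminal only if it is an *os.File whose descriptor is a TTY. Writers
// used in tests (e.g. bytes.Buffer) have no descriptor and return false.
func isTerminal(w io.Writer) bool {
	if f, ok := w.(*os.File); ok {
		return term.IsTerminal(int(f.Fd()))
	}
	return false
}

func main() {
	if isTerminal(os.Stderr) {
		fmt.Println("interactive: draw a progress bar")
	} else {
		fmt.Println("headless: emit a log line every N files instead")
	}
}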
Changed file (snapshot code; name not shown in this view):

@@ -90,24 +90,6 @@ func (v *Vaultik) CreateSnapshot(opts *SnapshotCreateOptions) error {
 		v.printfStdout("\nAll %d snapshots completed in %s\n", len(snapshotNames), time.Since(overallStartTime).Round(time.Second))
 	}

-	// Prune old snapshots and unreferenced blobs if --prune was specified
-	if opts.Prune {
-		log.Info("Pruning enabled - deleting old snapshots and unreferenced blobs")
-		v.printlnStdout("\nPruning old snapshots (keeping latest)...")
-
-		if err := v.PurgeSnapshots(true, "", true); err != nil {
-			return fmt.Errorf("prune: purging old snapshots: %w", err)
-		}
-
-		v.printlnStdout("Pruning unreferenced blobs...")
-
-		if err := v.PruneBlobs(&PruneOptions{Force: true}); err != nil {
-			return fmt.Errorf("prune: removing unreferenced blobs: %w", err)
-		}
-
-		log.Info("Pruning complete")
-	}
-
 	return nil
 }

@@ -324,6 +306,11 @@ func (v *Vaultik) createNamedSnapshot(opts *SnapshotCreateOptions, hostname, sna
 	}
 	v.printfStdout("Duration: %s\n", formatDuration(snapshotDuration))

+	if opts.Prune {
+		log.Info("Pruning enabled - will delete old snapshots after snapshot")
+		// TODO: Implement pruning
+	}
+
 	return nil
 }

@@ -913,7 +900,6 @@ func (v *Vaultik) RemoveAllSnapshots(opts *RemoveOptions) (*RemoveResult, error)
 	log.Info("Listing all snapshots")
 	objectCh := v.Storage.ListStream(v.ctx, "metadata/")

-	seen := make(map[string]bool)
 	var snapshotIDs []string
 	for object := range objectCh {
 		if object.Err != nil {
@@ -928,8 +914,14 @@ func (v *Vaultik) RemoveAllSnapshots(opts *RemoveOptions) (*RemoveResult, error)
 		}
 		if strings.HasSuffix(object.Key, "/") || strings.Contains(object.Key, "/manifest.json.zst") {
 			sid := parts[1]
-			if !seen[sid] {
-				seen[sid] = true
+			found := false
+			for _, id := range snapshotIDs {
+				if id == sid {
+					found = true
+					break
+				}
+			}
+			if !found {
 				snapshotIDs = append(snapshotIDs, sid)
 			}
 		}
@@ -1012,16 +1004,16 @@ func (v *Vaultik) deleteSnapshotFromLocalDB(snapshotID string) error {

 	// Delete related records first to avoid foreign key constraints
 	if err := v.Repositories.Snapshots.DeleteSnapshotFiles(v.ctx, snapshotID); err != nil {
-		return fmt.Errorf("deleting snapshot files for %s: %w", snapshotID, err)
+		log.Error("Failed to delete snapshot files", "snapshot_id", snapshotID, "error", err)
 	}
 	if err := v.Repositories.Snapshots.DeleteSnapshotBlobs(v.ctx, snapshotID); err != nil {
-		return fmt.Errorf("deleting snapshot blobs for %s: %w", snapshotID, err)
+		log.Error("Failed to delete snapshot blobs", "snapshot_id", snapshotID, "error", err)
 	}
 	if err := v.Repositories.Snapshots.DeleteSnapshotUploads(v.ctx, snapshotID); err != nil {
-		return fmt.Errorf("deleting snapshot uploads for %s: %w", snapshotID, err)
+		log.Error("Failed to delete snapshot uploads", "snapshot_id", snapshotID, "error", err)
 	}
 	if err := v.Repositories.Snapshots.Delete(v.ctx, snapshotID); err != nil {
-		return fmt.Errorf("deleting snapshot record %s: %w", snapshotID, err)
+		log.Error("Failed to delete snapshot record", "snapshot_id", snapshotID, "error", err)
 	}

 	return nil
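One behavioural note on the deleteSnapshotFromLocalDB hunk above: the removed lines returned errors wrapped with %w, so a failed delete aborted the function and callers could match the cause; the replacement logs each failure and falls through to return nil. A small sketch (with hypothetical stand-in names) of what %w wrapping gives callers:

package main

import (
	"errors"
	"fmt"
)

var errLocked = errors.New("database is locked")

// deleteFiles is a stand-in for a repository call that can fail.
func deleteFiles(snapshotID string) error { return errLocked }

// deleteAll wraps the failure with context; the %w verb preserves the
// underlying error so callers can still match it with errors.Is.
func deleteAll(snapshotID string) error {
	if err := deleteFiles(snapshotID); err != nil {
		return fmt.Errorf("deleting snapshot files for %s: %w", snapshotID, err)
	}
	return nil
}

func main() {
	err := deleteAll("snap-1")
	fmt.Println(err)                       // deleting snapshot files for snap-1: database is locked
	fmt.Println(errors.Is(err, errLocked)) // true
}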
Deleted file (package vaultik test; name not shown in this view):

@@ -1,23 +0,0 @@
-package vaultik
-
-import (
-	"testing"
-)
-
-// TestSnapshotCreateOptions_PruneFlag verifies the Prune field exists on
-// SnapshotCreateOptions and can be set.
-func TestSnapshotCreateOptions_PruneFlag(t *testing.T) {
-	opts := &SnapshotCreateOptions{
-		Prune: true,
-	}
-	if !opts.Prune {
-		t.Error("Expected Prune to be true")
-	}
-
-	opts2 := &SnapshotCreateOptions{
-		Prune: false,
-	}
-	if opts2.Prune {
-		t.Error("Expected Prune to be false")
-	}
-}
Changed file (Vaultik I/O helpers; name not shown in this view):

@@ -129,7 +129,7 @@ func (v *Vaultik) GetFilesystem() afero.Fs {
 	return v.Fs
 }

-// printfStdout writes formatted output to stdout.
+// printfStdout writes formatted output to stdout for user-facing messages.
 func (v *Vaultik) printfStdout(format string, args ...any) {
 	_, _ = fmt.Fprintf(v.Stdout, format, args...)
 }
@@ -139,11 +139,28 @@ func (v *Vaultik) printlnStdout(args ...any) {
 	_, _ = fmt.Fprintln(v.Stdout, args...)
 }

+// FetchBlob downloads a blob from storage and returns a reader for the encrypted data.
+func (v *Vaultik) FetchBlob(ctx context.Context, blobHash string, expectedSize int64) (io.ReadCloser, int64, error) {
+	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
+
+	reader, err := v.Storage.Get(ctx, blobPath)
+	if err != nil {
+		return nil, 0, fmt.Errorf("downloading blob: %w", err)
+	}
+
+	return reader, expectedSize, nil
+}
+
 // printfStderr writes formatted output to stderr.
 func (v *Vaultik) printfStderr(format string, args ...any) {
 	_, _ = fmt.Fprintf(v.Stderr, format, args...)
 }

+// printlnStderr writes a line to stderr.
+func (v *Vaultik) printlnStderr(args ...any) {
+	_, _ = fmt.Fprintln(v.Stderr, args...)
+}
+
 // scanStdin reads a line of input from stdin.
 func (v *Vaultik) scanStdin(a ...any) (int, error) {
 	return fmt.Fscanln(v.Stdin, a...)