Compare commits
25 commits: fix/issue-... → c76a357570

c76a357570
c24e7e6360
7a5943958d
d8a51804d2
76f4421eb3
53ac868c5d
8c4ea2b870
597b560398
1e2eced092
815b35c7ae
9c66674683
49de277648
ed5d777d05
76e047bbb2
2e7356dd85
70d4fe2aa0
2f249e3ddd
3f834f1c9c
9879668c31
0a0d9f33b0
df0e8c275b
ddc23f8057
cafb3d45b8
d77ac18aaa
825f25da58
.dockerignore (new file, +8)
@@ -0,0 +1,8 @@
.git
.gitea
*.md
LICENSE
vaultik
coverage.out
coverage.html
.DS_Store
.gitea/workflows/check.yml (new file, +14)
@@ -0,0 +1,14 @@
name: check
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      # actions/checkout v4, 2024-09-16
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5
      - name: Build and check
        run: docker build .
Dockerfile (new file, +61)
@@ -0,0 +1,61 @@
# Lint stage
# golangci/golangci-lint:v2.11.3-alpine, 2026-03-17
FROM golangci/golangci-lint:v2.11.3-alpine@sha256:b1c3de5862ad0a95b4e45a993b0f00415835d687e4f12c845c7493b86c13414e AS lint

RUN apk add --no-cache make build-base

WORKDIR /src

# Copy go mod files first for better layer caching
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Run formatting check and linter
RUN make fmt-check
RUN make lint

# Build stage
# golang:1.26.1-alpine, 2026-03-17
FROM golang:1.26.1-alpine@sha256:2389ebfa5b7f43eeafbd6be0c3700cc46690ef842ad962f6c5bd6be49ed82039 AS builder

# Depend on lint stage passing
COPY --from=lint /src/go.sum /dev/null

ARG VERSION=dev

# Install build dependencies for CGO (mattn/go-sqlite3) and sqlite3 CLI (tests)
RUN apk add --no-cache make build-base sqlite

WORKDIR /src

# Copy go mod files first for better layer caching
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Run tests
RUN make test

# Build with CGO enabled (required for mattn/go-sqlite3)
RUN CGO_ENABLED=1 go build -ldflags "-X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=${VERSION}' -X 'git.eeqj.de/sneak/vaultik/internal/globals.Commit=$(git rev-parse HEAD 2>/dev/null || echo unknown)'" -o /vaultik ./cmd/vaultik

# Runtime stage
# alpine:3.21, 2026-02-25
FROM alpine:3.21@sha256:c3f8e73fdb79deaebaa2037150150191b9dcbfba68b4a46d70103204c53f4709

RUN apk add --no-cache ca-certificates sqlite

# Copy binary from builder
COPY --from=builder /vaultik /usr/local/bin/vaultik

# Create non-root user
RUN adduser -D -H -s /sbin/nologin vaultik

USER vaultik

ENTRYPOINT ["/usr/local/bin/vaultik"]
Makefile (modified)
@@ -1,4 +1,4 @@
-.PHONY: test fmt lint build clean all
+.PHONY: test fmt lint fmt-check check build clean all docker hooks

 # Version number
 VERSION := 0.0.1
@@ -14,21 +14,12 @@ LDFLAGS := -X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=$(VERSION)' \
 all: vaultik

 # Run tests
-test: lint fmt-check
-	@echo "Running tests..."
-	@if ! go test -v -timeout 10s ./... 2>&1; then \
-		echo ""; \
-		echo "TEST FAILURES DETECTED"; \
-		echo "Run 'go test -v ./internal/database' to see database test details"; \
-		exit 1; \
-	fi
+test:
+	go test -race -timeout 30s ./...

-# Check if code is formatted
+# Check if code is formatted (read-only)
 fmt-check:
-	@if [ -n "$$(go fmt ./...)" ]; then \
-		echo "Error: Code is not formatted. Run 'make fmt' to fix."; \
-		exit 1; \
-	fi
+	@test -z "$$(gofmt -l .)" || (echo "Files not formatted:" && gofmt -l . && exit 1)

 # Format code
 fmt:
@@ -36,7 +27,7 @@ fmt:

 # Run linter
 lint:
-	golangci-lint run
+	golangci-lint run ./...

 # Build binary
 vaultik: internal/*/*.go cmd/vaultik/*.go
@@ -47,11 +38,6 @@ clean:
 	rm -f vaultik
 	go clean

-# Install dependencies
-deps:
-	go mod download
-	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-
 # Run tests with coverage
 test-coverage:
 	go test -v -coverprofile=coverage.out ./...
@@ -67,3 +53,17 @@ local:

 install: vaultik
 	cp ./vaultik $(HOME)/bin/
+
+# Run all checks (formatting, linting, tests) without modifying files
+check: fmt-check lint test
+
+# Build Docker image
+docker:
+	docker build -t vaultik .
+
+# Install pre-commit hook
+hooks:
+	@printf '#!/bin/sh\nset -e\n' > .git/hooks/pre-commit
+	@printf 'go mod tidy\ngo fmt ./...\ngit diff --exit-code -- go.mod go.sum || { echo "go mod tidy changed files; please stage and retry"; exit 1; }\n' >> .git/hooks/pre-commit
+	@printf 'make check\n' >> .git/hooks/pre-commit
+	@chmod +x .git/hooks/pre-commit
@@ -150,7 +150,7 @@ passphrase is needed or stored locally.
 vaultik [--config <path>] snapshot create [snapshot-names...] [--cron] [--daemon] [--prune]
 vaultik [--config <path>] snapshot list [--json]
 vaultik [--config <path>] snapshot verify <snapshot-id> [--deep]
-vaultik [--config <path>] snapshot purge [--keep-latest | --older-than <duration>] [--force]
+vaultik [--config <path>] snapshot purge [--keep-latest | --older-than <duration>] [--name <name>] [--force]
 vaultik [--config <path>] snapshot remove <snapshot-id> [--dry-run] [--force]
 vaultik [--config <path>] snapshot prune
 vaultik [--config <path>] restore <snapshot-id> <target-dir> [paths...]
@@ -180,8 +180,9 @@ vaultik [--config <path>] store info
 * `--deep`: Download and verify blob contents (not just existence)

 **snapshot purge**: Remove old snapshots based on criteria
-* `--keep-latest`: Keep only the most recent snapshot
+* `--keep-latest`: Keep the most recent snapshot per snapshot name
 * `--older-than`: Remove snapshots older than duration (e.g., 30d, 6mo, 1y)
+* `--name`: Filter purge to a specific snapshot name
 * `--force`: Skip confirmation prompt

 **snapshot remove**: Remove a specific snapshot
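For example, combining the flags documented above to keep only the newest snapshot for the `home` snapshot name while skipping the confirmation prompt (an illustrative invocation, not from this diff):

vaultik snapshot purge --keep-latest --name home --force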
go.mod (modified)
@@ -1,6 +1,6 @@
 module git.eeqj.de/sneak/vaultik

-go 1.24.4
+go 1.26.1

 require (
 	filippo.io/age v1.2.1
internal/blobgen/compress_test.go (new file, +64)
@@ -0,0 +1,64 @@
package blobgen

import (
	"bytes"
	"crypto/rand"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// testRecipient is a static age recipient for tests.
const testRecipient = "age1cplgrwj77ta54dnmydvvmzn64ltk83ankxl5sww04mrtmu62kv3s89gmvv"

// TestCompressStreamNoDoubleClose is a regression test for issue #28.
// It verifies that CompressStream does not panic or return an error due to
// double-closing the underlying blobgen.Writer. Before the fix in PR #33,
// the explicit Close() on the happy path combined with defer Close() would
// cause a double close.
func TestCompressStreamNoDoubleClose(t *testing.T) {
	input := []byte("regression test data for issue #28 double-close fix")
	var buf bytes.Buffer

	written, hash, err := CompressStream(&buf, bytes.NewReader(input), 3, []string{testRecipient})
	require.NoError(t, err, "CompressStream should not return an error")
	assert.True(t, written > 0, "expected bytes written > 0")
	assert.NotEmpty(t, hash, "expected non-empty hash")
	assert.True(t, buf.Len() > 0, "expected non-empty output")
}

// TestCompressStreamLargeInput exercises CompressStream with a larger payload
// to ensure no double-close issues surface under heavier I/O.
func TestCompressStreamLargeInput(t *testing.T) {
	data := make([]byte, 512*1024) // 512 KB
	_, err := rand.Read(data)
	require.NoError(t, err)

	var buf bytes.Buffer
	written, hash, err := CompressStream(&buf, bytes.NewReader(data), 3, []string{testRecipient})
	require.NoError(t, err)
	assert.True(t, written > 0)
	assert.NotEmpty(t, hash)
}

// TestCompressStreamEmptyInput verifies CompressStream handles empty input
// without double-close issues.
func TestCompressStreamEmptyInput(t *testing.T) {
	var buf bytes.Buffer
	_, hash, err := CompressStream(&buf, strings.NewReader(""), 3, []string{testRecipient})
	require.NoError(t, err)
	assert.NotEmpty(t, hash)
}

// TestCompressDataNoDoubleClose mirrors the stream test for CompressData,
// ensuring the explicit Close + error-path Close pattern is also safe.
func TestCompressDataNoDoubleClose(t *testing.T) {
	input := []byte("CompressData regression test for double-close")
	result, err := CompressData(input, 3, []string{testRecipient})
	require.NoError(t, err)
	assert.True(t, result.CompressedSize > 0)
	assert.True(t, result.UncompressedSize == int64(len(input)))
	assert.NotEmpty(t, result.SHA256)
}
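The tests above pin down CompressStream's shape: destination writer, source reader, zstd level, and a list of age recipient strings in; bytes written, a content hash, and an error out. A minimal caller sketch under that assumption, written as same-package code since blobgen is internal to the module (it reuses the test key above; a real caller passes its own recipients):

package blobgen

import (
	"bytes"
	"fmt"
)

// compressSketch compresses and encrypts an in-memory payload at zstd
// level 3 for one age recipient, then reports what was written. Signature
// assumptions are taken from the tests above, not from separate docs.
func compressSketch() error {
	var out bytes.Buffer
	src := bytes.NewReader([]byte("payload"))
	written, hash, err := CompressStream(&out, src, 3, []string{testRecipient})
	if err != nil {
		return err
	}
	fmt.Printf("wrote %d compressed+encrypted bytes, sha256 %s\n", written, hash)
	return nil
}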
@@ -11,16 +11,9 @@ import (
 	"go.uber.org/fx"
 )

-// PurgeOptions contains options for the purge command
-type PurgeOptions struct {
-	KeepLatest bool
-	OlderThan  string
-	Force      bool
-}
-
 // NewPurgeCommand creates the purge command
 func NewPurgeCommand() *cobra.Command {
-	opts := &PurgeOptions{}
+	opts := &vaultik.SnapshotPurgeOptions{}

 	cmd := &cobra.Command{
 		Use:   "purge",
@@ -28,8 +21,15 @@ func NewPurgeCommand() *cobra.Command {
 		Long: `Removes snapshots based on age or count criteria.

 This command allows you to:
-- Keep only the latest snapshot (--keep-latest)
+- Keep only the latest snapshot per name (--keep-latest)
 - Remove snapshots older than a specific duration (--older-than)
+- Filter to a specific snapshot name (--name)
+
+When --keep-latest is used, retention is applied per snapshot name. For example,
+if you have snapshots named "home" and "system", --keep-latest keeps the most
+recent of each.
+
+Use --name to restrict the purge to a single snapshot name.

 Config is located at /etc/vaultik/config.yml by default, but can be overridden by
 specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,
@@ -66,7 +66,7 @@ specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,
 			// Start the purge operation in a goroutine
 			go func() {
 				// Run the purge operation
-				if err := v.PurgeSnapshots(opts.KeepLatest, opts.OlderThan, opts.Force); err != nil {
+				if err := v.PurgeSnapshotsWithOptions(opts); err != nil {
 					if err != context.Canceled {
 						log.Error("Purge operation failed", "error", err)
 						os.Exit(1)
@@ -92,9 +92,10 @@ specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,
 		},
 	}

-	cmd.Flags().BoolVar(&opts.KeepLatest, "keep-latest", false, "Keep only the latest snapshot")
+	cmd.Flags().BoolVar(&opts.KeepLatest, "keep-latest", false, "Keep only the latest snapshot per name")
 	cmd.Flags().StringVar(&opts.OlderThan, "older-than", "", "Remove snapshots older than duration (e.g. 30d, 6m, 1y)")
 	cmd.Flags().BoolVar(&opts.Force, "force", false, "Skip confirmation prompts")
+	cmd.Flags().StringVar(&opts.Name, "name", "", "Filter purge to a specific snapshot name")

 	return cmd
 }
@@ -167,21 +167,25 @@ func newSnapshotListCommand() *cobra.Command {

 // newSnapshotPurgeCommand creates the 'snapshot purge' subcommand
 func newSnapshotPurgeCommand() *cobra.Command {
-	var keepLatest bool
-	var olderThan string
-	var force bool
+	opts := &vaultik.SnapshotPurgeOptions{}

 	cmd := &cobra.Command{
 		Use:   "purge",
 		Short: "Purge old snapshots",
-		Long:  "Removes snapshots based on age or count criteria",
+		Long: `Removes snapshots based on age or count criteria.
+
+When --keep-latest is used, retention is applied per snapshot name. For example,
+if you have snapshots named "home" and "system", --keep-latest keeps the most
+recent of each.
+
+Use --name to restrict the purge to a single snapshot name.`,
 		Args:  cobra.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			// Validate flags
-			if !keepLatest && olderThan == "" {
+			if !opts.KeepLatest && opts.OlderThan == "" {
 				return fmt.Errorf("must specify either --keep-latest or --older-than")
 			}
-			if keepLatest && olderThan != "" {
+			if opts.KeepLatest && opts.OlderThan != "" {
 				return fmt.Errorf("cannot specify both --keep-latest and --older-than")
 			}
@@ -205,7 +209,7 @@ func newSnapshotPurgeCommand() *cobra.Command {
 			lc.Append(fx.Hook{
 				OnStart: func(ctx context.Context) error {
 					go func() {
-						if err := v.PurgeSnapshots(keepLatest, olderThan, force); err != nil {
+						if err := v.PurgeSnapshotsWithOptions(opts); err != nil {
 							if err != context.Canceled {
 								log.Error("Failed to purge snapshots", "error", err)
 								os.Exit(1)
@@ -228,9 +232,10 @@ func newSnapshotPurgeCommand() *cobra.Command {
 		},
 	}

-	cmd.Flags().BoolVar(&keepLatest, "keep-latest", false, "Keep only the latest snapshot")
-	cmd.Flags().StringVar(&olderThan, "older-than", "", "Remove snapshots older than duration (e.g., 30d, 6m, 1y)")
-	cmd.Flags().BoolVar(&force, "force", false, "Skip confirmation prompt")
+	cmd.Flags().BoolVar(&opts.KeepLatest, "keep-latest", false, "Keep only the latest snapshot per name")
+	cmd.Flags().StringVar(&opts.OlderThan, "older-than", "", "Remove snapshots older than duration (e.g., 30d, 6m, 1y)")
+	cmd.Flags().BoolVar(&opts.Force, "force", false, "Skip confirmation prompt")
+	cmd.Flags().StringVar(&opts.Name, "name", "", "Filter purge to a specific snapshot name")

 	return cmd
 }
internal/vaultik/blob_fetch_stub.go (new file, +55)
@@ -0,0 +1,55 @@
package vaultik

import (
	"context"
	"fmt"
	"io"

	"filippo.io/age"
	"git.eeqj.de/sneak/vaultik/internal/blobgen"
)

// FetchAndDecryptBlobResult holds the result of fetching and decrypting a blob.
type FetchAndDecryptBlobResult struct {
	Data []byte
}

// FetchAndDecryptBlob downloads a blob, decrypts it, and returns the plaintext data.
func (v *Vaultik) FetchAndDecryptBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) (*FetchAndDecryptBlobResult, error) {
	rc, _, err := v.FetchBlob(ctx, blobHash, expectedSize)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()

	reader, err := blobgen.NewReader(rc, identity)
	if err != nil {
		return nil, fmt.Errorf("creating blob reader: %w", err)
	}
	defer func() { _ = reader.Close() }()

	data, err := io.ReadAll(reader)
	if err != nil {
		return nil, fmt.Errorf("reading blob data: %w", err)
	}

	return &FetchAndDecryptBlobResult{Data: data}, nil
}

// FetchBlob downloads a blob and returns a reader for the encrypted data.
func (v *Vaultik) FetchBlob(ctx context.Context, blobHash string, expectedSize int64) (io.ReadCloser, int64, error) {
	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)

	rc, err := v.Storage.Get(ctx, blobPath)
	if err != nil {
		return nil, 0, fmt.Errorf("downloading blob %s: %w", blobHash[:16], err)
	}

	info, err := v.Storage.Stat(ctx, blobPath)
	if err != nil {
		_ = rc.Close()
		return nil, 0, fmt.Errorf("stat blob %s: %w", blobHash[:16], err)
	}

	return rc, info.Size, nil
}
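A sketch of how the two new methods compose, assuming a configured *Vaultik v, a context ctx, and an age identity already parsed from the operator's key (blobHash, expectedSize, and identity are hypothetical caller-side names); FetchBlob resolves the sharded blobs/xx/yy/hash path, and FetchAndDecryptBlob layers decryption on top:

// Hypothetical caller: fetch one blob and use its decrypted contents.
res, err := v.FetchAndDecryptBlob(ctx, blobHash, expectedSize, identity)
if err != nil {
	return err
}
plaintext := res.Data // decrypted blob contents
_ = plaintext

// Or stream the still-encrypted bytes without decrypting:
rc, size, err := v.FetchBlob(ctx, blobHash, expectedSize)
if err != nil {
	return err
}
defer func() { _ = rc.Close() }()
_ = size // size reported by storage, useful for progress accounting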
internal/vaultik/blobcache.go (new file, +207)
@@ -0,0 +1,207 @@
package vaultik

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

// blobDiskCacheEntry tracks a cached blob on disk.
type blobDiskCacheEntry struct {
	key  string
	size int64
	prev *blobDiskCacheEntry
	next *blobDiskCacheEntry
}

// blobDiskCache is an LRU cache that stores blobs on disk instead of in memory.
// Blobs are written to a temp directory keyed by their hash. When total size
// exceeds maxBytes, the least-recently-used entries are evicted (deleted from disk).
type blobDiskCache struct {
	mu       sync.Mutex
	dir      string
	maxBytes int64
	curBytes int64
	items    map[string]*blobDiskCacheEntry
	head     *blobDiskCacheEntry // most recent
	tail     *blobDiskCacheEntry // least recent
}

// newBlobDiskCache creates a new disk-based blob cache with the given max size.
func newBlobDiskCache(maxBytes int64) (*blobDiskCache, error) {
	dir, err := os.MkdirTemp("", "vaultik-blobcache-*")
	if err != nil {
		return nil, fmt.Errorf("creating blob cache dir: %w", err)
	}
	return &blobDiskCache{
		dir:      dir,
		maxBytes: maxBytes,
		items:    make(map[string]*blobDiskCacheEntry),
	}, nil
}

func (c *blobDiskCache) path(key string) string {
	return filepath.Join(c.dir, key)
}

func (c *blobDiskCache) unlink(e *blobDiskCacheEntry) {
	if e.prev != nil {
		e.prev.next = e.next
	} else {
		c.head = e.next
	}
	if e.next != nil {
		e.next.prev = e.prev
	} else {
		c.tail = e.prev
	}
	e.prev = nil
	e.next = nil
}

func (c *blobDiskCache) pushFront(e *blobDiskCacheEntry) {
	e.prev = nil
	e.next = c.head
	if c.head != nil {
		c.head.prev = e
	}
	c.head = e
	if c.tail == nil {
		c.tail = e
	}
}

func (c *blobDiskCache) evictLRU() {
	if c.tail == nil {
		return
	}
	victim := c.tail
	c.unlink(victim)
	delete(c.items, victim.key)
	c.curBytes -= victim.size
	_ = os.Remove(c.path(victim.key))
}

// Put writes blob data to disk cache. Entries larger than maxBytes are silently skipped.
func (c *blobDiskCache) Put(key string, data []byte) error {
	entrySize := int64(len(data))

	c.mu.Lock()
	defer c.mu.Unlock()

	if entrySize > c.maxBytes {
		return nil
	}

	// Remove old entry if updating
	if e, ok := c.items[key]; ok {
		c.unlink(e)
		c.curBytes -= e.size
		_ = os.Remove(c.path(key))
		delete(c.items, key)
	}

	if err := os.WriteFile(c.path(key), data, 0600); err != nil {
		return fmt.Errorf("writing blob to cache: %w", err)
	}

	e := &blobDiskCacheEntry{key: key, size: entrySize}
	c.pushFront(e)
	c.items[key] = e
	c.curBytes += entrySize

	for c.curBytes > c.maxBytes && c.tail != nil {
		c.evictLRU()
	}

	return nil
}

// Get reads a cached blob from disk. Returns data and true on hit.
func (c *blobDiskCache) Get(key string) ([]byte, bool) {
	c.mu.Lock()
	e, ok := c.items[key]
	if !ok {
		c.mu.Unlock()
		return nil, false
	}
	c.unlink(e)
	c.pushFront(e)
	c.mu.Unlock()

	data, err := os.ReadFile(c.path(key))
	if err != nil {
		c.mu.Lock()
		if e2, ok2 := c.items[key]; ok2 && e2 == e {
			c.unlink(e)
			delete(c.items, key)
			c.curBytes -= e.size
		}
		c.mu.Unlock()
		return nil, false
	}
	return data, true
}

// ReadAt reads a slice of a cached blob without loading the entire blob into memory.
func (c *blobDiskCache) ReadAt(key string, offset, length int64) ([]byte, error) {
	c.mu.Lock()
	e, ok := c.items[key]
	if !ok {
		c.mu.Unlock()
		return nil, fmt.Errorf("key %q not in cache", key)
	}
	if offset+length > e.size {
		c.mu.Unlock()
		return nil, fmt.Errorf("read beyond blob size: offset=%d length=%d size=%d", offset, length, e.size)
	}
	c.unlink(e)
	c.pushFront(e)
	c.mu.Unlock()

	f, err := os.Open(c.path(key))
	if err != nil {
		return nil, err
	}
	defer func() { _ = f.Close() }()

	buf := make([]byte, length)
	if _, err := f.ReadAt(buf, offset); err != nil {
		return nil, err
	}
	return buf, nil
}

// Has returns whether a key exists in the cache.
func (c *blobDiskCache) Has(key string) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	_, ok := c.items[key]
	return ok
}

// Size returns current total cached bytes.
func (c *blobDiskCache) Size() int64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.curBytes
}

// Len returns number of cached entries.
func (c *blobDiskCache) Len() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return len(c.items)
}

// Close removes the cache directory and all cached blobs.
func (c *blobDiskCache) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = nil
	c.head = nil
	c.tail = nil
	c.curBytes = 0
	return os.RemoveAll(c.dir)
}
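A sketch of the cache's intended lifecycle (the type is unexported, so this only runs inside the package; the budget and the blobHash/plaintext/offset names are illustrative):

// Create a cache with a 64 MiB on-disk budget; Close removes the temp dir.
cache, err := newBlobDiskCache(64 << 20)
if err != nil {
	return err
}
defer func() { _ = cache.Close() }()

// Put stores a decrypted blob; once curBytes exceeds the budget, the
// least-recently-used blobs are deleted from disk.
if err := cache.Put(blobHash, plaintext); err != nil {
	return err
}

// ReadAt serves one chunk without loading the whole blob into memory.
chunk, err := cache.ReadAt(blobHash, chunkOffset, chunkLength)
if err != nil {
	return err
}
_ = chunk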
internal/vaultik/blobcache_test.go (new file, +189)
@@ -0,0 +1,189 @@
package vaultik

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"testing"
)

func TestBlobDiskCache_BasicGetPut(t *testing.T) {
	cache, err := newBlobDiskCache(1 << 20)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = cache.Close() }()

	data := []byte("hello world")
	if err := cache.Put("key1", data); err != nil {
		t.Fatal(err)
	}

	got, ok := cache.Get("key1")
	if !ok {
		t.Fatal("expected cache hit")
	}
	if !bytes.Equal(got, data) {
		t.Fatalf("got %q, want %q", got, data)
	}

	_, ok = cache.Get("nonexistent")
	if ok {
		t.Fatal("expected cache miss")
	}
}

func TestBlobDiskCache_EvictionUnderPressure(t *testing.T) {
	maxBytes := int64(1000)
	cache, err := newBlobDiskCache(maxBytes)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = cache.Close() }()

	for i := 0; i < 5; i++ {
		data := make([]byte, 300)
		if err := cache.Put(fmt.Sprintf("key%d", i), data); err != nil {
			t.Fatal(err)
		}
	}

	if cache.Size() > maxBytes {
		t.Fatalf("cache size %d exceeds max %d", cache.Size(), maxBytes)
	}

	if !cache.Has("key4") {
		t.Fatal("expected key4 to be cached")
	}
	if cache.Has("key0") {
		t.Fatal("expected key0 to be evicted")
	}
}

func TestBlobDiskCache_OversizedEntryRejected(t *testing.T) {
	cache, err := newBlobDiskCache(100)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = cache.Close() }()

	data := make([]byte, 200)
	if err := cache.Put("big", data); err != nil {
		t.Fatal(err)
	}

	if cache.Has("big") {
		t.Fatal("oversized entry should not be cached")
	}
}

func TestBlobDiskCache_UpdateInPlace(t *testing.T) {
	cache, err := newBlobDiskCache(1 << 20)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = cache.Close() }()

	if err := cache.Put("key1", []byte("v1")); err != nil {
		t.Fatal(err)
	}
	if err := cache.Put("key1", []byte("version2")); err != nil {
		t.Fatal(err)
	}

	got, ok := cache.Get("key1")
	if !ok {
		t.Fatal("expected hit")
	}
	if string(got) != "version2" {
		t.Fatalf("got %q, want %q", got, "version2")
	}
	if cache.Len() != 1 {
		t.Fatalf("expected 1 entry, got %d", cache.Len())
	}
	if cache.Size() != int64(len("version2")) {
		t.Fatalf("expected size %d, got %d", len("version2"), cache.Size())
	}
}

func TestBlobDiskCache_ReadAt(t *testing.T) {
	cache, err := newBlobDiskCache(1 << 20)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = cache.Close() }()

	data := make([]byte, 1024)
	if _, err := rand.Read(data); err != nil {
		t.Fatal(err)
	}
	if err := cache.Put("blob1", data); err != nil {
		t.Fatal(err)
	}

	chunk, err := cache.ReadAt("blob1", 100, 200)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(chunk, data[100:300]) {
		t.Fatal("ReadAt returned wrong data")
	}

	_, err = cache.ReadAt("blob1", 900, 200)
	if err == nil {
		t.Fatal("expected error for out-of-bounds read")
	}

	_, err = cache.ReadAt("missing", 0, 10)
	if err == nil {
		t.Fatal("expected error for missing key")
	}
}

func TestBlobDiskCache_Close(t *testing.T) {
	cache, err := newBlobDiskCache(1 << 20)
	if err != nil {
		t.Fatal(err)
	}

	if err := cache.Put("key1", []byte("data")); err != nil {
		t.Fatal(err)
	}
	if err := cache.Close(); err != nil {
		t.Fatal(err)
	}
}

func TestBlobDiskCache_LRUOrder(t *testing.T) {
	cache, err := newBlobDiskCache(200)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = cache.Close() }()

	d := make([]byte, 100)
	if err := cache.Put("a", d); err != nil {
		t.Fatal(err)
	}
	if err := cache.Put("b", d); err != nil {
		t.Fatal(err)
	}

	// Access "a" to make it most recently used
	cache.Get("a")

	// Adding "c" should evict "b" (LRU), not "a"
	if err := cache.Put("c", d); err != nil {
		t.Fatal(err)
	}

	if !cache.Has("a") {
		t.Fatal("expected 'a' to survive")
	}
	if !cache.Has("c") {
		t.Fatal("expected 'c' to be present")
	}
	if cache.Has("b") {
		t.Fatal("expected 'b' to be evicted")
	}
}
@@ -79,6 +79,21 @@ func parseSnapshotTimestamp(snapshotID string) (time.Time, error) {
 	return timestamp.UTC(), nil
 }

+// parseSnapshotName extracts the snapshot name from a snapshot ID.
+// Format: hostname_snapshotname_timestamp (3 parts) or hostname_timestamp (2 parts, no name).
+// Returns the snapshot name, or empty string if no name component is present.
+func parseSnapshotName(snapshotID string) string {
+	parts := strings.Split(snapshotID, "_")
+	if len(parts) < 3 {
+		// Format: hostname_timestamp — no snapshot name
+		return ""
+	}
+	// Format: hostname_name_timestamp — middle parts are the name.
+	// The last part is the RFC3339 timestamp, the first part is the hostname,
+	// everything in between is the snapshot name (which may itself contain underscores).
+	return strings.Join(parts[1:len(parts)-1], "_")
+}
+
 // parseDuration parses a duration string with support for days
 func parseDuration(s string) (time.Duration, error) {
 	// Check for days suffix
internal/vaultik/helpers_test.go (new file, +119)
@@ -0,0 +1,119 @@
package vaultik

import (
	"testing"
)

func TestParseSnapshotName(t *testing.T) {
	tests := []struct {
		name       string
		snapshotID string
		want       string
	}{
		{
			name:       "standard format with name",
			snapshotID: "myhost_home_2026-01-12T14:41:15Z",
			want:       "home",
		},
		{
			name:       "standard format with different name",
			snapshotID: "server1_system_2026-02-15T09:30:00Z",
			want:       "system",
		},
		{
			name:       "no snapshot name (legacy format)",
			snapshotID: "myhost_2026-01-12T14:41:15Z",
			want:       "",
		},
		{
			name:       "name with underscores",
			snapshotID: "myhost_my_special_backup_2026-03-01T00:00:00Z",
			want:       "my_special_backup",
		},
		{
			name:       "single part (edge case)",
			snapshotID: "nounderscore",
			want:       "",
		},
		{
			name:       "empty string",
			snapshotID: "",
			want:       "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := parseSnapshotName(tt.snapshotID)
			if got != tt.want {
				t.Errorf("parseSnapshotName(%q) = %q, want %q", tt.snapshotID, got, tt.want)
			}
		})
	}
}

func TestParseSnapshotTimestamp(t *testing.T) {
	tests := []struct {
		name       string
		snapshotID string
		wantErr    bool
	}{
		{
			name:       "valid with name",
			snapshotID: "myhost_home_2026-01-12T14:41:15Z",
			wantErr:    false,
		},
		{
			name:       "valid without name",
			snapshotID: "myhost_2026-01-12T14:41:15Z",
			wantErr:    false,
		},
		{
			name:       "invalid - single part",
			snapshotID: "nounderscore",
			wantErr:    true,
		},
		{
			name:       "invalid - bad timestamp",
			snapshotID: "myhost_home_notadate",
			wantErr:    true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := parseSnapshotTimestamp(tt.snapshotID)
			if (err != nil) != tt.wantErr {
				t.Errorf("parseSnapshotTimestamp(%q) error = %v, wantErr %v", tt.snapshotID, err, tt.wantErr)
			}
		})
	}
}

func TestSnapshotPurgeOptions(t *testing.T) {
	opts := &SnapshotPurgeOptions{
		KeepLatest: true,
		Name:       "home",
		Force:      true,
	}
	if !opts.KeepLatest {
		t.Error("Expected KeepLatest to be true")
	}
	if opts.Name != "home" {
		t.Errorf("Expected Name to be 'home', got %q", opts.Name)
	}
	if !opts.Force {
		t.Error("Expected Force to be true")
	}

	opts2 := &SnapshotPurgeOptions{
		OlderThan: "30d",
		Name:      "system",
	}
	if opts2.OlderThan != "30d" {
		t.Errorf("Expected OlderThan to be '30d', got %q", opts2.OlderThan)
	}
	if opts2.Name != "system" {
		t.Errorf("Expected Name to be 'system', got %q", opts2.Name)
	}
}
@@ -15,99 +15,99 @@ import (
 // ShowInfo displays system and configuration information
 func (v *Vaultik) ShowInfo() error {
 	// System Information
-	fmt.Printf("=== System Information ===\n")
-	fmt.Printf("OS/Architecture: %s/%s\n", runtime.GOOS, runtime.GOARCH)
-	fmt.Printf("Version: %s\n", v.Globals.Version)
-	fmt.Printf("Commit: %s\n", v.Globals.Commit)
-	fmt.Printf("Go Version: %s\n", runtime.Version())
-	fmt.Println()
+	v.printfStdout("=== System Information ===\n")
+	v.printfStdout("OS/Architecture: %s/%s\n", runtime.GOOS, runtime.GOARCH)
+	v.printfStdout("Version: %s\n", v.Globals.Version)
+	v.printfStdout("Commit: %s\n", v.Globals.Commit)
+	v.printfStdout("Go Version: %s\n", runtime.Version())
+	v.printlnStdout()

 	// Storage Configuration
-	fmt.Printf("=== Storage Configuration ===\n")
-	fmt.Printf("S3 Bucket: %s\n", v.Config.S3.Bucket)
+	v.printfStdout("=== Storage Configuration ===\n")
+	v.printfStdout("S3 Bucket: %s\n", v.Config.S3.Bucket)
 	if v.Config.S3.Prefix != "" {
-		fmt.Printf("S3 Prefix: %s\n", v.Config.S3.Prefix)
+		v.printfStdout("S3 Prefix: %s\n", v.Config.S3.Prefix)
 	}
-	fmt.Printf("S3 Endpoint: %s\n", v.Config.S3.Endpoint)
-	fmt.Printf("S3 Region: %s\n", v.Config.S3.Region)
-	fmt.Println()
+	v.printfStdout("S3 Endpoint: %s\n", v.Config.S3.Endpoint)
+	v.printfStdout("S3 Region: %s\n", v.Config.S3.Region)
+	v.printlnStdout()

 	// Backup Settings
-	fmt.Printf("=== Backup Settings ===\n")
+	v.printfStdout("=== Backup Settings ===\n")

 	// Show configured snapshots
-	fmt.Printf("Snapshots:\n")
+	v.printfStdout("Snapshots:\n")
 	for _, name := range v.Config.SnapshotNames() {
 		snap := v.Config.Snapshots[name]
-		fmt.Printf("  %s:\n", name)
+		v.printfStdout("  %s:\n", name)
 		for _, path := range snap.Paths {
-			fmt.Printf("    - %s\n", path)
+			v.printfStdout("    - %s\n", path)
 		}
 		if len(snap.Exclude) > 0 {
-			fmt.Printf("    exclude: %s\n", strings.Join(snap.Exclude, ", "))
+			v.printfStdout("    exclude: %s\n", strings.Join(snap.Exclude, ", "))
 		}
 	}

 	// Global exclude patterns
 	if len(v.Config.Exclude) > 0 {
-		fmt.Printf("Global Exclude: %s\n", strings.Join(v.Config.Exclude, ", "))
+		v.printfStdout("Global Exclude: %s\n", strings.Join(v.Config.Exclude, ", "))
 	}

-	fmt.Printf("Compression: zstd level %d\n", v.Config.CompressionLevel)
-	fmt.Printf("Chunk Size: %s\n", humanize.Bytes(uint64(v.Config.ChunkSize)))
-	fmt.Printf("Blob Size Limit: %s\n", humanize.Bytes(uint64(v.Config.BlobSizeLimit)))
-	fmt.Println()
+	v.printfStdout("Compression: zstd level %d\n", v.Config.CompressionLevel)
+	v.printfStdout("Chunk Size: %s\n", humanize.Bytes(uint64(v.Config.ChunkSize)))
+	v.printfStdout("Blob Size Limit: %s\n", humanize.Bytes(uint64(v.Config.BlobSizeLimit)))
+	v.printlnStdout()

 	// Encryption Configuration
-	fmt.Printf("=== Encryption Configuration ===\n")
-	fmt.Printf("Recipients:\n")
+	v.printfStdout("=== Encryption Configuration ===\n")
+	v.printfStdout("Recipients:\n")
 	for _, recipient := range v.Config.AgeRecipients {
-		fmt.Printf("  - %s\n", recipient)
+		v.printfStdout("  - %s\n", recipient)
 	}
-	fmt.Println()
+	v.printlnStdout()

 	// Daemon Settings (if applicable)
 	if v.Config.BackupInterval > 0 || v.Config.MinTimeBetweenRun > 0 {
-		fmt.Printf("=== Daemon Settings ===\n")
+		v.printfStdout("=== Daemon Settings ===\n")
 		if v.Config.BackupInterval > 0 {
-			fmt.Printf("Backup Interval: %s\n", v.Config.BackupInterval)
+			v.printfStdout("Backup Interval: %s\n", v.Config.BackupInterval)
 		}
 		if v.Config.MinTimeBetweenRun > 0 {
-			fmt.Printf("Minimum Time: %s\n", v.Config.MinTimeBetweenRun)
+			v.printfStdout("Minimum Time: %s\n", v.Config.MinTimeBetweenRun)
 		}
-		fmt.Println()
+		v.printlnStdout()
 	}

 	// Local Database
-	fmt.Printf("=== Local Database ===\n")
-	fmt.Printf("Index Path: %s\n", v.Config.IndexPath)
+	v.printfStdout("=== Local Database ===\n")
+	v.printfStdout("Index Path: %s\n", v.Config.IndexPath)

 	// Check if index file exists and get its size
 	if info, err := v.Fs.Stat(v.Config.IndexPath); err == nil {
-		fmt.Printf("Index Size: %s\n", humanize.Bytes(uint64(info.Size())))
+		v.printfStdout("Index Size: %s\n", humanize.Bytes(uint64(info.Size())))

 		// Get snapshot count from database
 		query := `SELECT COUNT(*) FROM snapshots WHERE completed_at IS NOT NULL`
 		var snapshotCount int
 		if err := v.DB.Conn().QueryRowContext(v.ctx, query).Scan(&snapshotCount); err == nil {
-			fmt.Printf("Snapshots: %d\n", snapshotCount)
+			v.printfStdout("Snapshots: %d\n", snapshotCount)
 		}

 		// Get blob count from database
 		query = `SELECT COUNT(*) FROM blobs`
 		var blobCount int
 		if err := v.DB.Conn().QueryRowContext(v.ctx, query).Scan(&blobCount); err == nil {
-			fmt.Printf("Blobs: %d\n", blobCount)
+			v.printfStdout("Blobs: %d\n", blobCount)
 		}

 		// Get file count from database
 		query = `SELECT COUNT(*) FROM files`
 		var fileCount int
 		if err := v.DB.Conn().QueryRowContext(v.ctx, query).Scan(&fileCount); err == nil {
-			fmt.Printf("Files: %d\n", fileCount)
+			v.printfStdout("Files: %d\n", fileCount)
 		}
 	} else {
-		fmt.Printf("Index Size: (not created)\n")
+		v.printfStdout("Index Size: (not created)\n")
 	}

 	return nil
@@ -157,15 +157,15 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
 	result.StorageLocation = storageInfo.Location

 	if !jsonOutput {
-		fmt.Printf("=== Remote Storage ===\n")
-		fmt.Printf("Type: %s\n", storageInfo.Type)
-		fmt.Printf("Location: %s\n", storageInfo.Location)
-		fmt.Println()
+		v.printfStdout("=== Remote Storage ===\n")
+		v.printfStdout("Type: %s\n", storageInfo.Type)
+		v.printfStdout("Location: %s\n", storageInfo.Location)
+		v.printlnStdout()
 	}

 	// List all snapshot metadata
 	if !jsonOutput {
-		fmt.Printf("Scanning snapshot metadata...\n")
+		v.printfStdout("Scanning snapshot metadata...\n")
 	}

 	snapshotMetadata := make(map[string]*SnapshotMetadataInfo)
@@ -210,7 +210,7 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {

 	// Download and parse all manifests to get referenced blobs
 	if !jsonOutput {
-		fmt.Printf("Downloading %d manifest(s)...\n", len(snapshotIDs))
+		v.printfStdout("Downloading %d manifest(s)...\n", len(snapshotIDs))
 	}

 	referencedBlobs := make(map[string]int64) // hash -> compressed size
@@ -260,7 +260,7 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {

 	// List all blobs on remote
 	if !jsonOutput {
-		fmt.Printf("Scanning blobs...\n")
+		v.printfStdout("Scanning blobs...\n")
 	}

 	allBlobs := make(map[string]int64) // hash -> size from storage
@@ -298,14 +298,14 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
 	}

 	// Human-readable output
-	fmt.Printf("\n=== Snapshot Metadata ===\n")
+	v.printfStdout("\n=== Snapshot Metadata ===\n")
 	if len(result.Snapshots) == 0 {
-		fmt.Printf("No snapshots found\n")
+		v.printfStdout("No snapshots found\n")
 	} else {
-		fmt.Printf("%-45s %12s %12s %12s %10s %12s\n", "SNAPSHOT", "MANIFEST", "DATABASE", "TOTAL", "BLOBS", "BLOB SIZE")
-		fmt.Printf("%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
+		v.printfStdout("%-45s %12s %12s %12s %10s %12s\n", "SNAPSHOT", "MANIFEST", "DATABASE", "TOTAL", "BLOBS", "BLOB SIZE")
+		v.printfStdout("%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
 		for _, info := range result.Snapshots {
-			fmt.Printf("%-45s %12s %12s %12s %10s %12s\n",
+			v.printfStdout("%-45s %12s %12s %12s %10s %12s\n",
 				truncateString(info.SnapshotID, 45),
 				humanize.Bytes(uint64(info.ManifestSize)),
 				humanize.Bytes(uint64(info.DatabaseSize)),
@@ -314,23 +314,23 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
 				humanize.Bytes(uint64(info.BlobsSize)),
 			)
 		}
-		fmt.Printf("%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
-		fmt.Printf("%-45s %12s %12s %12s\n", fmt.Sprintf("Total (%d snapshots)", result.TotalMetadataCount), "", "", humanize.Bytes(uint64(result.TotalMetadataSize)))
+		v.printfStdout("%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
+		v.printfStdout("%-45s %12s %12s %12s\n", fmt.Sprintf("Total (%d snapshots)", result.TotalMetadataCount), "", "", humanize.Bytes(uint64(result.TotalMetadataSize)))
 	}

-	fmt.Printf("\n=== Blob Storage ===\n")
-	fmt.Printf("Total blobs on remote: %s (%s)\n",
+	v.printfStdout("\n=== Blob Storage ===\n")
+	v.printfStdout("Total blobs on remote: %s (%s)\n",
 		humanize.Comma(int64(result.TotalBlobCount)),
 		humanize.Bytes(uint64(result.TotalBlobSize)))
-	fmt.Printf("Referenced by snapshots: %s (%s)\n",
+	v.printfStdout("Referenced by snapshots: %s (%s)\n",
 		humanize.Comma(int64(result.ReferencedBlobCount)),
 		humanize.Bytes(uint64(result.ReferencedBlobSize)))
-	fmt.Printf("Orphaned (unreferenced): %s (%s)\n",
+	v.printfStdout("Orphaned (unreferenced): %s (%s)\n",
 		humanize.Comma(int64(result.OrphanedBlobCount)),
 		humanize.Bytes(uint64(result.OrphanedBlobSize)))

 	if result.OrphanedBlobCount > 0 {
-		fmt.Printf("\nRun 'vaultik prune --remote' to remove orphaned blobs.\n")
+		v.printfStdout("\nRun 'vaultik prune --remote' to remove orphaned blobs.\n")
 	}

 	return nil
@@ -3,7 +3,6 @@ package vaultik
 import (
 	"encoding/json"
 	"fmt"
-	"os"
 	"strings"

 	"git.eeqj.de/sneak/vaultik/internal/log"
@@ -121,29 +120,29 @@ func (v *Vaultik) PruneBlobs(opts *PruneOptions) error {
 	if len(unreferencedBlobs) == 0 {
 		log.Info("No unreferenced blobs found")
 		if opts.JSON {
-			return outputPruneBlobsJSON(result)
+			return v.outputPruneBlobsJSON(result)
 		}
-		fmt.Println("No unreferenced blobs to remove.")
+		v.printlnStdout("No unreferenced blobs to remove.")
 		return nil
 	}

 	// Show what will be deleted
 	log.Info("Found unreferenced blobs", "count", len(unreferencedBlobs), "total_size", humanize.Bytes(uint64(totalSize)))
 	if !opts.JSON {
-		fmt.Printf("Found %d unreferenced blob(s) totaling %s\n", len(unreferencedBlobs), humanize.Bytes(uint64(totalSize)))
+		v.printfStdout("Found %d unreferenced blob(s) totaling %s\n", len(unreferencedBlobs), humanize.Bytes(uint64(totalSize)))
 	}

 	// Confirm unless --force is used (skip in JSON mode - require --force)
 	if !opts.Force && !opts.JSON {
-		fmt.Printf("\nDelete %d unreferenced blob(s)? [y/N] ", len(unreferencedBlobs))
+		v.printfStdout("\nDelete %d unreferenced blob(s)? [y/N] ", len(unreferencedBlobs))
 		var confirm string
-		if _, err := fmt.Scanln(&confirm); err != nil {
+		if _, err := v.scanStdin(&confirm); err != nil {
 			// Treat EOF or error as "no"
-			fmt.Println("Cancelled")
+			v.printlnStdout("Cancelled")
 			return nil
 		}
 		if strings.ToLower(confirm) != "y" {
-			fmt.Println("Cancelled")
+			v.printlnStdout("Cancelled")
 			return nil
 		}
 	}
@@ -185,20 +184,20 @@ func (v *Vaultik) PruneBlobs(opts *PruneOptions) error {
 	)

 	if opts.JSON {
-		return outputPruneBlobsJSON(result)
+		return v.outputPruneBlobsJSON(result)
 	}

-	fmt.Printf("\nDeleted %d blob(s) totaling %s\n", deletedCount, humanize.Bytes(uint64(deletedSize)))
+	v.printfStdout("\nDeleted %d blob(s) totaling %s\n", deletedCount, humanize.Bytes(uint64(deletedSize)))
 	if deletedCount < len(unreferencedBlobs) {
-		fmt.Printf("Failed to delete %d blob(s)\n", len(unreferencedBlobs)-deletedCount)
+		v.printfStdout("Failed to delete %d blob(s)\n", len(unreferencedBlobs)-deletedCount)
 	}

 	return nil
 }

 // outputPruneBlobsJSON outputs the prune result as JSON
-func outputPruneBlobsJSON(result *PruneBlobsResult) error {
-	encoder := json.NewEncoder(os.Stdout)
+func (v *Vaultik) outputPruneBlobsJSON(result *PruneBlobsResult) error {
+	encoder := json.NewEncoder(v.Stdout)
 	encoder.SetIndent("", "  ")
 	return encoder.Encode(result)
 }
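The point of routing all output through v.Stdout rather than os.Stdout is testability: a test can substitute a buffer and assert on what was printed. A hypothetical test fragment in the style of the test setup later in this diff (field names taken from that setup; the assertion string is the message printed above):

// Capture command output in a buffer instead of the real stdout.
stdout := &bytes.Buffer{}
v := &vaultik.Vaultik{Stdout: stdout /* plus Storage, Repositories, etc. */}

// ... invoke code that prints via v.printfStdout / v.printlnStdout ...

if !strings.Contains(stdout.String(), "No unreferenced blobs to remove.") {
	t.Errorf("unexpected output: %q", stdout.String())
}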
internal/vaultik/purge_per_name_test.go (new file, +303)
@@ -0,0 +1,303 @@
package vaultik_test

import (
	"bytes"
	"context"
	"database/sql"
	"strings"
	"testing"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/database"
	"git.eeqj.de/sneak/vaultik/internal/log"
	"git.eeqj.de/sneak/vaultik/internal/types"
	"git.eeqj.de/sneak/vaultik/internal/vaultik"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// setupPurgeTest creates a Vaultik instance with an in-memory database and mock
// storage pre-populated with the given snapshot IDs. Each snapshot is marked as
// completed. Remote metadata stubs are created so syncWithRemote keeps them.
func setupPurgeTest(t *testing.T, snapshotIDs []string) *vaultik.Vaultik {
	t.Helper()
	log.Initialize(log.Config{})

	ctx := context.Background()
	db, err := database.New(ctx, ":memory:")
	require.NoError(t, err)
	t.Cleanup(func() { _ = db.Close() })

	repos := database.NewRepositories(db)
	mockStorage := NewMockStorer()

	// Insert each snapshot into the DB and create remote metadata stubs.
	// Use timestamps parsed from snapshot IDs for realistic ordering.
	for _, id := range snapshotIDs {
		// Parse timestamp from the snapshot ID
		parts := strings.Split(id, "_")
		timestampStr := parts[len(parts)-1]
		startedAt, err := time.Parse(time.RFC3339, timestampStr)
		require.NoError(t, err, "parsing timestamp from snapshot ID %q", id)

		completedAt := startedAt.Add(5 * time.Minute)
		snap := &database.Snapshot{
			ID:             types.SnapshotID(id),
			Hostname:       "testhost",
			VaultikVersion: "test",
			StartedAt:      startedAt,
			CompletedAt:    &completedAt,
		}
		err = repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
			return repos.Snapshots.Create(ctx, tx, snap)
		})
		require.NoError(t, err, "creating snapshot %s", id)

		// Create remote metadata stub so syncWithRemote keeps it
		metadataKey := "metadata/" + id + "/manifest.json.zst"
		err = mockStorage.Put(ctx, metadataKey, strings.NewReader("stub"))
		require.NoError(t, err)
	}

	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	stdin := &bytes.Buffer{}

	v := &vaultik.Vaultik{
		Storage:      mockStorage,
		Repositories: repos,
		DB:           db,
		Stdout:       stdout,
		Stderr:       stderr,
		Stdin:        stdin,
	}
	v.SetContext(ctx)

	return v
}

// listRemainingSnapshots returns IDs of all completed snapshots in the database.
func listRemainingSnapshots(t *testing.T, v *vaultik.Vaultik) []string {
	t.Helper()
	ctx := context.Background()
	dbSnaps, err := v.Repositories.Snapshots.ListRecent(ctx, 10000)
	require.NoError(t, err)

	var ids []string
	for _, s := range dbSnaps {
		if s.CompletedAt != nil {
			ids = append(ids, s.ID.String())
		}
	}
	return ids
}

func TestPurgeKeepLatest_PerName(t *testing.T) {
	// Create snapshots for two different names: "home" and "system".
	// With per-name --keep-latest, the latest of each should be kept.
	snapshotIDs := []string{
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_system_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
		"testhost_system_2026-01-01T04:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)

	// Should keep the latest of each name
	assert.Len(t, remaining, 2, "should keep exactly 2 snapshots (one per name)")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T04:00:00Z", "should keep latest system")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T03:00:00Z", "should keep latest home")
}

func TestPurgeKeepLatest_SingleName(t *testing.T) {
	// All snapshots have the same name — keep-latest should keep exactly one.
	snapshotIDs := []string{
		"testhost_home_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_home_2026-01-01T02:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)
	assert.Len(t, remaining, 1)
	assert.Contains(t, remaining, "testhost_home_2026-01-01T02:00:00Z", "should keep the newest")
}

func TestPurgeKeepLatest_WithNameFilter(t *testing.T) {
	// Use --name to filter purge to only "home" snapshots.
	// "system" snapshots should be untouched.
	snapshotIDs := []string{
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_system_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
		"testhost_home_2026-01-01T04:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
		Name:       "home",
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)

	// 2 system snapshots untouched + 1 latest home = 3
	assert.Len(t, remaining, 3)
	assert.Contains(t, remaining, "testhost_system_2026-01-01T00:00:00Z")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T02:00:00Z")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T04:00:00Z")
}

func TestPurgeKeepLatest_NoSnapshots(t *testing.T) {
	v := setupPurgeTest(t, nil)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)
}

func TestPurgeKeepLatest_NameFilterNoMatch(t *testing.T) {
	snapshotIDs := []string{
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_system_2026-01-01T01:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
		Name:       "nonexistent",
	})
	require.NoError(t, err)

	// All snapshots should remain — the name filter matched nothing
	remaining := listRemainingSnapshots(t, v)
	assert.Len(t, remaining, 2)
}

func TestPurgeOlderThan_WithNameFilter(t *testing.T) {
	// Snapshots with different names and timestamps.
	// --older-than should apply only to the named subset when --name is used.
	snapshotIDs := []string{
		"testhost_system_2020-01-01T00:00:00Z",
		"testhost_home_2020-01-01T00:00:00Z",
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T00:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	// Purge only "home" snapshots older than 365 days
	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		OlderThan: "365d",
		Force:     true,
		Name:      "home",
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)

	// Old system stays (not filtered by name), old home deleted, recent ones stay
	assert.Len(t, remaining, 3)
	assert.Contains(t, remaining, "testhost_system_2020-01-01T00:00:00Z")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T00:00:00Z")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T00:00:00Z")
}

func TestPurgeKeepLatest_LegacyNoNameSnapshots(t *testing.T) {
	// Legacy snapshots without a name component (hostname_timestamp).
	// Should be grouped together under empty-name.
	snapshotIDs := []string{
		"testhost_2026-01-01T00:00:00Z",
		"testhost_2026-01-01T01:00:00Z",
		"testhost_2026-01-01T02:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)
	assert.Len(t, remaining, 1)
	assert.Contains(t, remaining, "testhost_2026-01-01T02:00:00Z")
}

func TestPurgeKeepLatest_MixedNamedAndLegacy(t *testing.T) {
	// Mix of named snapshots and legacy ones (no name).
	snapshotIDs := []string{
		"testhost_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)

	// Should keep latest of each group: latest legacy + latest home
	assert.Len(t, remaining, 2)
	assert.Contains(t, remaining, "testhost_2026-01-01T02:00:00Z")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T03:00:00Z")
}

func TestPurgeKeepLatest_ThreeNames(t *testing.T) {
	// Three different snapshot names with multiple snapshots each.
	snapshotIDs := []string{
		"testhost_home_2026-01-01T00:00:00Z",
		"testhost_system_2026-01-01T01:00:00Z",
		"testhost_media_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
		"testhost_system_2026-01-01T04:00:00Z",
		"testhost_media_2026-01-01T05:00:00Z",
		"testhost_home_2026-01-01T06:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)
	assert.Len(t, remaining, 3, "should keep one per name")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T06:00:00Z")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T04:00:00Z")
	assert.Contains(t, remaining, "testhost_media_2026-01-01T05:00:00Z")
}
@@ -22,6 +22,13 @@ import (
     "golang.org/x/term"
 )

+const (
+    // progressBarWidth is the character width of the progress bar display.
+    progressBarWidth = 40
+    // progressBarThrottle is the minimum interval between progress bar redraws.
+    progressBarThrottle = 100 * time.Millisecond
+)
+
 // RestoreOptions contains options for the restore operation
 type RestoreOptions struct {
     SnapshotID string
@@ -109,7 +116,20 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {

     // Step 5: Restore files
     result := &RestoreResult{}
-    blobCache := make(map[string][]byte) // Cache downloaded and decrypted blobs
+    blobCache, err := newBlobDiskCache(4 * v.Config.BlobSizeLimit.Int64())
+    if err != nil {
+        return fmt.Errorf("creating blob cache: %w", err)
+    }
+    defer func() { _ = blobCache.Close() }()
+
+    // Calculate total bytes for progress bar
+    var totalBytesExpected int64
+    for _, file := range files {
+        totalBytesExpected += file.Size
+    }
+
+    // Create progress bar if output is a terminal
+    bar := v.newProgressBar("Restoring", totalBytesExpected)

     for i, file := range files {
         if v.ctx.Err() != nil {
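The hunk above swaps the unbounded in-memory map for a size-capped, disk-backed cache sized at four times the configured blob size limit, so restoring a large snapshot no longer holds every decrypted blob in RAM. The blobDiskCache implementation itself is not visible in this compare view; the following is only a sketch of what a compatible type could look like, inferred from the newBlobDiskCache/Get/Put/Close call sites in this diff. Everything beyond that method set, including the skip-when-full policy, is an assumption.

package vaultik

import (
    "os"
    "path/filepath"
)

// Sketch only: a size-capped, disk-backed blob cache. Not the real
// implementation; only the method signatures match the call sites.
type blobDiskCache struct {
    dir      string
    maxBytes int64
    curBytes int64
    paths    map[string]string // blob hash -> cached file path
}

func newBlobDiskCache(maxBytes int64) (*blobDiskCache, error) {
    dir, err := os.MkdirTemp("", "vaultik-blobcache-*")
    if err != nil {
        return nil, err
    }
    return &blobDiskCache{dir: dir, maxBytes: maxBytes, paths: make(map[string]string)}, nil
}

// Get returns a cached blob and whether it was present.
func (c *blobDiskCache) Get(hash string) ([]byte, bool) {
    p, ok := c.paths[hash]
    if !ok {
        return nil, false
    }
    data, err := os.ReadFile(p)
    if err != nil {
        return nil, false // treat unreadable entries as misses
    }
    return data, true
}

// Put stores a blob on disk, skipping the write once the cap is reached
// (assumed policy: a full cache degrades to no caching, never to an error
// that would abort the restore).
func (c *blobDiskCache) Put(hash string, data []byte) error {
    if c.curBytes+int64(len(data)) > c.maxBytes {
        return nil
    }
    p := filepath.Join(c.dir, hash)
    if err := os.WriteFile(p, data, 0o600); err != nil {
        return err
    }
    c.paths[hash] = p
    c.curBytes += int64(len(data))
    return nil
}

// Close removes the on-disk cache directory.
func (c *blobDiskCache) Close() error {
    return os.RemoveAll(c.dir)
}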
@@ -118,11 +138,21 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {

     if err := v.restoreFile(v.ctx, repos, file, opts.TargetDir, identity, chunkToBlobMap, blobCache, result); err != nil {
         log.Error("Failed to restore file", "path", file.Path, "error", err)
         // Continue with other files
         result.FilesFailed++
         result.FailedFiles = append(result.FailedFiles, file.Path.String())
+        // Update progress bar even on failure
+        if bar != nil {
+            _ = bar.Add64(file.Size)
+        }
         continue
     }

-    // Progress logging
+    // Update progress bar
+    if bar != nil {
+        _ = bar.Add64(file.Size)
+    }
+
+    // Progress logging (for non-terminal or structured logs)
     if (i+1)%100 == 0 || i+1 == len(files) {
         log.Info("Restore progress",
             "files", fmt.Sprintf("%d/%d", i+1, len(files)),
@@ -131,6 +161,10 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
         }
     }

+    if bar != nil {
+        _ = bar.Finish()
+    }
+
     result.Duration = time.Since(startTime)

     log.Info("Restore complete",
@@ -141,12 +175,19 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
         "duration", result.Duration,
     )

-    _, _ = fmt.Fprintf(v.Stdout, "Restored %d files (%s) in %s\n",
+    v.printfStdout("Restored %d files (%s) in %s\n",
         result.FilesRestored,
         humanize.Bytes(uint64(result.BytesRestored)),
         result.Duration.Round(time.Second),
     )

+    if result.FilesFailed > 0 {
+        _, _ = fmt.Fprintf(v.Stdout, "\nWARNING: %d file(s) failed to restore:\n", result.FilesFailed)
+        for _, path := range result.FailedFiles {
+            _, _ = fmt.Fprintf(v.Stdout, " - %s\n", path)
+        }
+    }
+
     // Run verification if requested
     if opts.Verify {
         if err := v.verifyRestoredFiles(v.ctx, repos, files, opts.TargetDir, result); err != nil {
@@ -154,19 +195,23 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
         }

         if result.FilesFailed > 0 {
-            _, _ = fmt.Fprintf(v.Stdout, "\nVerification FAILED: %d files did not match expected checksums\n", result.FilesFailed)
+            v.printfStdout("\nVerification FAILED: %d files did not match expected checksums\n", result.FilesFailed)
             for _, path := range result.FailedFiles {
-                _, _ = fmt.Fprintf(v.Stdout, " - %s\n", path)
+                v.printfStdout(" - %s\n", path)
             }
             return fmt.Errorf("%d files failed verification", result.FilesFailed)
         }

-        _, _ = fmt.Fprintf(v.Stdout, "Verified %d files (%s)\n",
+        v.printfStdout("Verified %d files (%s)\n",
             result.FilesVerified,
             humanize.Bytes(uint64(result.BytesVerified)),
         )
     }

+    if result.FilesFailed > 0 {
+        return fmt.Errorf("%d file(s) failed to restore", result.FilesFailed)
+    }
+
     return nil
}

@@ -299,7 +344,7 @@ func (v *Vaultik) restoreFile(
     targetDir string,
     identity age.Identity,
     chunkToBlobMap map[string]*database.BlobChunk,
-    blobCache map[string][]byte,
+    blobCache *blobDiskCache,
     result *RestoreResult,
 ) error {
     // Calculate target path - use full original path under target directory
@@ -383,7 +428,7 @@ func (v *Vaultik) restoreRegularFile(
     targetPath string,
     identity age.Identity,
     chunkToBlobMap map[string]*database.BlobChunk,
-    blobCache map[string][]byte,
+    blobCache *blobDiskCache,
     result *RestoreResult,
 ) error {
     // Get file chunks in order
@@ -417,13 +462,15 @@ func (v *Vaultik) restoreRegularFile(

     // Download and decrypt blob if not cached
     blobHashStr := blob.Hash.String()
-    blobData, ok := blobCache[blobHashStr]
+    blobData, ok := blobCache.Get(blobHashStr)
     if !ok {
         blobData, err = v.downloadBlob(ctx, blobHashStr, blob.CompressedSize, identity)
         if err != nil {
             return fmt.Errorf("downloading blob %s: %w", blobHashStr[:16], err)
         }
-        blobCache[blobHashStr] = blobData
+        if putErr := blobCache.Put(blobHashStr, blobData); putErr != nil {
+            log.Debug("Failed to cache blob on disk", "hash", blobHashStr[:16], "error", putErr)
+        }
         result.BlobsDownloaded++
         result.BytesDownloaded += blob.CompressedSize
     }
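This is a standard read-through cache: check, miss, fetch, then best-effort fill. Note the deliberate choice that a failed Put is only logged at debug level; the downloaded blobData is already in hand, so a cache-write failure costs future hits, not correctness. The same shape in isolation (hypothetical standalone example, not from the diff):

package main

import "fmt"

// getThrough is a read-through cache: return the cached value if present,
// otherwise fetch, then best-effort fill. A failed fill degrades to
// "no caching" instead of failing the read, as in the hunk above.
func getThrough(cache map[string][]byte, key string, fetch func(string) ([]byte, error)) ([]byte, error) {
    if data, ok := cache[key]; ok {
        return data, nil
    }
    data, err := fetch(key)
    if err != nil {
        return nil, err
    }
    cache[key] = data // the diff uses blobCache.Put here, logging errors at debug
    return data, nil
}

func main() {
    cache := map[string][]byte{}
    fetch := func(k string) ([]byte, error) { return []byte("blob:" + k), nil }
    data, _ := getThrough(cache, "abc123", fetch)
    fmt.Println(string(data), len(cache)) // blob:abc123 1
}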
@@ -511,28 +558,13 @@ func (v *Vaultik) verifyRestoredFiles(
         "files", len(regularFiles),
         "bytes", humanize.Bytes(uint64(totalBytes)),
     )
-    _, _ = fmt.Fprintf(v.Stdout, "\nVerifying %d files (%s)...\n",
+    v.printfStdout("\nVerifying %d files (%s)...\n",
         len(regularFiles),
         humanize.Bytes(uint64(totalBytes)),
     )

     // Create progress bar if output is a terminal
-    var bar *progressbar.ProgressBar
-    if isTerminal() {
-        bar = progressbar.NewOptions64(
-            totalBytes,
-            progressbar.OptionSetDescription("Verifying"),
-            progressbar.OptionSetWriter(os.Stderr),
-            progressbar.OptionShowBytes(true),
-            progressbar.OptionShowCount(),
-            progressbar.OptionSetWidth(40),
-            progressbar.OptionThrottle(100*time.Millisecond),
-            progressbar.OptionOnCompletion(func() {
-                fmt.Fprint(os.Stderr, "\n")
-            }),
-            progressbar.OptionSetRenderBlankState(true),
-        )
-    }
+    bar := v.newProgressBar("Verifying", totalBytes)

     // Verify each file
     for _, file := range regularFiles {
@@ -626,7 +658,37 @@ func (v *Vaultik) verifyFile(
     return bytesVerified, nil
 }

-// isTerminal returns true if stdout is a terminal
-func isTerminal() bool {
-    return term.IsTerminal(int(os.Stdout.Fd()))
+// newProgressBar creates a terminal-aware progress bar with standard options.
+// It returns nil if stdout is not a terminal.
+func (v *Vaultik) newProgressBar(description string, total int64) *progressbar.ProgressBar {
+    if !v.isTerminal() {
+        return nil
+    }
+    return progressbar.NewOptions64(
+        total,
+        progressbar.OptionSetDescription(description),
+        progressbar.OptionSetWriter(v.Stderr),
+        progressbar.OptionShowBytes(true),
+        progressbar.OptionShowCount(),
+        progressbar.OptionSetWidth(progressBarWidth),
+        progressbar.OptionThrottle(progressBarThrottle),
+        progressbar.OptionOnCompletion(func() {
+            v.printfStderr("\n")
+        }),
+        progressbar.OptionSetRenderBlankState(true),
+    )
+}
+
+// isTerminal returns true if stdout is a terminal.
+// It checks whether v.Stdout implements Fd() (i.e. is an *os.File),
+// and falls back to false for non-file writers (e.g. in tests).
+func (v *Vaultik) isTerminal() bool {
+    type fder interface {
+        Fd() uintptr
+    }
+    f, ok := v.Stdout.(fder)
+    if !ok {
+        return false
+    }
+    return term.IsTerminal(int(f.Fd()))
 }
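The replacement isTerminal no longer assumes os.Stdout: it probes the injected v.Stdout writer for an Fd() method, which *os.File has and in-memory test buffers do not, so progress bars are automatically suppressed under test. The interface-assertion trick in isolation (hypothetical standalone example):

package main

import (
    "bytes"
    "fmt"
    "io"
    "os"
)

// hasFd reports whether w exposes an underlying file descriptor, which
// *os.File does via its Fd() method and in-memory buffers do not.
func hasFd(w io.Writer) bool {
    type fder interface{ Fd() uintptr }
    _, ok := w.(fder)
    return ok
}

func main() {
    fmt.Println(hasFd(os.Stdout))       // true: *os.File has Fd()
    fmt.Println(hasFd(&bytes.Buffer{})) // false: so no progress bar in tests
}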
@@ -4,8 +4,8 @@ import (
     "encoding/json"
     "fmt"
     "os"
-    "regexp"
+    "path/filepath"
+    "regexp"
     "sort"
     "strings"
     "text/tabwriter"
@@ -90,6 +90,24 @@ func (v *Vaultik) CreateSnapshot(opts *SnapshotCreateOptions) error {
         v.printfStdout("\nAll %d snapshots completed in %s\n", len(snapshotNames), time.Since(overallStartTime).Round(time.Second))
     }

+    // Prune old snapshots and unreferenced blobs if --prune was specified
+    if opts.Prune {
+        log.Info("Pruning enabled - deleting old snapshots and unreferenced blobs")
+        v.printlnStdout("\nPruning old snapshots (keeping latest)...")
+
+        if err := v.PurgeSnapshots(true, "", true); err != nil {
+            return fmt.Errorf("prune: purging old snapshots: %w", err)
+        }
+
+        v.printlnStdout("Pruning unreferenced blobs...")
+
+        if err := v.PruneBlobs(&PruneOptions{Force: true}); err != nil {
+            return fmt.Errorf("prune: removing unreferenced blobs: %w", err)
+        }
+
+        log.Info("Pruning complete")
+    }
+
     return nil
}

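The order here matters: snapshots are purged first so that their blobs become unreferenced before PruneBlobs runs; otherwise a single pass would leave garbage behind. Assuming the --prune CLI flag maps onto SnapshotCreateOptions.Prune (the mapping is implied by the comment above, not shown in this view), the flow is driven as in this sketch:

// Hypothetical call site inside a command handler; CreateSnapshot and
// SnapshotCreateOptions are from the diff, the surrounding func is not.
func runSnapshotCreateWithPrune(v *Vaultik) error {
    opts := &SnapshotCreateOptions{Prune: true}
    // Snapshots first, then purges old snapshots, then prunes the blobs
    // those purges left unreferenced.
    return v.CreateSnapshot(opts)
}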
@@ -306,11 +324,6 @@ func (v *Vaultik) createNamedSnapshot(opts *SnapshotCreateOptions, hostname, sna
     }
     v.printfStdout("Duration: %s\n", formatDuration(snapshotDuration))

-    if opts.Prune {
-        log.Info("Pruning enabled - will delete old snapshots after snapshot")
-        // TODO: Implement pruning
-    }
-
     return nil
}

@@ -475,8 +488,30 @@ func (v *Vaultik) ListSnapshots(jsonOutput bool) error {
     return w.Flush()
 }

-// PurgeSnapshots removes old snapshots based on criteria
+// SnapshotPurgeOptions contains options for the snapshot purge command
+type SnapshotPurgeOptions struct {
+    KeepLatest bool
+    OlderThan  string
+    Force      bool
+    Name       string // Filter purge to a specific snapshot name
+}
+
+// PurgeSnapshots removes old snapshots based on criteria.
+// When keepLatest is true, retention is applied per snapshot name — the latest
+// snapshot for each distinct name is kept.
 func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool) error {
+    return v.PurgeSnapshotsWithOptions(&SnapshotPurgeOptions{
+        KeepLatest: keepLatest,
+        OlderThan:  olderThan,
+        Force:      force,
+    })
+}
+
+// PurgeSnapshotsWithOptions removes old snapshots based on criteria with full options.
+// When KeepLatest is true, retention is applied per snapshot name — the latest
+// snapshot for each distinct name is kept. If Name is non-empty, only snapshots
+// matching that name are considered for purge.
+func (v *Vaultik) PurgeSnapshotsWithOptions(opts *SnapshotPurgeOptions) error {
     // Sync with remote first
     if err := v.syncWithRemote(); err != nil {
         return fmt.Errorf("syncing with remote: %w", err)
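Keeping the old three-argument PurgeSnapshots as a thin wrapper means existing callers, including the new --prune path above, compile unchanged, while the options struct leaves room to grow without another signature break. Hypothetical call sites for both forms:

// Hypothetical call sites; both entry points are from the diff above.
func purgeExamples(v *Vaultik) error {
    // Legacy form, behavior unchanged:
    if err := v.PurgeSnapshots(true, "", true); err != nil {
        return err
    }
    // New form, scoped to one snapshot name:
    return v.PurgeSnapshotsWithOptions(&SnapshotPurgeOptions{
        OlderThan: "30d",
        Force:     true,
        Name:      "home",
    })
}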
@@ -500,6 +535,17 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
         }
     }

+    // If --name is specified, filter to only snapshots matching that name
+    if opts.Name != "" {
+        filtered := make([]SnapshotInfo, 0, len(snapshots))
+        for _, snap := range snapshots {
+            if parseSnapshotName(snap.ID.String()) == opts.Name {
+                filtered = append(filtered, snap)
+            }
+        }
+        snapshots = filtered
+    }
+
     // Sort by timestamp (newest first)
     sort.Slice(snapshots, func(i, j int) bool {
         return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
@@ -507,14 +553,23 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)

     var toDelete []SnapshotInfo

-    if keepLatest {
-        // Keep only the most recent snapshot
-        if len(snapshots) > 1 {
-            toDelete = snapshots[1:]
+    if opts.KeepLatest {
+        // Keep the latest snapshot per snapshot name
+        // Group snapshots by name, then mark all but the newest in each group
+        latestByName := make(map[string]bool) // tracks whether we've seen the latest for each name
+        for _, snap := range snapshots {
+            name := parseSnapshotName(snap.ID.String())
+            if latestByName[name] {
+                // Already kept the latest for this name — delete this one
+                toDelete = append(toDelete, snap)
+            } else {
+                // This is the latest (sorted newest-first) — keep it
+                latestByName[name] = true
+            }
         }
-    } else if olderThan != "" {
+    } else if opts.OlderThan != "" {
         // Parse duration
-        duration, err := parseDuration(olderThan)
+        duration, err := parseDuration(opts.OlderThan)
         if err != nil {
             return fmt.Errorf("invalid duration: %w", err)
         }
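Because snapshots is already sorted newest-first, one boolean set is enough: the first snapshot seen for a name is the newest and is kept, and every later one for that name is older and goes on the delete list. The pattern in isolation (hypothetical standalone example):

package main

import "fmt"

// markAllButFirst returns every element that is not the first occurrence
// of its key. With newest-first input, "first occurrence" means "newest",
// mirroring the keep-latest purge logic above.
func markAllButFirst(items []string, keyOf func(string) string) []string {
    seen := make(map[string]bool)
    var toDelete []string
    for _, it := range items {
        k := keyOf(it)
        if seen[k] {
            toDelete = append(toDelete, it)
        } else {
            seen[k] = true
        }
    }
    return toDelete
}

func main() {
    // Newest first; the key is the name before the timestamp suffix.
    snaps := []string{"home_t3", "system_t2", "home_t1", "system_t0"}
    key := func(s string) string { return s[:len(s)-3] }
    fmt.Println(markAllButFirst(snaps, key)) // [home_t1 system_t0]
}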
@@ -542,10 +597,10 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
     }

     // Confirm unless --force is used
-    if !force {
+    if !opts.Force {
         v.printfStdout("\nDelete %d snapshot(s)? [y/N] ", len(toDelete))
         var confirm string
-        if _, err := fmt.Scanln(&confirm); err != nil {
+        if _, err := v.scanStdin(&confirm); err != nil {
             // Treat EOF or error as "no"
             v.printlnStdout("Cancelled")
             return nil
@@ -851,7 +906,7 @@ func (v *Vaultik) RemoveSnapshot(snapshotID string, opts *RemoveOptions) (*Remov
         v.printfStdout("Remove snapshot '%s' from local database? [y/N] ", snapshotID)
     }
     var confirm string
-    if err := v.scanlnStdin(&confirm); err != nil {
+    if _, err := v.scanStdin(&confirm); err != nil {
         v.printlnStdout("Cancelled")
         return result, nil
     }
@@ -1004,16 +1059,16 @@ func (v *Vaultik) deleteSnapshotFromLocalDB(snapshotID string) error {

     // Delete related records first to avoid foreign key constraints
     if err := v.Repositories.Snapshots.DeleteSnapshotFiles(v.ctx, snapshotID); err != nil {
-        log.Error("Failed to delete snapshot files", "snapshot_id", snapshotID, "error", err)
+        return fmt.Errorf("deleting snapshot files for %s: %w", snapshotID, err)
     }
     if err := v.Repositories.Snapshots.DeleteSnapshotBlobs(v.ctx, snapshotID); err != nil {
-        log.Error("Failed to delete snapshot blobs", "snapshot_id", snapshotID, "error", err)
+        return fmt.Errorf("deleting snapshot blobs for %s: %w", snapshotID, err)
     }
     if err := v.Repositories.Snapshots.DeleteSnapshotUploads(v.ctx, snapshotID); err != nil {
-        log.Error("Failed to delete snapshot uploads", "snapshot_id", snapshotID, "error", err)
+        return fmt.Errorf("deleting snapshot uploads for %s: %w", snapshotID, err)
     }
     if err := v.Repositories.Snapshots.Delete(v.ctx, snapshotID); err != nil {
-        log.Error("Failed to delete snapshot record", "snapshot_id", snapshotID, "error", err)
+        return fmt.Errorf("deleting snapshot record %s: %w", snapshotID, err)
     }

     return nil
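Replacing log-and-continue with wrapped returns changes two things: a failure now aborts the deletion instead of leaving a half-deleted snapshot, and %w keeps the original error inspectable by callers. A small illustration of the latter (hypothetical errors, not from the codebase):

package main

import (
    "errors"
    "fmt"
)

var errLocked = errors.New("database is locked")

// deleteFiles stands in for a failing repository call.
func deleteFiles() error { return errLocked }

func main() {
    // Wrapping with %w, as the diff now does, keeps the cause inspectable:
    err := fmt.Errorf("deleting snapshot files for %s: %w", "testhost_2026-01-01T00:00:00Z", deleteFiles())
    fmt.Println(err)
    fmt.Println(errors.Is(err, errLocked)) // true: the chain is preserved
}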
23
internal/vaultik/snapshot_prune_test.go
Normal file
@@ -0,0 +1,23 @@
package vaultik

import (
    "testing"
)

// TestSnapshotCreateOptions_PruneFlag verifies the Prune field exists on
// SnapshotCreateOptions and can be set.
func TestSnapshotCreateOptions_PruneFlag(t *testing.T) {
    opts := &SnapshotCreateOptions{
        Prune: true,
    }
    if !opts.Prune {
        t.Error("Expected Prune to be true")
    }

    opts2 := &SnapshotCreateOptions{
        Prune: false,
    }
    if opts2.Prune {
        t.Error("Expected Prune to be false")
    }
}
@@ -129,12 +129,26 @@ func (v *Vaultik) GetFilesystem() afero.Fs {
     return v.Fs
 }

-// Outputf writes formatted output to stdout for user-facing messages.
-// This should be used for all non-log user output.
-func (v *Vaultik) Outputf(format string, args ...any) {
+// printfStdout writes formatted output to stdout.
+func (v *Vaultik) printfStdout(format string, args ...any) {
     _, _ = fmt.Fprintf(v.Stdout, format, args...)
 }

+// printlnStdout writes a line to stdout.
+func (v *Vaultik) printlnStdout(args ...any) {
+    _, _ = fmt.Fprintln(v.Stdout, args...)
+}
+
+// printfStderr writes formatted output to stderr.
+func (v *Vaultik) printfStderr(format string, args ...any) {
+    _, _ = fmt.Fprintf(v.Stderr, format, args...)
+}
+
+// scanStdin reads a line of input from stdin.
+func (v *Vaultik) scanStdin(a ...any) (int, error) {
+    return fmt.Fscanln(v.Stdin, a...)
+}
+
 // TestVaultik wraps a Vaultik with captured stdout/stderr for testing
 type TestVaultik struct {
     *Vaultik
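Every user-facing read and write now goes through the injected v.Stdin/v.Stdout/v.Stderr streams, which is what makes confirmation prompts scriptable in tests: feed a reader, capture a buffer. A minimal standalone sketch of the pattern (not the real TestVaultik wiring; only the helper methods above are from the diff):

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"
)

// app mirrors the injected-stream pattern: all prompts and answers go
// through fields a test can substitute.
type app struct {
    Stdin  io.Reader
    Stdout io.Writer
}

func (a *app) confirm(prompt string) bool {
    fmt.Fprint(a.Stdout, prompt)
    var answer string
    if _, err := fmt.Fscanln(a.Stdin, &answer); err != nil {
        return false // EOF or error reads as "no", like the purge flow
    }
    return strings.EqualFold(answer, "y")
}

func main() {
    var out bytes.Buffer
    a := &app{Stdin: strings.NewReader("y\n"), Stdout: &out}
    fmt.Println(a.confirm("Delete 2 snapshot(s)? [y/N] ")) // true
    fmt.Println(out.String()) // the captured prompt text
}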
@@ -58,14 +58,14 @@ func (v *Vaultik) RunDeepVerify(snapshotID string, opts *VerifyOptions) error {
     )

     if !opts.JSON {
-        v.Outputf("Deep verification of snapshot: %s\n\n", snapshotID)
+        v.printfStdout("Deep verification of snapshot: %s\n\n", snapshotID)
     }

     // Step 1: Download manifest
     manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)
     log.Info("Downloading manifest", "path", manifestPath)
     if !opts.JSON {
-        v.Outputf("Downloading manifest...\n")
+        v.printfStdout("Downloading manifest...\n")
     }

     manifestReader, err := v.Storage.Get(v.ctx, manifestPath)
@@ -95,14 +95,14 @@ func (v *Vaultik) RunDeepVerify(snapshotID string, opts *VerifyOptions) error {
         "manifest_total_size", humanize.Bytes(uint64(manifest.TotalCompressedSize)),
     )
     if !opts.JSON {
-        v.Outputf("Manifest loaded: %d blobs (%s)\n", manifest.BlobCount, humanize.Bytes(uint64(manifest.TotalCompressedSize)))
+        v.printfStdout("Manifest loaded: %d blobs (%s)\n", manifest.BlobCount, humanize.Bytes(uint64(manifest.TotalCompressedSize)))
     }

     // Step 2: Download and decrypt database (authoritative source)
     dbPath := fmt.Sprintf("metadata/%s/db.zst.age", snapshotID)
     log.Info("Downloading encrypted database", "path", dbPath)
     if !opts.JSON {
-        v.Outputf("Downloading and decrypting database...\n")
+        v.printfStdout("Downloading and decrypting database...\n")
     }

     dbReader, err := v.Storage.Get(v.ctx, dbPath)
@@ -155,8 +155,8 @@ func (v *Vaultik) RunDeepVerify(snapshotID string, opts *VerifyOptions) error {
         "db_total_size", humanize.Bytes(uint64(totalSize)),
     )
     if !opts.JSON {
-        v.Outputf("Database loaded: %d blobs (%s)\n", len(dbBlobs), humanize.Bytes(uint64(totalSize)))
-        v.Outputf("Verifying manifest against database...\n")
+        v.printfStdout("Database loaded: %d blobs (%s)\n", len(dbBlobs), humanize.Bytes(uint64(totalSize)))
+        v.printfStdout("Verifying manifest against database...\n")
     }

     // Step 4: Verify manifest matches database
@@ -171,8 +171,8 @@ func (v *Vaultik) RunDeepVerify(snapshotID string, opts *VerifyOptions) error {

     // Step 5: Verify all blobs exist in S3 (using database as source)
     if !opts.JSON {
-        v.Outputf("Manifest verified.\n")
-        v.Outputf("Checking blob existence in remote storage...\n")
+        v.printfStdout("Manifest verified.\n")
+        v.printfStdout("Checking blob existence in remote storage...\n")
     }
     if err := v.verifyBlobExistenceFromDB(dbBlobs); err != nil {
         result.Status = "failed"
@@ -185,8 +185,8 @@ func (v *Vaultik) RunDeepVerify(snapshotID string, opts *VerifyOptions) error {

     // Step 6: Deep verification - download and verify blob contents
     if !opts.JSON {
-        v.Outputf("All blobs exist.\n")
-        v.Outputf("Downloading and verifying blob contents (%d blobs, %s)...\n", len(dbBlobs), humanize.Bytes(uint64(totalSize)))
+        v.printfStdout("All blobs exist.\n")
+        v.printfStdout("Downloading and verifying blob contents (%d blobs, %s)...\n", len(dbBlobs), humanize.Bytes(uint64(totalSize)))
     }
     if err := v.performDeepVerificationFromDB(dbBlobs, tempDB.DB, opts); err != nil {
         result.Status = "failed"
@@ -211,10 +211,10 @@ func (v *Vaultik) RunDeepVerify(snapshotID string, opts *VerifyOptions) error {
         "blobs_verified", len(dbBlobs),
     )

-    v.Outputf("\n✓ Verification completed successfully\n")
-    v.Outputf(" Snapshot: %s\n", snapshotID)
-    v.Outputf(" Blobs verified: %d\n", len(dbBlobs))
-    v.Outputf(" Total size: %s\n", humanize.Bytes(uint64(totalSize)))
+    v.printfStdout("\n✓ Verification completed successfully\n")
+    v.printfStdout(" Snapshot: %s\n", snapshotID)
+    v.printfStdout(" Blobs verified: %d\n", len(dbBlobs))
+    v.printfStdout(" Total size: %s\n", humanize.Bytes(uint64(totalSize)))

     return nil
}
@@ -569,7 +569,7 @@ func (v *Vaultik) performDeepVerificationFromDB(blobs []snapshot.BlobInfo, db *s
     )

     if !opts.JSON {
-        v.Outputf(" Verified %d/%d blobs (%d remaining) - %s/%s - elapsed %s, eta %s\n",
+        v.printfStdout(" Verified %d/%d blobs (%d remaining) - %s/%s - elapsed %s, eta %s\n",
             i+1, len(blobs), remaining,
             humanize.Bytes(uint64(bytesProcessed)),
             humanize.Bytes(uint64(totalBytesExpected)),