Compare commits: bb334d8e72...main (9 commits)

Commits:
1c72a37bc8
60b6746db9
f28c8a73b7
1c0f5b8eb2
689109a2b8
ac2f21a89d
8c59f55096
c24e7e6360
7a5943958d
.dockerignore (new file, 8 lines)
@@ -0,0 +1,8 @@
+.git
+.gitea
+*.md
+LICENSE
+vaultik
+coverage.out
+coverage.html
+.DS_Store
.gitea/workflows/check.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
+name: check
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+jobs:
+  check:
+    runs-on: ubuntu-latest
+    steps:
+      # actions/checkout v4, 2024-09-16
+      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5
+      - name: Build and check
+        run: docker build .
@@ -54,7 +54,7 @@ The database tracks five primary entities and their relationships:

 #### File (`database.File`)
 Represents a file or directory in the backup system. Stores metadata needed for restoration:
-- Path, timestamps (mtime, ctime)
+- Path, mtime
 - Size, mode, ownership (uid, gid)
 - Symlink target (if applicable)
Dockerfile (new file, 61 lines)
@@ -0,0 +1,61 @@
+# Lint stage
+# golangci/golangci-lint:v2.11.3-alpine, 2026-03-17
+FROM golangci/golangci-lint:v2.11.3-alpine@sha256:b1c3de5862ad0a95b4e45a993b0f00415835d687e4f12c845c7493b86c13414e AS lint
+
+RUN apk add --no-cache make build-base
+
+WORKDIR /src
+
+# Copy go mod files first for better layer caching
+COPY go.mod go.sum ./
+RUN go mod download
+
+# Copy source code
+COPY . .
+
+# Run formatting check and linter
+RUN make fmt-check
+RUN make lint
+
+# Build stage
+# golang:1.26.1-alpine, 2026-03-17
+FROM golang:1.26.1-alpine@sha256:2389ebfa5b7f43eeafbd6be0c3700cc46690ef842ad962f6c5bd6be49ed82039 AS builder
+
+# Depend on lint stage passing
+COPY --from=lint /src/go.sum /dev/null
+
+ARG VERSION=dev
+
+# Install build dependencies for CGO (mattn/go-sqlite3) and sqlite3 CLI (tests)
+RUN apk add --no-cache make build-base sqlite
+
+WORKDIR /src
+
+# Copy go mod files first for better layer caching
+COPY go.mod go.sum ./
+RUN go mod download
+
+# Copy source code
+COPY . .
+
+# Run tests
+RUN make test
+
+# Build with CGO enabled (required for mattn/go-sqlite3)
+RUN CGO_ENABLED=1 go build -ldflags "-X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=${VERSION}' -X 'git.eeqj.de/sneak/vaultik/internal/globals.Commit=$(git rev-parse HEAD 2>/dev/null || echo unknown)'" -o /vaultik ./cmd/vaultik
+
+# Runtime stage
+# alpine:3.21, 2026-02-25
+FROM alpine:3.21@sha256:c3f8e73fdb79deaebaa2037150150191b9dcbfba68b4a46d70103204c53f4709
+
+RUN apk add --no-cache ca-certificates sqlite
+
+# Copy binary from builder
+COPY --from=builder /vaultik /usr/local/bin/vaultik
+
+# Create non-root user
+RUN adduser -D -H -s /sbin/nologin vaultik
+
+USER vaultik
+
+ENTRYPOINT ["/usr/local/bin/vaultik"]
Makefile (40 changed lines)
@@ -1,4 +1,4 @@
-.PHONY: test fmt lint build clean all
+.PHONY: test fmt lint fmt-check check build clean all docker hooks

 # Version number
 VERSION := 0.0.1
@@ -14,21 +14,12 @@ LDFLAGS := -X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=$(VERSION)' \
 all: vaultik

 # Run tests
-test: lint fmt-check
-	@echo "Running tests..."
-	@if ! go test -v -timeout 10s ./... 2>&1; then \
-		echo ""; \
-		echo "TEST FAILURES DETECTED"; \
-		echo "Run 'go test -v ./internal/database' to see database test details"; \
-		exit 1; \
-	fi
+test:
+	go test -race -timeout 30s ./...

-# Check if code is formatted
+# Check if code is formatted (read-only)
 fmt-check:
-	@if [ -n "$$(go fmt ./...)" ]; then \
-		echo "Error: Code is not formatted. Run 'make fmt' to fix."; \
-		exit 1; \
-	fi
+	@test -z "$$(gofmt -l .)" || (echo "Files not formatted:" && gofmt -l . && exit 1)

 # Format code
 fmt:
@@ -36,7 +27,7 @@ fmt:

 # Run linter
 lint:
-	golangci-lint run
+	golangci-lint run ./...

 # Build binary
 vaultik: internal/*/*.go cmd/vaultik/*.go
@@ -47,11 +38,6 @@ clean:
 	rm -f vaultik
 	go clean

-# Install dependencies
-deps:
-	go mod download
-	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-
 # Run tests with coverage
 test-coverage:
 	go test -v -coverprofile=coverage.out ./...
@@ -67,3 +53,17 @@ local:

 install: vaultik
 	cp ./vaultik $(HOME)/bin/
+
+# Run all checks (formatting, linting, tests) without modifying files
+check: fmt-check lint test
+
+# Build Docker image
+docker:
+	docker build -t vaultik .
+
+# Install pre-commit hook
+hooks:
+	@printf '#!/bin/sh\nset -e\n' > .git/hooks/pre-commit
+	@printf 'go mod tidy\ngo fmt ./...\ngit diff --exit-code -- go.mod go.sum || { echo "go mod tidy changed files; please stage and retry"; exit 1; }\n' >> .git/hooks/pre-commit
+	@printf 'make check\n' >> .git/hooks/pre-commit
+	@chmod +x .git/hooks/pre-commit
@@ -17,7 +17,6 @@ Stores metadata about files in the filesystem being backed up.
 - `id` (TEXT PRIMARY KEY) - UUID for the file record
 - `path` (TEXT NOT NULL UNIQUE) - Absolute file path
 - `mtime` (INTEGER NOT NULL) - Modification time as Unix timestamp
-- `ctime` (INTEGER NOT NULL) - Change time as Unix timestamp
 - `size` (INTEGER NOT NULL) - File size in bytes
 - `mode` (INTEGER NOT NULL) - Unix file permissions and type
 - `uid` (INTEGER NOT NULL) - User ID of file owner
go.mod (2 changed lines)
@@ -1,6 +1,6 @@
 module git.eeqj.de/sneak/vaultik

-go 1.24.4
+go 1.26.1

 require (
 	filippo.io/age v1.2.1
@@ -29,7 +29,6 @@ func TestCascadeDeleteDebug(t *testing.T) {
 	file := &File{
 		Path:  "/cascade-test.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -22,7 +22,6 @@ func TestChunkFileRepository(t *testing.T) {
 	file1 := &File{
 		Path:  "/file1.txt",
 		MTime: testTime,
-		CTime: testTime,
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -37,7 +36,6 @@ func TestChunkFileRepository(t *testing.T) {
 	file2 := &File{
 		Path:  "/file2.txt",
 		MTime: testTime,
-		CTime: testTime,
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -138,9 +136,9 @@ func TestChunkFileRepositoryComplexDeduplication(t *testing.T) {

 	// Create test files
 	testTime := time.Now().Truncate(time.Second)
-	file1 := &File{Path: "/file1.txt", MTime: testTime, CTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
-	file2 := &File{Path: "/file2.txt", MTime: testTime, CTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
-	file3 := &File{Path: "/file3.txt", MTime: testTime, CTime: testTime, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}
+	file1 := &File{Path: "/file1.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
+	file2 := &File{Path: "/file2.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
+	file3 := &File{Path: "/file3.txt", MTime: testTime, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}

 	if err := fileRepo.Create(ctx, nil, file1); err != nil {
 		t.Fatalf("failed to create file1: %v", err)
@@ -22,7 +22,6 @@ func TestFileChunkRepository(t *testing.T) {
 	file := &File{
 		Path:  "/test/file.txt",
 		MTime: testTime,
-		CTime: testTime,
 		Size:  3072,
 		Mode:  0644,
 		UID:   1000,
@@ -135,7 +134,6 @@ func TestFileChunkRepositoryMultipleFiles(t *testing.T) {
 	file := &File{
 		Path:  types.FilePath(path),
 		MTime: testTime,
-		CTime: testTime,
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -25,12 +25,11 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
 	}

 	query := `
-		INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target)
-		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+		INSERT INTO files (id, path, source_path, mtime, size, mode, uid, gid, link_target)
+		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
 		ON CONFLICT(path) DO UPDATE SET
 			source_path = excluded.source_path,
 			mtime = excluded.mtime,
-			ctime = excluded.ctime,
 			size = excluded.size,
 			mode = excluded.mode,
 			uid = excluded.uid,
@@ -42,10 +41,10 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
 	var idStr string
 	var err error
 	if tx != nil {
-		LogSQL("Execute", query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String())
-		err = tx.QueryRowContext(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
+		LogSQL("Execute", query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String())
+		err = tx.QueryRowContext(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
 	} else {
-		err = r.db.QueryRowWithLog(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
+		err = r.db.QueryRowWithLog(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
 	}

 	if err != nil {
@@ -63,7 +62,7 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err

 func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE path = ?
 	`
@@ -82,7 +81,7 @@ func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, err
 // GetByID retrieves a file by its UUID
 func (r *FileRepository) GetByID(ctx context.Context, id types.FileID) (*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE id = ?
 	`
@@ -100,7 +99,7 @@ func (r *FileRepository) GetByID(ctx context.Context, id types.FileID) (*File, e

 func (r *FileRepository) GetByPathTx(ctx context.Context, tx *sql.Tx, path string) (*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE path = ?
 	`
@@ -123,7 +122,7 @@ func (r *FileRepository) GetByPathTx(ctx context.Context, tx *sql.Tx, path strin
 func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 	var file File
 	var idStr, pathStr, sourcePathStr string
-	var mtimeUnix, ctimeUnix int64
+	var mtimeUnix int64
 	var linkTarget sql.NullString

 	err := row.Scan(
@@ -131,7 +130,6 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 		&pathStr,
 		&sourcePathStr,
 		&mtimeUnix,
-		&ctimeUnix,
 		&file.Size,
 		&file.Mode,
 		&file.UID,
@@ -149,7 +147,6 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 	file.Path = types.FilePath(pathStr)
 	file.SourcePath = types.SourcePath(sourcePathStr)
 	file.MTime = time.Unix(mtimeUnix, 0).UTC()
-	file.CTime = time.Unix(ctimeUnix, 0).UTC()
 	if linkTarget.Valid {
 		file.LinkTarget = types.FilePath(linkTarget.String)
 	}
@@ -161,7 +158,7 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
 	var file File
 	var idStr, pathStr, sourcePathStr string
-	var mtimeUnix, ctimeUnix int64
+	var mtimeUnix int64
 	var linkTarget sql.NullString

 	err := rows.Scan(
@@ -169,7 +166,6 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
 		&pathStr,
 		&sourcePathStr,
 		&mtimeUnix,
-		&ctimeUnix,
 		&file.Size,
 		&file.Mode,
 		&file.UID,
@@ -187,7 +183,6 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
 	file.Path = types.FilePath(pathStr)
 	file.SourcePath = types.SourcePath(sourcePathStr)
 	file.MTime = time.Unix(mtimeUnix, 0).UTC()
-	file.CTime = time.Unix(ctimeUnix, 0).UTC()
 	if linkTarget.Valid {
 		file.LinkTarget = types.FilePath(linkTarget.String)
 	}
@@ -197,7 +192,7 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {

 func (r *FileRepository) ListModifiedSince(ctx context.Context, since time.Time) ([]*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE mtime >= ?
 		ORDER BY path
@@ -258,7 +253,7 @@ func (r *FileRepository) DeleteByID(ctx context.Context, tx *sql.Tx, id types.Fi

 func (r *FileRepository) ListByPrefix(ctx context.Context, prefix string) ([]*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE path LIKE ? || '%'
 		ORDER BY path
@@ -285,7 +280,7 @@ func (r *FileRepository) ListByPrefix(ctx context.Context, prefix string) ([]*Fi
 // ListAll returns all files in the database
 func (r *FileRepository) ListAll(ctx context.Context) ([]*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
 		FROM files
 		ORDER BY path
 	`
@@ -315,7 +310,7 @@ func (r *FileRepository) CreateBatch(ctx context.Context, tx *sql.Tx, files []*F
 		return nil
 	}

-	// Each File has 10 values, so batch at 100 to be safe with SQLite's variable limit
+	// Each File has 9 values, so batch at 100 to be safe with SQLite's variable limit
 	const batchSize = 100

 	for i := 0; i < len(files); i += batchSize {
@@ -325,19 +320,18 @@ func (r *FileRepository) CreateBatch(ctx context.Context, tx *sql.Tx, files []*F
 		}
 		batch := files[i:end]

-		query := `INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target) VALUES `
-		args := make([]interface{}, 0, len(batch)*10)
+		query := `INSERT INTO files (id, path, source_path, mtime, size, mode, uid, gid, link_target) VALUES `
+		args := make([]interface{}, 0, len(batch)*9)
 		for j, f := range batch {
 			if j > 0 {
 				query += ", "
 			}
-			query += "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
-			args = append(args, f.ID.String(), f.Path.String(), f.SourcePath.String(), f.MTime.Unix(), f.CTime.Unix(), f.Size, f.Mode, f.UID, f.GID, f.LinkTarget.String())
+			query += "(?, ?, ?, ?, ?, ?, ?, ?, ?)"
+			args = append(args, f.ID.String(), f.Path.String(), f.SourcePath.String(), f.MTime.Unix(), f.Size, f.Mode, f.UID, f.GID, f.LinkTarget.String())
 		}
 		query += ` ON CONFLICT(path) DO UPDATE SET
 			source_path = excluded.source_path,
 			mtime = excluded.mtime,
-			ctime = excluded.ctime,
 			size = excluded.size,
 			mode = excluded.mode,
 			uid = excluded.uid,
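A quick sanity check on the batch sizing in CreateBatch above: with nine bound parameters per row, a batch of 100 rows binds 900 parameters, under SQLite's historical default limit of 999 host parameters per statement (the exact ceiling depends on the build's SQLITE_MAX_VARIABLE_NUMBER; 999 is a conservative assumption, and newer SQLite releases default much higher). A minimal sketch of the arithmetic:

package main

import "fmt"

func main() {
	const (
		paramsPerFile = 9   // id, path, source_path, mtime, size, mode, uid, gid, link_target
		batchSize     = 100 // rows per multi-row INSERT
		paramLimit    = 999 // conservative SQLITE_MAX_VARIABLE_NUMBER default (assumption)
	)
	total := paramsPerFile * batchSize
	fmt.Printf("%d parameters per batch, limit %d, within limit: %v\n",
		total, paramLimit, total <= paramLimit)
}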
@@ -39,7 +39,6 @@ func TestFileRepository(t *testing.T) {
 	file := &File{
 		Path:  "/test/file.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -124,7 +123,6 @@ func TestFileRepositorySymlink(t *testing.T) {
 	symlink := &File{
 		Path:  "/test/link",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  0,
 		Mode:  uint32(0777 | os.ModeSymlink),
 		UID:   1000,
@@ -161,7 +159,6 @@ func TestFileRepositoryTransaction(t *testing.T) {
 	file := &File{
 		Path:  "/test/tx_file.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -17,7 +17,6 @@ type File struct {
 	Path       types.FilePath   // Absolute path of the file
 	SourcePath types.SourcePath // The source directory this file came from (for restore path stripping)
 	MTime      time.Time
-	CTime      time.Time
 	Size       int64
 	Mode       uint32
 	UID        uint32
@@ -23,7 +23,6 @@ func TestRepositoriesTransaction(t *testing.T) {
 	file := &File{
 		Path:  "/test/tx_file.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -146,7 +145,6 @@ func TestRepositoriesTransactionRollback(t *testing.T) {
 	file := &File{
 		Path:  "/test/rollback_file.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -202,7 +200,6 @@ func TestRepositoriesReadTransaction(t *testing.T) {
 	file := &File{
 		Path:  "/test/read_file.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -226,7 +223,6 @@ func TestRepositoriesReadTransaction(t *testing.T) {
 		_ = repos.Files.Create(ctx, tx, &File{
 			Path:  "/test/should_fail.txt",
 			MTime: time.Now(),
-			CTime: time.Now(),
 			Size:  0,
 			Mode:  0644,
 			UID:   1000,
@@ -23,7 +23,6 @@ func TestFileRepositoryUUIDGeneration(t *testing.T) {
 		{
 			Path:  "/file1.txt",
 			MTime: time.Now().Truncate(time.Second),
-			CTime: time.Now().Truncate(time.Second),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -32,7 +31,6 @@ func TestFileRepositoryUUIDGeneration(t *testing.T) {
 		{
 			Path:  "/file2.txt",
 			MTime: time.Now().Truncate(time.Second),
-			CTime: time.Now().Truncate(time.Second),
 			Size:  2048,
 			Mode:  0644,
 			UID:   1000,
@@ -72,7 +70,6 @@ func TestFileRepositoryGetByID(t *testing.T) {
 	file := &File{
 		Path:  "/test.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -120,7 +117,6 @@ func TestOrphanedFileCleanup(t *testing.T) {
 	file1 := &File{
 		Path:  "/orphaned.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -129,7 +125,6 @@ func TestOrphanedFileCleanup(t *testing.T) {
 	file2 := &File{
 		Path:  "/referenced.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -218,7 +213,6 @@ func TestOrphanedChunkCleanup(t *testing.T) {
 	file := &File{
 		Path:  "/test.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -348,7 +342,6 @@ func TestFileChunkRepositoryWithUUIDs(t *testing.T) {
 	file := &File{
 		Path:  "/test.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  3072,
 		Mode:  0644,
 		UID:   1000,
@@ -419,7 +412,6 @@ func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
 	file1 := &File{
 		Path:  "/file1.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -428,7 +420,6 @@ func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
 	file2 := &File{
 		Path:  "/file2.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -586,7 +577,6 @@ func TestComplexOrphanedDataScenario(t *testing.T) {
 		files[i] = &File{
 			Path:  types.FilePath(fmt.Sprintf("/file%d.txt", i)),
 			MTime: time.Now().Truncate(time.Second),
-			CTime: time.Now().Truncate(time.Second),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -678,7 +668,6 @@ func TestCascadeDelete(t *testing.T) {
 	file := &File{
 		Path:  "/cascade-test.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -750,7 +739,6 @@ func TestTransactionIsolation(t *testing.T) {
 	file := &File{
 		Path:  "/tx-test.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -812,7 +800,6 @@ func TestConcurrentOrphanedCleanup(t *testing.T) {
 		file := &File{
 			Path:  types.FilePath(fmt.Sprintf("/concurrent-%d.txt", i)),
 			MTime: time.Now().Truncate(time.Second),
-			CTime: time.Now().Truncate(time.Second),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -18,7 +18,6 @@ func TestOrphanedFileCleanupDebug(t *testing.T) {
 	file1 := &File{
 		Path:  "/orphaned.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -27,7 +26,6 @@ func TestOrphanedFileCleanupDebug(t *testing.T) {
 	file2 := &File{
 		Path:  "/referenced.txt",
 		MTime: time.Now().Truncate(time.Second),
-		CTime: time.Now().Truncate(time.Second),
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -29,7 +29,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "",
 				MTime: time.Now(),
-				CTime: time.Now(),
 				Size:  1024,
 				Mode:  0644,
 				UID:   1000,
@@ -42,7 +41,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  types.FilePath("/" + strings.Repeat("a", 4096)),
 				MTime: time.Now(),
-				CTime: time.Now(),
 				Size:  1024,
 				Mode:  0644,
 				UID:   1000,
@@ -55,7 +53,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "/test/file with spaces and 特殊文字.txt",
 				MTime: time.Now(),
-				CTime: time.Now(),
 				Size:  1024,
 				Mode:  0644,
 				UID:   1000,
@@ -68,7 +65,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "/empty.txt",
 				MTime: time.Now(),
-				CTime: time.Now(),
 				Size:  0,
 				Mode:  0644,
 				UID:   1000,
@@ -81,7 +77,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "/link",
 				MTime: time.Now(),
-				CTime: time.Now(),
 				Size:  0,
 				Mode:  0777 | 0120000, // symlink mode
 				UID:   1000,
@@ -123,7 +118,6 @@ func TestDuplicateHandling(t *testing.T) {
 	file1 := &File{
 		Path:  "/duplicate.txt",
 		MTime: time.Now(),
-		CTime: time.Now(),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -132,7 +126,6 @@ func TestDuplicateHandling(t *testing.T) {
 	file2 := &File{
 		Path:  "/duplicate.txt", // Same path
 		MTime: time.Now().Add(time.Hour),
-		CTime: time.Now().Add(time.Hour),
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -192,7 +185,6 @@ func TestDuplicateHandling(t *testing.T) {
 	file := &File{
 		Path:  "/test-dup-fc.txt",
 		MTime: time.Now(),
-		CTime: time.Now(),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -244,7 +236,6 @@ func TestNullHandling(t *testing.T) {
 	file := &File{
 		Path:  "/regular.txt",
 		MTime: time.Now(),
-		CTime: time.Now(),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -349,7 +340,6 @@ func TestLargeDatasets(t *testing.T) {
 		file := &File{
 			Path:  types.FilePath(fmt.Sprintf("/large/file%05d.txt", i)),
 			MTime: time.Now(),
-			CTime: time.Now(),
 			Size:  int64(i * 1024),
 			Mode:  0644,
 			UID:   uint32(1000 + (i % 10)),
@@ -474,7 +464,6 @@ func TestQueryInjection(t *testing.T) {
 		file := &File{
 			Path:  types.FilePath(injection),
 			MTime: time.Now(),
-			CTime: time.Now(),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -513,7 +502,6 @@ func TestTimezoneHandling(t *testing.T) {
 	file := &File{
 		Path:  "/timezone-test.txt",
 		MTime: nyTime,
-		CTime: nyTime,
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -8,7 +8,6 @@ CREATE TABLE IF NOT EXISTS files (
     path TEXT NOT NULL UNIQUE,
     source_path TEXT NOT NULL DEFAULT '', -- The source directory this file came from (for restore path stripping)
    mtime INTEGER NOT NULL,
-    ctime INTEGER NOT NULL,
     size INTEGER NOT NULL,
     mode INTEGER NOT NULL,
     uid INTEGER NOT NULL,
@@ -103,7 +102,7 @@ CREATE TABLE IF NOT EXISTS snapshot_files (
     file_id TEXT NOT NULL,
     PRIMARY KEY (snapshot_id, file_id),
     FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
-    FOREIGN KEY (file_id) REFERENCES files(id)
+    FOREIGN KEY (file_id) REFERENCES files(id) ON DELETE CASCADE
 );

 -- Index for efficient file lookups (used in orphan detection)
@@ -116,7 +115,7 @@ CREATE TABLE IF NOT EXISTS snapshot_blobs (
     blob_hash TEXT NOT NULL,
     PRIMARY KEY (snapshot_id, blob_id),
     FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
-    FOREIGN KEY (blob_id) REFERENCES blobs(id)
+    FOREIGN KEY (blob_id) REFERENCES blobs(id) ON DELETE CASCADE
 );

 -- Index for efficient blob lookups (used in orphan detection)
@@ -130,7 +129,7 @@ CREATE TABLE IF NOT EXISTS uploads (
     size INTEGER NOT NULL,
     duration_ms INTEGER NOT NULL,
     FOREIGN KEY (blob_hash) REFERENCES blobs(blob_hash),
-    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id)
+    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE
 );

 -- Index for efficient snapshot lookups
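One caveat on the ON DELETE CASCADE clauses added above: SQLite ships with foreign-key enforcement disabled, so cascades only fire if each connection enables it. A minimal sketch, assuming the mattn/go-sqlite3 driver used elsewhere in this diff (the `_foreign_keys` DSN parameter is that driver's convention; whether this codebase enables it here or elsewhere is not shown in the diff):

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Enable foreign-key enforcement via the DSN so every pooled
	// connection runs with PRAGMA foreign_keys = ON.
	db, err := sql.Open("sqlite3", "state.db?_foreign_keys=on")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = db.Close() }()

	var on int
	if err := db.QueryRow("PRAGMA foreign_keys").Scan(&on); err != nil {
		log.Fatal(err)
	}
	log.Printf("foreign_keys enabled: %v", on == 1)
}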
@@ -345,9 +345,8 @@ func (b *BackupEngine) Backup(ctx context.Context, fsys fs.FS, root string) (str
 		Size:  info.Size(),
 		Mode:  uint32(info.Mode()),
 		MTime: info.ModTime(),
-		CTime: info.ModTime(), // Use mtime as ctime for test
-		UID:   1000,           // Default UID for test
-		GID:   1000,           // Default GID for test
+		UID:   1000, // Default UID for test
+		GID:   1000, // Default GID for test
 	}
 	err = b.repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
 		return b.repos.Files.Create(ctx, tx, file)
@@ -785,7 +785,6 @@ func (s *Scanner) checkFileInMemory(path string, info os.FileInfo, knownFiles ma
 		Path:       types.FilePath(path),
 		SourcePath: types.SourcePath(s.currentSourcePath), // Store source directory for restore path stripping
 		MTime:      info.ModTime(),
-		CTime:      info.ModTime(), // afero doesn't provide ctime
 		Size:       info.Size(),
 		Mode:       uint32(info.Mode()),
 		UID:        uid,
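With ctime dropped throughout this changeset, change detection in the scanner above effectively rests on mtime (plus the other stored metadata such as size). A simplified, hypothetical sketch of that comparison; the fileMeta type, the unchanged helper, and the second-granularity truncation are illustrative assumptions, not the scanner's exact logic:

package main

import (
	"fmt"
	"os"
	"time"
)

// fileMeta mirrors the subset of stored file metadata relevant to change detection.
type fileMeta struct {
	MTime time.Time
	Size  int64
}

// unchanged reports whether a file on disk still matches its stored metadata.
// Timestamps are compared at second granularity, matching the Unix-seconds
// storage in the mtime column.
func unchanged(known fileMeta, info os.FileInfo) bool {
	return known.Size == info.Size() &&
		known.MTime.Truncate(time.Second).Equal(info.ModTime().Truncate(time.Second))
}

func main() {
	info, err := os.Stat("go.mod")
	if err != nil {
		fmt.Println(err)
		return
	}
	known := fileMeta{MTime: info.ModTime(), Size: info.Size()}
	fmt.Println("unchanged:", unchanged(known, info))
}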
internal/vaultik/blob_fetch.go (new file, 93 lines)
@@ -0,0 +1,93 @@
+package vaultik
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+
+	"filippo.io/age"
+	"git.eeqj.de/sneak/vaultik/internal/blobgen"
+)
+
+// hashVerifyReader wraps a blobgen.Reader and verifies the double-SHA-256 hash
+// of decrypted plaintext when Close is called. It reuses the hash that
+// blobgen.Reader already computes internally via its TeeReader, avoiding
+// redundant SHA-256 computation.
+type hashVerifyReader struct {
+	reader   *blobgen.Reader // underlying decrypted blob reader (has internal hasher)
+	fetcher  io.ReadCloser   // raw fetched stream (closed on Close)
+	blobHash string          // expected double-SHA-256 hex
+	done     bool            // EOF reached
+}
+
+func (h *hashVerifyReader) Read(p []byte) (int, error) {
+	n, err := h.reader.Read(p)
+	if err == io.EOF {
+		h.done = true
+	}
+	return n, err
+}
+
+// Close verifies the hash (if the stream was fully read) and closes underlying readers.
+func (h *hashVerifyReader) Close() error {
+	readerErr := h.reader.Close()
+	fetcherErr := h.fetcher.Close()
+
+	if h.done {
+		firstHash := h.reader.Sum256()
+		secondHasher := sha256.New()
+		secondHasher.Write(firstHash)
+		actualHashHex := hex.EncodeToString(secondHasher.Sum(nil))
+		if actualHashHex != h.blobHash {
+			return fmt.Errorf("blob hash mismatch: expected %s, got %s", h.blobHash[:16], actualHashHex[:16])
+		}
+	}
+
+	if readerErr != nil {
+		return readerErr
+	}
+	return fetcherErr
+}
+
+// FetchAndDecryptBlob downloads a blob, decrypts and decompresses it, and
+// returns a streaming reader that computes the double-SHA-256 hash on the fly.
+// The hash is verified when the returned reader is closed (after fully reading).
+// This avoids buffering the entire blob in memory.
+func (v *Vaultik) FetchAndDecryptBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) (io.ReadCloser, error) {
+	rc, _, err := v.FetchBlob(ctx, blobHash, expectedSize)
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err := blobgen.NewReader(rc, identity)
+	if err != nil {
+		_ = rc.Close()
+		return nil, fmt.Errorf("creating blob reader: %w", err)
+	}
+
+	return &hashVerifyReader{
+		reader:   reader,
+		fetcher:  rc,
+		blobHash: blobHash,
+	}, nil
+}
+
+// FetchBlob downloads a blob and returns a reader for the encrypted data.
+func (v *Vaultik) FetchBlob(ctx context.Context, blobHash string, expectedSize int64) (io.ReadCloser, int64, error) {
+	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
+
+	rc, err := v.Storage.Get(ctx, blobPath)
+	if err != nil {
+		return nil, 0, fmt.Errorf("downloading blob %s: %w", blobHash[:16], err)
+	}
+
+	info, err := v.Storage.Stat(ctx, blobPath)
+	if err != nil {
+		_ = rc.Close()
+		return nil, 0, fmt.Errorf("stat blob %s: %w", blobHash[:16], err)
+	}
+
+	return rc, info.Size, nil
+}
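For reference, the two-level fan-out that FetchBlob applies above maps a hex blob hash onto its object key; a trivial standalone illustration of the `blobs/%s/%s/%s` scheme (the sample hash is made up):

package main

import "fmt"

// blobKey reproduces the sharded object path used by FetchBlob: the first
// two and next two hex characters become directory levels, keeping any
// single prefix from accumulating too many objects.
func blobKey(hash string) string {
	return fmt.Sprintf("blobs/%s/%s/%s", hash[:2], hash[2:4], hash)
}

func main() {
	h := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
	fmt.Println(blobKey(h)) // blobs/ab/cd/abcdef01...
}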
internal/vaultik/blob_fetch_hash_test.go (new file, 100 lines)
@@ -0,0 +1,100 @@
+package vaultik_test
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"io"
+	"strings"
+	"testing"
+
+	"filippo.io/age"
+	"git.eeqj.de/sneak/vaultik/internal/blobgen"
+	"git.eeqj.de/sneak/vaultik/internal/vaultik"
+)
+
+// TestFetchAndDecryptBlobVerifiesHash verifies that FetchAndDecryptBlob checks
+// the double-SHA-256 hash of the decrypted plaintext against the expected blob hash.
+func TestFetchAndDecryptBlobVerifiesHash(t *testing.T) {
+	identity, err := age.GenerateX25519Identity()
+	if err != nil {
+		t.Fatalf("generating identity: %v", err)
+	}
+
+	// Create test data and encrypt it using blobgen.Writer
+	plaintext := []byte("hello world test data for blob hash verification")
+	var encBuf bytes.Buffer
+	writer, err := blobgen.NewWriter(&encBuf, 1, []string{identity.Recipient().String()})
+	if err != nil {
+		t.Fatalf("creating blobgen writer: %v", err)
+	}
+	if _, err := writer.Write(plaintext); err != nil {
+		t.Fatalf("writing plaintext: %v", err)
+	}
+	if err := writer.Close(); err != nil {
+		t.Fatalf("closing writer: %v", err)
+	}
+	encryptedData := encBuf.Bytes()
+
+	// Compute correct double-SHA-256 hash of the plaintext (matches blobgen.Writer.Sum256)
+	firstHash := sha256.Sum256(plaintext)
+	secondHash := sha256.Sum256(firstHash[:])
+	correctHash := hex.EncodeToString(secondHash[:])
+
+	// Verify our hash matches what blobgen.Writer produces
+	writerHash := hex.EncodeToString(writer.Sum256())
+	if correctHash != writerHash {
+		t.Fatalf("hash computation mismatch: manual=%s, writer=%s", correctHash, writerHash)
+	}
+
+	// Set up mock storage with the blob at the correct path
+	mockStorage := NewMockStorer()
+	blobPath := "blobs/" + correctHash[:2] + "/" + correctHash[2:4] + "/" + correctHash
+	mockStorage.mu.Lock()
+	mockStorage.data[blobPath] = encryptedData
+	mockStorage.mu.Unlock()
+
+	tv := vaultik.NewForTesting(mockStorage)
+	ctx := context.Background()
+
+	t.Run("correct hash succeeds", func(t *testing.T) {
+		rc, err := tv.FetchAndDecryptBlob(ctx, correctHash, int64(len(encryptedData)), identity)
+		if err != nil {
+			t.Fatalf("expected success, got error: %v", err)
+		}
+		data, err := io.ReadAll(rc)
+		if err != nil {
+			t.Fatalf("reading stream: %v", err)
+		}
+		if err := rc.Close(); err != nil {
+			t.Fatalf("close (hash verification) failed: %v", err)
+		}
+		if !bytes.Equal(data, plaintext) {
+			t.Fatalf("decrypted data mismatch: got %q, want %q", data, plaintext)
+		}
+	})
+
+	t.Run("wrong hash fails", func(t *testing.T) {
+		// Use a fake hash that doesn't match the actual plaintext
+		fakeHash := strings.Repeat("ab", 32) // 64 hex chars
+		fakePath := "blobs/" + fakeHash[:2] + "/" + fakeHash[2:4] + "/" + fakeHash
+		mockStorage.mu.Lock()
+		mockStorage.data[fakePath] = encryptedData
+		mockStorage.mu.Unlock()
+
+		rc, err := tv.FetchAndDecryptBlob(ctx, fakeHash, int64(len(encryptedData)), identity)
+		if err != nil {
+			t.Fatalf("unexpected error opening stream: %v", err)
+		}
+		// Read all data; hash is verified on Close
+		_, _ = io.ReadAll(rc)
+		err = rc.Close()
+		if err == nil {
+			t.Fatal("expected error for mismatched hash, got nil")
+		}
+		if !strings.Contains(err.Error(), "hash mismatch") {
+			t.Fatalf("expected hash mismatch error, got: %v", err)
+		}
+	})
+}
@@ -1,55 +0,0 @@
-package vaultik
-
-import (
-	"context"
-	"fmt"
-	"io"
-
-	"filippo.io/age"
-	"git.eeqj.de/sneak/vaultik/internal/blobgen"
-)
-
-// FetchAndDecryptBlobResult holds the result of fetching and decrypting a blob.
-type FetchAndDecryptBlobResult struct {
-	Data []byte
-}
-
-// FetchAndDecryptBlob downloads a blob, decrypts it, and returns the plaintext data.
-func (v *Vaultik) FetchAndDecryptBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) (*FetchAndDecryptBlobResult, error) {
-	rc, _, err := v.FetchBlob(ctx, blobHash, expectedSize)
-	if err != nil {
-		return nil, err
-	}
-	defer func() { _ = rc.Close() }()
-
-	reader, err := blobgen.NewReader(rc, identity)
-	if err != nil {
-		return nil, fmt.Errorf("creating blob reader: %w", err)
-	}
-	defer func() { _ = reader.Close() }()
-
-	data, err := io.ReadAll(reader)
-	if err != nil {
-		return nil, fmt.Errorf("reading blob data: %w", err)
-	}
-
-	return &FetchAndDecryptBlobResult{Data: data}, nil
-}
-
-// FetchBlob downloads a blob and returns a reader for the encrypted data.
-func (v *Vaultik) FetchBlob(ctx context.Context, blobHash string, expectedSize int64) (io.ReadCloser, int64, error) {
-	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
-
-	rc, err := v.Storage.Get(ctx, blobPath)
-	if err != nil {
-		return nil, 0, fmt.Errorf("downloading blob %s: %w", blobHash[:16], err)
-	}
-
-	info, err := v.Storage.Stat(ctx, blobPath)
-	if err != nil {
-		_ = rc.Close()
-		return nil, 0, fmt.Errorf("stat blob %s: %w", blobHash[:16], err)
-	}
-
-	return rc, info.Size, nil
-}
@@ -22,6 +22,13 @@ import (
 	"golang.org/x/term"
 )

+const (
+	// progressBarWidth is the character width of the progress bar display.
+	progressBarWidth = 40
+	// progressBarThrottle is the minimum interval between progress bar redraws.
+	progressBarThrottle = 100 * time.Millisecond
+)
+
 // RestoreOptions contains options for the restore operation
 type RestoreOptions struct {
 	SnapshotID string
@@ -172,6 +179,15 @@ func (v *Vaultik) restoreAllFiles(
 	}
 	defer func() { _ = blobCache.Close() }()

+	// Calculate total bytes for progress bar
+	var totalBytesExpected int64
+	for _, file := range files {
+		totalBytesExpected += file.Size
+	}
+
+	// Create progress bar if output is a terminal
+	bar := v.newProgressBar("Restoring", totalBytesExpected)
+
 	for i, file := range files {
 		if v.ctx.Err() != nil {
 			return nil, v.ctx.Err()
@@ -181,11 +197,19 @@ func (v *Vaultik) restoreAllFiles(
 			log.Error("Failed to restore file", "path", file.Path, "error", err)
 			result.FilesFailed++
 			result.FailedFiles = append(result.FailedFiles, file.Path.String())
-			// Continue with other files
+			// Update progress bar even on failure
+			if bar != nil {
+				_ = bar.Add64(file.Size)
+			}
 			continue
 		}

-		// Progress logging
+		// Update progress bar
+		if bar != nil {
+			_ = bar.Add64(file.Size)
+		}
+
+		// Progress logging (for non-terminal or structured logs)
 		if (i+1)%100 == 0 || i+1 == len(files) {
 			log.Info("Restore progress",
 				"files", fmt.Sprintf("%d/%d", i+1, len(files)),
@@ -194,6 +218,10 @@ func (v *Vaultik) restoreAllFiles(
 		}
 	}

+	if bar != nil {
+		_ = bar.Finish()
+	}
+
 	return result, nil
 }

@@ -530,11 +558,23 @@ func (v *Vaultik) restoreRegularFile(

 // downloadBlob downloads and decrypts a blob
 func (v *Vaultik) downloadBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) ([]byte, error) {
-	result, err := v.FetchAndDecryptBlob(ctx, blobHash, expectedSize, identity)
+	rc, err := v.FetchAndDecryptBlob(ctx, blobHash, expectedSize, identity)
 	if err != nil {
 		return nil, err
 	}
-	return result.Data, nil
+
+	data, err := io.ReadAll(rc)
+	if err != nil {
+		_ = rc.Close()
+		return nil, fmt.Errorf("reading blob data: %w", err)
+	}
+
+	// Close triggers hash verification
+	if err := rc.Close(); err != nil {
+		return nil, err
+	}
+
+	return data, nil
 }

 // verifyRestoredFiles verifies that all restored files match their expected chunk hashes
@@ -572,22 +612,7 @@ func (v *Vaultik) verifyRestoredFiles(
 	)

 	// Create progress bar if output is a terminal
-	var bar *progressbar.ProgressBar
-	if isTerminal() {
-		bar = progressbar.NewOptions64(
-			totalBytes,
-			progressbar.OptionSetDescription("Verifying"),
-			progressbar.OptionSetWriter(v.Stderr),
-			progressbar.OptionShowBytes(true),
-			progressbar.OptionShowCount(),
-			progressbar.OptionSetWidth(40),
-			progressbar.OptionThrottle(100*time.Millisecond),
-			progressbar.OptionOnCompletion(func() {
-				v.printfStderr("\n")
-			}),
-			progressbar.OptionSetRenderBlankState(true),
-		)
-	}
+	bar := v.newProgressBar("Verifying", totalBytes)

 	// Verify each file
 	for _, file := range regularFiles {
@@ -681,7 +706,37 @@ func (v *Vaultik) verifyFile(
 	return bytesVerified, nil
 }

-// isTerminal returns true if stdout is a terminal
-func isTerminal() bool {
-	return term.IsTerminal(int(os.Stdout.Fd()))
+// newProgressBar creates a terminal-aware progress bar with standard options.
+// It returns nil if stdout is not a terminal.
+func (v *Vaultik) newProgressBar(description string, total int64) *progressbar.ProgressBar {
+	if !v.isTerminal() {
+		return nil
+	}
+	return progressbar.NewOptions64(
+		total,
+		progressbar.OptionSetDescription(description),
+		progressbar.OptionSetWriter(v.Stderr),
+		progressbar.OptionShowBytes(true),
+		progressbar.OptionShowCount(),
+		progressbar.OptionSetWidth(progressBarWidth),
+		progressbar.OptionThrottle(progressBarThrottle),
+		progressbar.OptionOnCompletion(func() {
+			v.printfStderr("\n")
+		}),
+		progressbar.OptionSetRenderBlankState(true),
+	)
+}
+
+// isTerminal returns true if stdout is a terminal.
+// It checks whether v.Stdout implements Fd() (i.e. is an *os.File),
+// and falls back to false for non-file writers (e.g. in tests).
+func (v *Vaultik) isTerminal() bool {
+	type fder interface {
+		Fd() uintptr
+	}
+	f, ok := v.Stdout.(fder)
+	if !ok {
+		return false
+	}
+	return term.IsTerminal(int(f.Fd()))
 }
@@ -419,7 +419,7 @@ func (v *Vaultik) listRemoteSnapshotIDs() (map[string]bool, error) {
 	return remoteSnapshots, nil
 }

-// reconcileLocalWithRemote removes local snapshots not in remote and returns the surviving local map
+// reconcileLocalWithRemote builds a map of local snapshots keyed by ID for cross-referencing with remote
 func (v *Vaultik) reconcileLocalWithRemote(remoteSnapshots map[string]bool) (map[string]*database.Snapshot, error) {
 	localSnapshots, err := v.Repositories.Snapshots.ListRecent(v.ctx, 10000)
 	if err != nil {
@@ -431,19 +431,6 @@ func (v *Vaultik) reconcileLocalWithRemote(remoteSnapshots map[string]bool) (map
 		localSnapshotMap[s.ID.String()] = s
 	}

-	for _, snap := range localSnapshots {
-		snapshotIDStr := snap.ID.String()
-		if !remoteSnapshots[snapshotIDStr] {
-			log.Info("Removing local snapshot not found in remote", "snapshot_id", snap.ID)
-			if err := v.deleteSnapshotFromLocalDB(snapshotIDStr); err != nil {
-				log.Error("Failed to delete local snapshot", "snapshot_id", snap.ID, "error", err)
-			} else {
-				log.Info("Deleted local snapshot not found in remote", "snapshot_id", snap.ID)
-				delete(localSnapshotMap, snapshotIDStr)
-			}
-		}
-	}
-
 	return localSnapshotMap, nil
 }

@@ -872,7 +859,7 @@ func (v *Vaultik) syncWithRemote() error {
 		snapshotIDStr := snapshot.ID.String()
 		if !remoteSnapshots[snapshotIDStr] {
 			log.Info("Removing local snapshot not found in remote", "snapshot_id", snapshot.ID)
-			if err := v.Repositories.Snapshots.Delete(v.ctx, snapshotIDStr); err != nil {
+			if err := v.deleteSnapshotFromLocalDB(snapshotIDStr); err != nil {
 				log.Error("Failed to delete local snapshot", "snapshot_id", snapshot.ID, "error", err)
 			} else {
 				removedCount++