Add custom types, version command, and restore --verify flag

- Add internal/types package with type-safe wrappers for IDs, hashes,
  paths, and credentials (FileID, BlobID, ChunkHash, etc.)
- Implement driver.Valuer and sql.Scanner for UUID-based types
- Add `vaultik version` command showing version, commit, and Go version
- Add `--verify` flag to restore command that checksums all restored
  files against expected chunk hashes with a progress bar
- Remove fetch.go (dead code, functionality in restore)
- Clean up TODO.md, remove completed items
- Update all database and snapshot code to use new custom types
This commit is contained in:
2026-01-14 17:11:52 -08:00
parent 2afd54d693
commit 417b25a5f5
53 changed files with 2330 additions and 1581 deletions

View File

@@ -6,6 +6,8 @@ import (
"fmt"
"testing"
"time"
"git.eeqj.de/sneak/vaultik/internal/types"
)
// TestFileRepositoryUUIDGeneration tests that files get unique UUIDs
@@ -46,15 +48,15 @@ func TestFileRepositoryUUIDGeneration(t *testing.T) {
}
// Check UUID was generated
if file.ID == "" {
if file.ID.IsZero() {
t.Error("file ID was not generated")
}
// Check UUID is unique
if uuids[file.ID] {
if uuids[file.ID.String()] {
t.Errorf("duplicate UUID generated: %s", file.ID)
}
uuids[file.ID] = true
uuids[file.ID.String()] = true
}
}
@@ -96,7 +98,8 @@ func TestFileRepositoryGetByID(t *testing.T) {
}
// Test non-existent ID
nonExistent, err := repo.GetByID(ctx, "non-existent-uuid")
nonExistentID := types.NewFileID() // Generate a new UUID that won't exist in the database
nonExistent, err := repo.GetByID(ctx, nonExistentID)
if err != nil {
t.Fatalf("GetByID should not return error for non-existent ID: %v", err)
}
@@ -154,7 +157,7 @@ func TestOrphanedFileCleanup(t *testing.T) {
}
// Add file2 to snapshot
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot.ID, file2.ID)
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot.ID.String(), file2.ID)
if err != nil {
t.Fatalf("failed to add file to snapshot: %v", err)
}
@@ -194,11 +197,11 @@ func TestOrphanedChunkCleanup(t *testing.T) {
// Create chunks
chunk1 := &Chunk{
ChunkHash: "orphaned-chunk",
ChunkHash: types.ChunkHash("orphaned-chunk"),
Size: 1024,
}
chunk2 := &Chunk{
ChunkHash: "referenced-chunk",
ChunkHash: types.ChunkHash("referenced-chunk"),
Size: 1024,
}
@@ -244,7 +247,7 @@ func TestOrphanedChunkCleanup(t *testing.T) {
}
// Check that orphaned chunk is gone
orphanedChunk, err := repos.Chunks.GetByHash(ctx, chunk1.ChunkHash)
orphanedChunk, err := repos.Chunks.GetByHash(ctx, chunk1.ChunkHash.String())
if err != nil {
t.Fatalf("error getting chunk: %v", err)
}
@@ -253,7 +256,7 @@ func TestOrphanedChunkCleanup(t *testing.T) {
}
// Check that referenced chunk still exists
referencedChunk, err := repos.Chunks.GetByHash(ctx, chunk2.ChunkHash)
referencedChunk, err := repos.Chunks.GetByHash(ctx, chunk2.ChunkHash.String())
if err != nil {
t.Fatalf("error getting chunk: %v", err)
}
@@ -272,13 +275,13 @@ func TestOrphanedBlobCleanup(t *testing.T) {
// Create blobs
blob1 := &Blob{
ID: "orphaned-blob-id",
Hash: "orphaned-blob",
ID: types.NewBlobID(),
Hash: types.BlobHash("orphaned-blob"),
CreatedTS: time.Now().Truncate(time.Second),
}
blob2 := &Blob{
ID: "referenced-blob-id",
Hash: "referenced-blob",
ID: types.NewBlobID(),
Hash: types.BlobHash("referenced-blob"),
CreatedTS: time.Now().Truncate(time.Second),
}
@@ -303,7 +306,7 @@ func TestOrphanedBlobCleanup(t *testing.T) {
}
// Add blob2 to snapshot
err = repos.Snapshots.AddBlob(ctx, nil, snapshot.ID, blob2.ID, blob2.Hash)
err = repos.Snapshots.AddBlob(ctx, nil, snapshot.ID.String(), blob2.ID, blob2.Hash)
if err != nil {
t.Fatalf("failed to add blob to snapshot: %v", err)
}
@@ -315,7 +318,7 @@ func TestOrphanedBlobCleanup(t *testing.T) {
}
// Check that orphaned blob is gone
orphanedBlob, err := repos.Blobs.GetByID(ctx, blob1.ID)
orphanedBlob, err := repos.Blobs.GetByID(ctx, blob1.ID.String())
if err != nil {
t.Fatalf("error getting blob: %v", err)
}
@@ -324,7 +327,7 @@ func TestOrphanedBlobCleanup(t *testing.T) {
}
// Check that referenced blob still exists
referencedBlob, err := repos.Blobs.GetByID(ctx, blob2.ID)
referencedBlob, err := repos.Blobs.GetByID(ctx, blob2.ID.String())
if err != nil {
t.Fatalf("error getting blob: %v", err)
}
@@ -357,7 +360,7 @@ func TestFileChunkRepositoryWithUUIDs(t *testing.T) {
}
// Create chunks
chunks := []string{"chunk1", "chunk2", "chunk3"}
chunks := []types.ChunkHash{"chunk1", "chunk2", "chunk3"}
for i, chunkHash := range chunks {
chunk := &Chunk{
ChunkHash: chunkHash,
@@ -443,7 +446,7 @@ func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
// Create a chunk that appears in both files (deduplication)
chunk := &Chunk{
ChunkHash: "shared-chunk",
ChunkHash: types.ChunkHash("shared-chunk"),
Size: 1024,
}
err = repos.Chunks.Create(ctx, nil, chunk)
@@ -526,7 +529,7 @@ func TestSnapshotRepositoryExtendedFields(t *testing.T) {
}
// Retrieve and verify
retrieved, err := repo.GetByID(ctx, snapshot.ID)
retrieved, err := repo.GetByID(ctx, snapshot.ID.String())
if err != nil {
t.Fatalf("failed to get snapshot: %v", err)
}
@@ -581,7 +584,7 @@ func TestComplexOrphanedDataScenario(t *testing.T) {
files := make([]*File, 3)
for i := range files {
files[i] = &File{
Path: fmt.Sprintf("/file%d.txt", i),
Path: types.FilePath(fmt.Sprintf("/file%d.txt", i)),
MTime: time.Now().Truncate(time.Second),
CTime: time.Now().Truncate(time.Second),
Size: 1024,
@@ -601,29 +604,29 @@ func TestComplexOrphanedDataScenario(t *testing.T) {
// file0: only in snapshot1
// file1: in both snapshots
// file2: only in snapshot2
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot1.ID, files[0].ID)
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot1.ID.String(), files[0].ID)
if err != nil {
t.Fatal(err)
}
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot1.ID, files[1].ID)
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot1.ID.String(), files[1].ID)
if err != nil {
t.Fatal(err)
}
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot2.ID, files[1].ID)
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot2.ID.String(), files[1].ID)
if err != nil {
t.Fatal(err)
}
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot2.ID, files[2].ID)
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot2.ID.String(), files[2].ID)
if err != nil {
t.Fatal(err)
}
// Delete snapshot1
err = repos.Snapshots.DeleteSnapshotFiles(ctx, snapshot1.ID)
err = repos.Snapshots.DeleteSnapshotFiles(ctx, snapshot1.ID.String())
if err != nil {
t.Fatal(err)
}
err = repos.Snapshots.Delete(ctx, snapshot1.ID)
err = repos.Snapshots.Delete(ctx, snapshot1.ID.String())
if err != nil {
t.Fatal(err)
}
@@ -689,7 +692,7 @@ func TestCascadeDelete(t *testing.T) {
// Create chunks and file-chunk mappings
for i := 0; i < 3; i++ {
chunk := &Chunk{
ChunkHash: fmt.Sprintf("cascade-chunk-%d", i),
ChunkHash: types.ChunkHash(fmt.Sprintf("cascade-chunk-%d", i)),
Size: 1024,
}
err = repos.Chunks.Create(ctx, nil, chunk)
@@ -807,7 +810,7 @@ func TestConcurrentOrphanedCleanup(t *testing.T) {
// Create many files, some orphaned
for i := 0; i < 20; i++ {
file := &File{
Path: fmt.Sprintf("/concurrent-%d.txt", i),
Path: types.FilePath(fmt.Sprintf("/concurrent-%d.txt", i)),
MTime: time.Now().Truncate(time.Second),
CTime: time.Now().Truncate(time.Second),
Size: 1024,
@@ -822,7 +825,7 @@ func TestConcurrentOrphanedCleanup(t *testing.T) {
// Add even-numbered files to snapshot
if i%2 == 0 {
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot.ID, file.ID)
err = repos.Snapshots.AddFileByID(ctx, nil, snapshot.ID.String(), file.ID)
if err != nil {
t.Fatal(err)
}
@@ -860,7 +863,7 @@ func TestConcurrentOrphanedCleanup(t *testing.T) {
// Verify all remaining files are even-numbered
for _, file := range files {
var num int
_, err := fmt.Sscanf(file.Path, "/concurrent-%d.txt", &num)
_, err := fmt.Sscanf(file.Path.String(), "/concurrent-%d.txt", &num)
if err != nil {
t.Logf("failed to parse file number from %s: %v", file.Path, err)
}