vaultik/internal/database/blob_chunks_test.go

package database

import (
	"context"
	"strings"
	"testing"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/types"
)
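
// TestBlobChunkRepository covers the basic lifecycle of blob/chunk
// associations: creating entries, looking them up by blob ID and by chunk
// hash, rejecting duplicate inserts, and handling a missing chunk hash.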
func TestBlobChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Create blob first
	blob := &Blob{
		ID:        types.NewBlobID(),
		Hash:      types.BlobHash("blob1-hash"),
		CreatedTS: time.Now(),
	}
	err := repos.Blobs.Create(ctx, nil, blob)
	if err != nil {
		t.Fatalf("failed to create blob: %v", err)
	}

	// Create chunks
	chunks := []types.ChunkHash{"chunk1", "chunk2", "chunk3"}
	for _, chunkHash := range chunks {
		chunk := &Chunk{
			ChunkHash: chunkHash,
			Size:      1024,
		}
		err = repos.Chunks.Create(ctx, nil, chunk)
		if err != nil {
			t.Fatalf("failed to create chunk %s: %v", chunkHash, err)
		}
	}
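
	// bc1, bc2, and bc3 lay out the blob contiguously (bytes 0-1023,
	// 1024-3071, and 3072-3583), so the GetByBlobID check below expects
	// them back ordered by offset.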
	// Test Create
	bc1 := &BlobChunk{
		BlobID:    blob.ID,
		ChunkHash: types.ChunkHash("chunk1"),
		Offset:    0,
		Length:    1024,
	}
	err = repos.BlobChunks.Create(ctx, nil, bc1)
	if err != nil {
		t.Fatalf("failed to create blob chunk: %v", err)
	}

	// Add more chunks to the same blob
	bc2 := &BlobChunk{
		BlobID:    blob.ID,
		ChunkHash: types.ChunkHash("chunk2"),
		Offset:    1024,
		Length:    2048,
	}
	err = repos.BlobChunks.Create(ctx, nil, bc2)
	if err != nil {
		t.Fatalf("failed to create second blob chunk: %v", err)
	}

	bc3 := &BlobChunk{
		BlobID:    blob.ID,
		ChunkHash: types.ChunkHash("chunk3"),
		Offset:    3072,
		Length:    512,
	}
	err = repos.BlobChunks.Create(ctx, nil, bc3)
	if err != nil {
		t.Fatalf("failed to create third blob chunk: %v", err)
	}

	// Test GetByBlobID
	blobChunks, err := repos.BlobChunks.GetByBlobID(ctx, blob.ID.String())
	if err != nil {
		t.Fatalf("failed to get blob chunks: %v", err)
	}
	if len(blobChunks) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(blobChunks))
	}

	// Verify order by offset
	expectedOffsets := []int64{0, 1024, 3072}
	for i, bc := range blobChunks {
		if bc.Offset != expectedOffsets[i] {
			t.Errorf("wrong chunk order: expected offset %d, got %d", expectedOffsets[i], bc.Offset)
		}
	}

	// Test GetByChunkHash
	bc, err := repos.BlobChunks.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get blob chunk by chunk hash: %v", err)
	}
	if bc == nil {
		t.Fatal("expected blob chunk, got nil")
	}
	if bc.BlobID != blob.ID {
		t.Errorf("wrong blob ID: expected %s, got %s", blob.ID, bc.BlobID)
	}
	if bc.Offset != 1024 {
		t.Errorf("wrong offset: expected 1024, got %d", bc.Offset)
	}

	// Test duplicate insert (should fail due to primary key constraint)
	err = repos.BlobChunks.Create(ctx, nil, bc1)
	if err == nil {
		t.Fatal("duplicate blob_chunk insert should fail due to primary key constraint")
	}
	if !strings.Contains(err.Error(), "UNIQUE") && !strings.Contains(err.Error(), "constraint") {
		t.Fatalf("expected constraint error, got: %v", err)
	}

	// Test non-existent chunk
	bc, err = repos.BlobChunks.GetByChunkHash(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if bc != nil {
		t.Error("expected nil for non-existent chunk")
	}
}
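
// TestBlobChunkRepositoryMultipleBlobs verifies that a chunk can be
// associated with more than one blob (the deduplication case) and that
// GetByBlobID returns only the associations for the requested blob.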
func TestBlobChunkRepositoryMultipleBlobs(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Create blobs
	blob1 := &Blob{
		ID:        types.NewBlobID(),
		Hash:      types.BlobHash("blob1-hash"),
		CreatedTS: time.Now(),
	}
	blob2 := &Blob{
		ID:        types.NewBlobID(),
		Hash:      types.BlobHash("blob2-hash"),
		CreatedTS: time.Now(),
	}
	err := repos.Blobs.Create(ctx, nil, blob1)
	if err != nil {
		t.Fatalf("failed to create blob1: %v", err)
	}
	err = repos.Blobs.Create(ctx, nil, blob2)
	if err != nil {
		t.Fatalf("failed to create blob2: %v", err)
	}

	// Create chunks
	chunkHashes := []types.ChunkHash{"chunk1", "chunk2", "chunk3"}
	for _, chunkHash := range chunkHashes {
		chunk := &Chunk{
			ChunkHash: chunkHash,
			Size:      1024,
		}
		err = repos.Chunks.Create(ctx, nil, chunk)
		if err != nil {
			t.Fatalf("failed to create chunk %s: %v", chunkHash, err)
		}
	}

	// Create chunks across multiple blobs.
	// Some chunks are shared between blobs (deduplication scenario).
	blobChunks := []BlobChunk{
		{BlobID: blob1.ID, ChunkHash: types.ChunkHash("chunk1"), Offset: 0, Length: 1024},
		{BlobID: blob1.ID, ChunkHash: types.ChunkHash("chunk2"), Offset: 1024, Length: 1024},
		{BlobID: blob2.ID, ChunkHash: types.ChunkHash("chunk2"), Offset: 0, Length: 1024}, // chunk2 is shared
		{BlobID: blob2.ID, ChunkHash: types.ChunkHash("chunk3"), Offset: 1024, Length: 1024},
	}
	for _, bc := range blobChunks {
		err := repos.BlobChunks.Create(ctx, nil, &bc)
		if err != nil {
			t.Fatalf("failed to create blob chunk: %v", err)
		}
	}

	// Verify blob1 chunks
	chunks, err := repos.BlobChunks.GetByBlobID(ctx, blob1.ID.String())
	if err != nil {
		t.Fatalf("failed to get blob1 chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 chunks for blob1, got %d", len(chunks))
	}

	// Verify blob2 chunks
	chunks, err = repos.BlobChunks.GetByBlobID(ctx, blob2.ID.String())
	if err != nil {
		t.Fatalf("failed to get blob2 chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 chunks for blob2, got %d", len(chunks))
	}

	// Verify shared chunk
	bc, err := repos.BlobChunks.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get shared chunk: %v", err)
	}
	if bc == nil {
		t.Fatal("expected shared chunk, got nil")
	}
	// GetByChunkHash returns first match, should be blob1
	if bc.BlobID != blob1.ID {
		t.Errorf("expected %s for shared chunk, got %s", blob1.ID, bc.BlobID)
	}
}