vaultik/internal/database/blob_chunks_test.go
sneak d3afa65420 Fix foreign key constraints and improve snapshot tracking
- Add unified compression/encryption package in internal/blobgen
- Update DATAMODEL.md to reflect current schema implementation
- Refactor snapshot cleanup into well-named methods for clarity
- Add snapshot_id to uploads table to track new blobs per snapshot
- Fix blob count reporting for incremental backups
- Add DeleteOrphaned method to BlobChunkRepository
- Fix cleanup order to respect foreign key constraints
- Update tests to reflect schema changes
2025-07-26 02:22:25 +02:00

package database

import (
	"context"
	"strings"
	"testing"
	"time"
)
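
// TestBlobChunkRepository exercises the basic blob_chunks lifecycle: mapping
// chunks into a blob, listing them by blob ID in offset order, looking a
// mapping up by chunk hash, and rejecting a duplicate insert via the table's
// primary key constraint.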
func TestBlobChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Create the parent blob first; blob_chunks rows reference an existing blob.
	blob := &Blob{
		ID:        "blob1-uuid",
		Hash:      "blob1-hash",
		CreatedTS: time.Now(),
	}
	err := repos.Blobs.Create(ctx, nil, blob)
	if err != nil {
		t.Fatalf("failed to create blob: %v", err)
	}
	// Create chunks
	chunks := []string{"chunk1", "chunk2", "chunk3"}
	for _, chunkHash := range chunks {
		chunk := &Chunk{
			ChunkHash: chunkHash,
			Size:      1024,
		}
		err = repos.Chunks.Create(ctx, nil, chunk)
		if err != nil {
			t.Fatalf("failed to create chunk %s: %v", chunkHash, err)
		}
	}

	// Test Create
	bc1 := &BlobChunk{
		BlobID:    blob.ID,
		ChunkHash: "chunk1",
		Offset:    0,
		Length:    1024,
	}
	err = repos.BlobChunks.Create(ctx, nil, bc1)
	if err != nil {
		t.Fatalf("failed to create blob chunk: %v", err)
	}

	// Add more chunks to the same blob
	bc2 := &BlobChunk{
		BlobID:    blob.ID,
		ChunkHash: "chunk2",
		Offset:    1024,
		Length:    2048,
	}
	err = repos.BlobChunks.Create(ctx, nil, bc2)
	if err != nil {
		t.Fatalf("failed to create second blob chunk: %v", err)
	}

	bc3 := &BlobChunk{
		BlobID:    blob.ID,
		ChunkHash: "chunk3",
		Offset:    3072,
		Length:    512,
	}
	err = repos.BlobChunks.Create(ctx, nil, bc3)
	if err != nil {
		t.Fatalf("failed to create third blob chunk: %v", err)
	}

	// Test GetByBlobID
	blobChunks, err := repos.BlobChunks.GetByBlobID(ctx, blob.ID)
	if err != nil {
		t.Fatalf("failed to get blob chunks: %v", err)
	}
	if len(blobChunks) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(blobChunks))
	}

	// Verify order by offset
	expectedOffsets := []int64{0, 1024, 3072}
	for i, bc := range blobChunks {
		if bc.Offset != expectedOffsets[i] {
			t.Errorf("wrong chunk order: expected offset %d, got %d", expectedOffsets[i], bc.Offset)
		}
	}

	// Test GetByChunkHash
	bc, err := repos.BlobChunks.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get blob chunk by chunk hash: %v", err)
	}
	if bc == nil {
		t.Fatal("expected blob chunk, got nil")
	}
	if bc.BlobID != blob.ID {
		t.Errorf("wrong blob ID: expected %s, got %s", blob.ID, bc.BlobID)
	}
	if bc.Offset != 1024 {
		t.Errorf("wrong offset: expected 1024, got %d", bc.Offset)
	}

	// Test duplicate insert (should fail due to primary key constraint)
	err = repos.BlobChunks.Create(ctx, nil, bc1)
	if err == nil {
		t.Fatal("duplicate blob_chunk insert should fail due to primary key constraint")
	}
	if !strings.Contains(err.Error(), "UNIQUE") && !strings.Contains(err.Error(), "constraint") {
		t.Fatalf("expected constraint error, got: %v", err)
	}

	// Test non-existent chunk
	bc, err = repos.BlobChunks.GetByChunkHash(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if bc != nil {
		t.Error("expected nil for non-existent chunk")
	}
}
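
// TestBlobChunkRepositoryMultipleBlobs covers the deduplication case where a
// single chunk is referenced by more than one blob, verifying per-blob
// listings and that a chunk-hash lookup on a shared chunk returns a mapping.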
func TestBlobChunkRepositoryMultipleBlobs(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Create blobs
	blob1 := &Blob{
		ID:        "blob1-uuid",
		Hash:      "blob1-hash",
		CreatedTS: time.Now(),
	}
	blob2 := &Blob{
		ID:        "blob2-uuid",
		Hash:      "blob2-hash",
		CreatedTS: time.Now(),
	}
	err := repos.Blobs.Create(ctx, nil, blob1)
	if err != nil {
		t.Fatalf("failed to create blob1: %v", err)
	}
	err = repos.Blobs.Create(ctx, nil, blob2)
	if err != nil {
		t.Fatalf("failed to create blob2: %v", err)
	}

	// Create chunks
	chunkHashes := []string{"chunk1", "chunk2", "chunk3"}
	for _, chunkHash := range chunkHashes {
		chunk := &Chunk{
			ChunkHash: chunkHash,
			Size:      1024,
		}
		err = repos.Chunks.Create(ctx, nil, chunk)
		if err != nil {
			t.Fatalf("failed to create chunk %s: %v", chunkHash, err)
		}
	}
	// Create blob_chunk mappings across both blobs.
	// Some chunks are shared between blobs (deduplication scenario).
	blobChunks := []BlobChunk{
		{BlobID: blob1.ID, ChunkHash: "chunk1", Offset: 0, Length: 1024},
		{BlobID: blob1.ID, ChunkHash: "chunk2", Offset: 1024, Length: 1024},
		{BlobID: blob2.ID, ChunkHash: "chunk2", Offset: 0, Length: 1024}, // chunk2 is shared
		{BlobID: blob2.ID, ChunkHash: "chunk3", Offset: 1024, Length: 1024},
	}
	for _, bc := range blobChunks {
		err := repos.BlobChunks.Create(ctx, nil, &bc)
		if err != nil {
			t.Fatalf("failed to create blob chunk: %v", err)
		}
	}
	// Verify blob1 chunks
	chunks, err := repos.BlobChunks.GetByBlobID(ctx, blob1.ID)
	if err != nil {
		t.Fatalf("failed to get blob1 chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 chunks for blob1, got %d", len(chunks))
	}

	// Verify blob2 chunks
	chunks, err = repos.BlobChunks.GetByBlobID(ctx, blob2.ID)
	if err != nil {
		t.Fatalf("failed to get blob2 chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 chunks for blob2, got %d", len(chunks))
	}

	// Verify shared chunk
	bc, err := repos.BlobChunks.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get shared chunk: %v", err)
	}
	if bc == nil {
		t.Fatal("expected shared chunk, got nil")
	}

	// GetByChunkHash returns first match, should be blob1
	if bc.BlobID != blob1.ID {
		t.Errorf("expected %s for shared chunk, got %s", blob1.ID, bc.BlobID)
	}
}