vaultik/internal/database/file_chunks_test.go
sneak 78af626759 Major refactoring: UUID-based storage, streaming architecture, and CLI improvements
This commit represents a significant architectural overhaul of vaultik:

Database Schema Changes:
- Switch files table to use UUID primary keys instead of path-based keys
- Add UUID primary keys to blobs table for immediate chunk association
- Update all foreign key relationships to use UUIDs
- Add comprehensive schema documentation in DATAMODEL.md
- Add SQLite busy timeout handling for concurrent operations

Streaming and Performance Improvements:
- Implement true streaming blob packing without intermediate storage
- Add streaming chunk processing to reduce memory usage
- Improve progress reporting with real-time metrics
- Add upload metrics tracking in new uploads table

CLI Refactoring:
- Restructure CLI to use subcommands: snapshot create/list/purge/verify
- Add store info command for S3 configuration display
- Add custom duration parser supporting days/weeks/months/years
- Remove old backup.go in favor of enhanced snapshot.go
- Add --cron flag for silent operation

Configuration Changes:
- Remove unused index_prefix configuration option
- Add support for snapshot pruning retention policies
- Improve configuration validation and error messages

Testing Improvements:
- Add comprehensive repository tests with edge cases
- Add cascade delete debugging tests
- Fix concurrent operation tests to use SQLite busy timeout
- Remove tolerance for SQLITE_BUSY errors in tests

Documentation:
- Add MIT LICENSE file
- Update README with new command structure
- Add comprehensive DATAMODEL.md explaining database schema
- Update DESIGN.md with UUID-based architecture

Other Changes:
- Add test-config.yml for testing
- Update Makefile with better test output formatting
- Fix various race conditions in concurrent operations
- Improve error handling throughout
2025-07-22 14:56:44 +02:00

162 lines
3.5 KiB
Go

package database
import (
"context"
"fmt"
"testing"
"time"
)
// TestFileChunkRepository exercises the basic FileChunk lifecycle against a
// single file: create, ordered retrieval by path and by file ID, idempotent
// duplicate insert, and bulk delete by file ID.
func TestFileChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	chunkRepo := NewFileChunkRepository(db)
	fileRepo := NewFileRepository(db)

	// Insert the owning file row first; chunks reference it via FileID.
	now := time.Now().Truncate(time.Second)
	testFile := &File{
		Path:       "/test/file.txt",
		MTime:      now,
		CTime:      now,
		Size:       3072,
		Mode:       0644,
		UID:        1000,
		GID:        1000,
		LinkTarget: "",
	}
	if err := fileRepo.Create(ctx, nil, testFile); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Test Create: three sequential chunks belonging to the same file.
	first := &FileChunk{FileID: testFile.ID, Idx: 0, ChunkHash: "chunk1"}
	if err := chunkRepo.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create file chunk: %v", err)
	}
	second := &FileChunk{FileID: testFile.ID, Idx: 1, ChunkHash: "chunk2"}
	if err := chunkRepo.Create(ctx, nil, second); err != nil {
		t.Fatalf("failed to create second file chunk: %v", err)
	}
	third := &FileChunk{FileID: testFile.ID, Idx: 2, ChunkHash: "chunk3"}
	if err := chunkRepo.Create(ctx, nil, third); err != nil {
		t.Fatalf("failed to create third file chunk: %v", err)
	}

	// Test GetByFile: path-based lookup should return all three chunks.
	chunks, err := chunkRepo.GetByFile(ctx, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to get file chunks: %v", err)
	}
	if len(chunks) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(chunks))
	}
	// Verify the chunks come back ordered by their index.
	for want, got := range chunks {
		if got.Idx != want {
			t.Errorf("wrong chunk order: expected idx %d, got %d", want, got.Idx)
		}
	}

	// Test duplicate insert (should be idempotent)
	if err := chunkRepo.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create duplicate file chunk: %v", err)
	}

	// Test DeleteByFileID: removes every chunk belonging to the file.
	if err := chunkRepo.DeleteByFileID(ctx, nil, testFile.ID); err != nil {
		t.Fatalf("failed to delete file chunks: %v", err)
	}
	chunks, err = chunkRepo.GetByFileID(ctx, testFile.ID)
	if err != nil {
		t.Fatalf("failed to get deleted file chunks: %v", err)
	}
	if len(chunks) != 0 {
		t.Errorf("expected 0 chunks after delete, got %d", len(chunks))
	}
}
// TestFileChunkRepositoryMultipleFiles verifies that chunks belonging to
// different files are kept isolated: each file's lookup by ID returns only
// its own chunk set.
func TestFileChunkRepositoryMultipleFiles(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	chunkRepo := NewFileChunkRepository(db)
	fileRepo := NewFileRepository(db)

	// Create three files, each of which will own its own chunk set.
	now := time.Now().Truncate(time.Second)
	paths := []string{"/file1.txt", "/file2.txt", "/file3.txt"}
	created := make([]*File, 0, len(paths))
	for _, p := range paths {
		f := &File{
			Path:       p,
			MTime:      now,
			CTime:      now,
			Size:       2048,
			Mode:       0644,
			UID:        1000,
			GID:        1000,
			LinkTarget: "",
		}
		if err := fileRepo.Create(ctx, nil, f); err != nil {
			t.Fatalf("failed to create file %s: %v", p, err)
		}
		created = append(created, f)
	}

	// Give every file two chunks with hashes unique across all files.
	for fileIdx, f := range created {
		for chunkIdx := 0; chunkIdx < 2; chunkIdx++ {
			fc := &FileChunk{
				FileID:    f.ID,
				Idx:       chunkIdx,
				ChunkHash: fmt.Sprintf("file%d_chunk%d", fileIdx, chunkIdx),
			}
			if err := chunkRepo.Create(ctx, nil, fc); err != nil {
				t.Fatalf("failed to create file chunk: %v", err)
			}
		}
	}

	// Each file must report exactly its own two chunks.
	for fileIdx, f := range created {
		got, err := chunkRepo.GetByFileID(ctx, f.ID)
		if err != nil {
			t.Fatalf("failed to get chunks for file %d: %v", fileIdx, err)
		}
		if len(got) != 2 {
			t.Errorf("expected 2 chunks for file %d, got %d", fileIdx, len(got))
		}
	}
}