vaultik/internal/database/cascade_debug_test.go
commit 78af626759 by sneak, 2025-07-22 14:56:44 +02:00

Major refactoring: UUID-based storage, streaming architecture, and CLI improvements
This commit represents a significant architectural overhaul of vaultik:

Database Schema Changes:
- Switch files table to use UUID primary keys instead of path-based keys
- Add UUID primary keys to blobs table for immediate chunk association
- Update all foreign key relationships to use UUIDs
- Add comprehensive schema documentation in DATAMODEL.md
- Add SQLite busy timeout handling for concurrent operations (see the connection sketch after this list)
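
The cascade and concurrency behavior both hinge on SQLite settings that are off by default. A minimal sketch of how they might be wired up, assuming the mattn/go-sqlite3 driver; openDB and the file_chunks definition here are illustrative guesses, with the authoritative schema documented in DATAMODEL.md:

package database

import (
	"database/sql"

	_ "github.com/mattn/go-sqlite3"
)

// Illustrative guess at the file_chunks shape implied by the cascade
// delete test below; ON DELETE CASCADE is what removes a file's chunk
// mappings when the file row is deleted.
const fileChunksSchema = `
CREATE TABLE file_chunks (
    file_id    TEXT NOT NULL REFERENCES files(id) ON DELETE CASCADE,
    idx        INTEGER NOT NULL,
    chunk_hash TEXT NOT NULL,
    PRIMARY KEY (file_id, idx)
);`

// openDB is a hypothetical helper. _busy_timeout makes concurrent
// writers wait (in milliseconds) instead of failing immediately with
// SQLITE_BUSY; _foreign_keys=on enables foreign key enforcement,
// without which ON DELETE CASCADE is silently ignored.
func openDB(path string) (*sql.DB, error) {
	dsn := "file:" + path + "?_busy_timeout=5000&_foreign_keys=on"
	return sql.Open("sqlite3", dsn)
}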

Streaming and Performance Improvements:
- Implement true streaming blob packing without intermediate storage (sketched after this list)
- Add streaming chunk processing to reduce memory usage
- Improve progress reporting with real-time metrics
- Add upload metrics tracking in new uploads table
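
A minimal sketch of the streaming shape, assuming a hypothetical uploader interface and pack callback (not vaultik's actual API): the packer writes into one end of an in-memory pipe while the upload consumes the other, so a packed blob is never buffered whole or spooled to disk.

package vaultik

import (
	"context"
	"io"
)

// uploader stands in for the S3 client; an assumption for illustration.
type uploader interface {
	Upload(ctx context.Context, key string, body io.Reader) error
}

// streamBlob pipes packed chunk data straight into the uploader.
func streamBlob(ctx context.Context, up uploader, key string, pack func(io.Writer) error) error {
	pr, pw := io.Pipe()
	go func() {
		// A packing error propagates to the reader and aborts the
		// upload; CloseWithError(nil) is a clean close.
		pw.CloseWithError(pack(pw))
	}()
	return up.Upload(ctx, key, pr)
}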

CLI Refactoring:
- Restructure CLI to use subcommands: snapshot create/list/purge/verify
- Add store info command for S3 configuration display
- Add custom duration parser supporting days/weeks/months/years (see the sketch after this list)
- Remove old backup.go in favor of enhanced snapshot.go
- Add --cron flag for silent operation
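
time.ParseDuration stops at hours, so retention flags like 7d or 4w need a custom parser. A sketch under assumed semantics (month = 30 days, year = 365 days; note it reads a bare m suffix as months, unlike the stdlib's minutes), with parseDuration being a hypothetical name:

package cli

import (
	"strconv"
	"strings"
	"time"
)

// parseDuration accepts d/w/m/y suffixes and falls back to
// time.ParseDuration for everything else.
func parseDuration(s string) (time.Duration, error) {
	units := map[string]time.Duration{
		"d": 24 * time.Hour,
		"w": 7 * 24 * time.Hour,
		"m": 30 * 24 * time.Hour,  // assumed: month = 30 days
		"y": 365 * 24 * time.Hour, // assumed: year = 365 days
	}
	for suffix, unit := range units {
		if strings.HasSuffix(s, suffix) {
			if n, err := strconv.Atoi(strings.TrimSuffix(s, suffix)); err == nil {
				return time.Duration(n) * unit, nil
			}
		}
	}
	// Anything else (including h/m/s forms like "1h30m") goes to the
	// standard parser.
	return time.ParseDuration(s)
}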

Configuration Changes:
- Remove unused index_prefix configuration option
- Add support for snapshot pruning retention policies (an illustrative shape follows this list)
- Improve configuration validation and error messages
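
The retention policy shape is not shown in this file; an illustrative guess at what pruning configuration could look like (field names are assumptions, not vaultik's actual schema):

package config

import "time"

// RetentionPolicy is a hypothetical pruning policy: keep the N newest
// snapshots, plus anything younger than KeepWithin.
type RetentionPolicy struct {
	KeepLatest int           `yaml:"keep_latest"`
	KeepWithin time.Duration `yaml:"keep_within"`
}

// ShouldPrune reports whether a snapshot taken at ts, currently the
// rank-th newest (0-based), falls outside the policy.
func (p RetentionPolicy) ShouldPrune(ts time.Time, rank int) bool {
	return rank >= p.KeepLatest && time.Since(ts) > p.KeepWithin
}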

Testing Improvements:
- Add comprehensive repository tests with edge cases
- Add cascade delete debugging tests
- Fix concurrent operation tests to use SQLite busy timeout
- Remove tolerance for SQLITE_BUSY errors in tests

Documentation:
- Add MIT LICENSE file
- Update README with new command structure
- Add comprehensive DATAMODEL.md explaining database schema
- Update DESIGN.md with UUID-based architecture

Other Changes:
- Add test-config.yml for testing
- Update Makefile with better test output formatting
- Fix various race conditions in concurrent operations
- Improve error handling throughout

package database

import (
	"context"
	"fmt"
	"testing"
	"time"
)

// TestCascadeDeleteDebug tests cascade delete with debug output.
func TestCascadeDeleteDebug(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Check if foreign keys are enabled.
	var fkEnabled int
	err := db.conn.QueryRow("PRAGMA foreign_keys").Scan(&fkEnabled)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Foreign keys enabled: %d", fkEnabled)

	// Create a file.
	file := &File{
		Path:  "/cascade-test.txt",
		MTime: time.Now().Truncate(time.Second),
		CTime: time.Now().Truncate(time.Second),
		Size:  1024,
		Mode:  0644,
		UID:   1000,
		GID:   1000,
	}
	err = repos.Files.Create(ctx, nil, file)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	t.Logf("Created file with ID: %s", file.ID)

	// Create chunks and file-chunk mappings.
	for i := 0; i < 3; i++ {
		chunk := &Chunk{
			ChunkHash: fmt.Sprintf("cascade-chunk-%d", i),
			SHA256:    fmt.Sprintf("cascade-sha-%d", i),
			Size:      1024,
		}
		err = repos.Chunks.Create(ctx, nil, chunk)
		if err != nil {
			t.Fatalf("failed to create chunk: %v", err)
		}

		fc := &FileChunk{
			FileID:    file.ID,
			Idx:       i,
			ChunkHash: chunk.ChunkHash,
		}
		err = repos.FileChunks.Create(ctx, nil, fc)
		if err != nil {
			t.Fatalf("failed to create file chunk: %v", err)
		}
		t.Logf("Created file chunk mapping: file_id=%s, idx=%d, chunk=%s", fc.FileID, fc.Idx, fc.ChunkHash)
	}

	// Verify file chunks exist.
	fileChunks, err := repos.FileChunks.GetByFileID(ctx, file.ID)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("File chunks before delete: %d", len(fileChunks))

	// Check the foreign key constraint.
	var fkInfo string
	err = db.conn.QueryRow(`
		SELECT sql FROM sqlite_master
		WHERE type='table' AND name='file_chunks'
	`).Scan(&fkInfo)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("file_chunks table definition:\n%s", fkInfo)

	// Delete the file.
	t.Log("Deleting file...")
	err = repos.Files.DeleteByID(ctx, nil, file.ID)
	if err != nil {
		t.Fatalf("failed to delete file: %v", err)
	}

	// Verify the file is gone.
	deletedFile, err := repos.Files.GetByID(ctx, file.ID)
	if err != nil {
		t.Fatal(err)
	}
	if deletedFile != nil {
		t.Error("file should have been deleted")
	} else {
		t.Log("File was successfully deleted")
	}

	// Check file chunks after delete.
	fileChunks, err = repos.FileChunks.GetByFileID(ctx, file.ID)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("File chunks after delete: %d", len(fileChunks))

	// Manually check the database.
	var count int
	err = db.conn.QueryRow("SELECT COUNT(*) FROM file_chunks WHERE file_id = ?", file.ID).Scan(&count)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Manual count of file_chunks for deleted file: %d", count)

	if len(fileChunks) != 0 {
		t.Errorf("expected 0 file chunks after cascade delete, got %d", len(fileChunks))
		// List the remaining chunks.
		for _, fc := range fileChunks {
			t.Logf("Remaining chunk: file_id=%s, idx=%d, chunk=%s", fc.FileID, fc.Idx, fc.ChunkHash)
		}
	}
}