Major refactoring: UUID-based storage, streaming architecture, and CLI improvements
This commit represents a significant architectural overhaul of vaultik:

Database Schema Changes:
- Switch files table to use UUID primary keys instead of path-based keys
- Add UUID primary keys to blobs table for immediate chunk association
- Update all foreign key relationships to use UUIDs
- Add comprehensive schema documentation in DATAMODEL.md
- Add SQLite busy timeout handling for concurrent operations

Streaming and Performance Improvements:
- Implement true streaming blob packing without intermediate storage
- Add streaming chunk processing to reduce memory usage
- Improve progress reporting with real-time metrics
- Add upload metrics tracking in new uploads table

CLI Refactoring:
- Restructure CLI to use subcommands: snapshot create/list/purge/verify
- Add store info command for S3 configuration display
- Add custom duration parser supporting days/weeks/months/years
- Remove old backup.go in favor of enhanced snapshot.go
- Add --cron flag for silent operation

Configuration Changes:
- Remove unused index_prefix configuration option
- Add support for snapshot pruning retention policies
- Improve configuration validation and error messages

Testing Improvements:
- Add comprehensive repository tests with edge cases
- Add cascade delete debugging tests
- Fix concurrent operation tests to use SQLite busy timeout
- Remove tolerance for SQLITE_BUSY errors in tests

Documentation:
- Add MIT LICENSE file
- Update README with new command structure
- Add comprehensive DATAMODEL.md explaining database schema
- Update DESIGN.md with UUID-based architecture

Other Changes:
- Add test-config.yml for testing
- Update Makefile with better test output formatting
- Fix various race conditions in concurrent operations
- Improve error handling throughout
This commit is contained in:
@@ -67,21 +67,26 @@ func TestDatabaseConcurrentAccess(t *testing.T) {
|
||||
}()
|
||||
|
||||
// Test concurrent writes
|
||||
done := make(chan bool, 10)
|
||||
type result struct {
|
||||
index int
|
||||
err error
|
||||
}
|
||||
results := make(chan result, 10)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
go func(i int) {
|
||||
_, err := db.ExecWithLock(ctx, "INSERT INTO chunks (chunk_hash, sha256, size) VALUES (?, ?, ?)",
|
||||
_, err := db.ExecWithLog(ctx, "INSERT INTO chunks (chunk_hash, sha256, size) VALUES (?, ?, ?)",
|
||||
fmt.Sprintf("hash%d", i), fmt.Sprintf("sha%d", i), i*1024)
|
||||
if err != nil {
|
||||
t.Errorf("concurrent insert failed: %v", err)
|
||||
}
|
||||
done <- true
|
||||
results <- result{index: i, err: err}
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all goroutines
|
||||
// Wait for all goroutines and check results
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
r := <-results
|
||||
if r.err != nil {
|
||||
t.Fatalf("concurrent insert %d failed: %v", r.index, r.err)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify all inserts succeeded
|
||||
|
||||
Reference in New Issue
Block a user