Refactor blob storage to use UUID primary keys and implement streaming chunking
- Changed blob table to use ID (UUID) as primary key instead of hash
- Blob records are now created at packing start, enabling immediate chunk associations
- Implemented streaming chunking to process large files without memory exhaustion
- Fixed blob manifest generation to include all referenced blobs
- Updated all foreign key references from blob_hash to blob_id
- Added progress reporting and improved error handling
- Enforced encryption requirement for all blob packing
- Updated tests to use test encryption keys
- Added Cyrillic transliteration to README
This commit is contained in:
@@ -15,6 +15,7 @@ type Repositories struct {
|
||||
BlobChunks *BlobChunkRepository
|
||||
ChunkFiles *ChunkFileRepository
|
||||
Snapshots *SnapshotRepository
|
||||
Uploads *UploadRepository
|
||||
}
|
||||
|
||||
func NewRepositories(db *DB) *Repositories {
|
||||
@@ -27,6 +28,7 @@ func NewRepositories(db *DB) *Repositories {
|
||||
BlobChunks: NewBlobChunkRepository(db),
|
||||
ChunkFiles: NewChunkFileRepository(db),
|
||||
Snapshots: NewSnapshotRepository(db),
|
||||
Uploads: NewUploadRepository(db.conn),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,13 +36,19 @@ type TxFunc func(ctx context.Context, tx *sql.Tx) error
|
||||
|
||||
func (r *Repositories) WithTx(ctx context.Context, fn TxFunc) error {
|
||||
// Acquire write lock for the entire transaction
|
||||
LogSQL("WithTx", "Acquiring write lock", "")
|
||||
r.db.LockForWrite()
|
||||
defer r.db.UnlockWrite()
|
||||
defer func() {
|
||||
LogSQL("WithTx", "Releasing write lock", "")
|
||||
r.db.UnlockWrite()
|
||||
}()
|
||||
|
||||
LogSQL("WithTx", "Beginning transaction", "")
|
||||
tx, err := r.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("beginning transaction: %w", err)
|
||||
}
|
||||
LogSQL("WithTx", "Transaction started", "")
|
||||
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
|
||||
Reference in New Issue
Block a user