Refactor blob storage to use UUID primary keys and implement streaming chunking
- Changed blob table to use ID (UUID) as primary key instead of hash
- Blob records are now created at packing start, enabling immediate chunk associations
- Implemented streaming chunking to process large files without memory exhaustion
- Fixed blob manifest generation to include all referenced blobs
- Updated all foreign key references from blob_hash to blob_id
- Added progress reporting and improved error handling
- Enforced encryption requirement for all blob packing
- Updated tests to use test encryption keys
- Added Cyrillic transliteration to README
This commit is contained in:
135
internal/database/uploads.go
Normal file
135
internal/database/uploads.go
Normal file
@@ -0,0 +1,135 @@
|
||||
package database
|
||||
|
||||
import (
	"context"
	"database/sql"
	"errors"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/log"
)
|
||||
|
||||
// Upload represents a blob upload record, one row in the uploads table.
// It captures what was uploaded, when, how large it was, and how long
// the transfer took.
type Upload struct {
	// BlobHash is the content hash identifying the uploaded blob
	// (matches the blob_hash column; note the commit message indicates
	// a migration toward blob_id elsewhere — confirm which this file tracks).
	BlobHash string
	// UploadedAt is the time the upload was recorded.
	UploadedAt time.Time
	// Size is the blob size — presumably bytes; verify against the writer.
	Size int64
	// DurationMs is the upload duration in milliseconds.
	DurationMs int64
}
|
||||
|
||||
// UploadRepository handles upload records. All queries run against the
// wrapped *sql.DB unless a method accepts an explicit transaction.
type UploadRepository struct {
	// conn is the shared database handle used for all non-transactional queries.
	conn *sql.DB
}
|
||||
|
||||
// NewUploadRepository creates a new upload repository
|
||||
func NewUploadRepository(conn *sql.DB) *UploadRepository {
|
||||
return &UploadRepository{conn: conn}
|
||||
}
|
||||
|
||||
// Create inserts a new upload record
|
||||
func (r *UploadRepository) Create(ctx context.Context, tx *sql.Tx, upload *Upload) error {
|
||||
query := `
|
||||
INSERT INTO uploads (blob_hash, uploaded_at, size, duration_ms)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`
|
||||
|
||||
var err error
|
||||
if tx != nil {
|
||||
_, err = tx.ExecContext(ctx, query, upload.BlobHash, upload.UploadedAt, upload.Size, upload.DurationMs)
|
||||
} else {
|
||||
_, err = r.conn.ExecContext(ctx, query, upload.BlobHash, upload.UploadedAt, upload.Size, upload.DurationMs)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// GetByBlobHash retrieves an upload record by blob hash
|
||||
func (r *UploadRepository) GetByBlobHash(ctx context.Context, blobHash string) (*Upload, error) {
|
||||
query := `
|
||||
SELECT blob_hash, uploaded_at, size, duration_ms
|
||||
FROM uploads
|
||||
WHERE blob_hash = ?
|
||||
`
|
||||
|
||||
var upload Upload
|
||||
err := r.conn.QueryRowContext(ctx, query, blobHash).Scan(
|
||||
&upload.BlobHash,
|
||||
&upload.UploadedAt,
|
||||
&upload.Size,
|
||||
&upload.DurationMs,
|
||||
)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &upload, nil
|
||||
}
|
||||
|
||||
// GetRecentUploads retrieves recent uploads ordered by upload time
|
||||
func (r *UploadRepository) GetRecentUploads(ctx context.Context, limit int) ([]*Upload, error) {
|
||||
query := `
|
||||
SELECT blob_hash, uploaded_at, size, duration_ms
|
||||
FROM uploads
|
||||
ORDER BY uploaded_at DESC
|
||||
LIMIT ?
|
||||
`
|
||||
|
||||
rows, err := r.conn.QueryContext(ctx, query, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := rows.Close(); err != nil {
|
||||
log.Error("failed to close rows", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
var uploads []*Upload
|
||||
for rows.Next() {
|
||||
var upload Upload
|
||||
if err := rows.Scan(&upload.BlobHash, &upload.UploadedAt, &upload.Size, &upload.DurationMs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uploads = append(uploads, &upload)
|
||||
}
|
||||
|
||||
return uploads, rows.Err()
|
||||
}
|
||||
|
||||
// GetUploadStats returns aggregate statistics for uploads
|
||||
func (r *UploadRepository) GetUploadStats(ctx context.Context, since time.Time) (*UploadStats, error) {
|
||||
query := `
|
||||
SELECT
|
||||
COUNT(*) as count,
|
||||
COALESCE(SUM(size), 0) as total_size,
|
||||
COALESCE(AVG(duration_ms), 0) as avg_duration_ms,
|
||||
COALESCE(MIN(duration_ms), 0) as min_duration_ms,
|
||||
COALESCE(MAX(duration_ms), 0) as max_duration_ms
|
||||
FROM uploads
|
||||
WHERE uploaded_at >= ?
|
||||
`
|
||||
|
||||
var stats UploadStats
|
||||
err := r.conn.QueryRowContext(ctx, query, since).Scan(
|
||||
&stats.Count,
|
||||
&stats.TotalSize,
|
||||
&stats.AvgDurationMs,
|
||||
&stats.MinDurationMs,
|
||||
&stats.MaxDurationMs,
|
||||
)
|
||||
|
||||
return &stats, err
|
||||
}
|
||||
|
||||
// UploadStats contains aggregate upload statistics computed over a time
// window (see GetUploadStats).
type UploadStats struct {
	// Count is the number of uploads in the window.
	Count int64
	// TotalSize is the sum of upload sizes (same unit as Upload.Size).
	TotalSize int64
	// AvgDurationMs is the mean upload duration in milliseconds
	// (float64 because SQL AVG yields a fractional value).
	AvgDurationMs float64
	// MinDurationMs is the shortest upload duration in milliseconds.
	MinDurationMs int64
	// MaxDurationMs is the longest upload duration in milliseconds.
	MaxDurationMs int64
}
|
||||
Reference in New Issue
Block a user