vaultik/internal/database/uploads.go

package database

import (
	"context"
	"database/sql"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/log"
)

// Upload represents a blob upload record.
type Upload struct {
	BlobHash   string
	SnapshotID string
	UploadedAt time.Time
	Size       int64
	DurationMs int64
}

// UploadRepository handles upload records.
type UploadRepository struct {
	conn *sql.DB
}

// NewUploadRepository creates a new upload repository backed by the given
// database connection.
func NewUploadRepository(conn *sql.DB) *UploadRepository {
	return &UploadRepository{conn: conn}
}

// Create inserts a new upload record. If tx is non-nil the insert runs
// inside that transaction; otherwise it executes directly on the
// repository's connection.
func (r *UploadRepository) Create(ctx context.Context, tx *sql.Tx, upload *Upload) error {
	query := `
		INSERT INTO uploads (blob_hash, snapshot_id, uploaded_at, size, duration_ms)
		VALUES (?, ?, ?, ?, ?)
	`
	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, upload.BlobHash, upload.SnapshotID, upload.UploadedAt, upload.Size, upload.DurationMs)
	} else {
		_, err = r.conn.ExecContext(ctx, query, upload.BlobHash, upload.SnapshotID, upload.UploadedAt, upload.Size, upload.DurationMs)
	}
	return err
}
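
// A minimal usage sketch (hypothetical values; assumes ctx and an open
// *sql.DB named db are in scope):
//
//	repo := NewUploadRepository(db)
//	err := repo.Create(ctx, nil, &Upload{
//		BlobHash:   "abc123",        // hypothetical blob hash
//		SnapshotID: "snap-20250726", // hypothetical snapshot ID
//		UploadedAt: time.Now().UTC(),
//		Size:       4096,
//		DurationMs: 12,
//	})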

// GetByBlobHash retrieves an upload record by blob hash. It returns
// (nil, nil) when no matching record exists.
func (r *UploadRepository) GetByBlobHash(ctx context.Context, blobHash string) (*Upload, error) {
	query := `
		SELECT blob_hash, snapshot_id, uploaded_at, size, duration_ms
		FROM uploads
		WHERE blob_hash = ?
	`
	var upload Upload
	err := r.conn.QueryRowContext(ctx, query, blobHash).Scan(
		&upload.BlobHash,
		&upload.SnapshotID,
		&upload.UploadedAt,
		&upload.Size,
		&upload.DurationMs,
	)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &upload, nil
}
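
// Because a missing record is reported as (nil, nil) rather than an error,
// callers must check the pointer. Sketch (hypothetical hash value):
//
//	upload, err := repo.GetByBlobHash(ctx, "abc123")
//	if err != nil {
//		return err
//	}
//	if upload == nil {
//		// blob has not been uploaded yet
//	}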

// GetRecentUploads retrieves the most recent uploads, newest first, up to
// the given limit.
func (r *UploadRepository) GetRecentUploads(ctx context.Context, limit int) ([]*Upload, error) {
	query := `
		SELECT blob_hash, snapshot_id, uploaded_at, size, duration_ms
		FROM uploads
		ORDER BY uploaded_at DESC
		LIMIT ?
	`
	rows, err := r.conn.QueryContext(ctx, query, limit)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err := rows.Close(); err != nil {
			log.Error("failed to close rows", "error", err)
		}
	}()
	var uploads []*Upload
	for rows.Next() {
		var upload Upload
		if err := rows.Scan(&upload.BlobHash, &upload.SnapshotID, &upload.UploadedAt, &upload.Size, &upload.DurationMs); err != nil {
			return nil, err
		}
		uploads = append(uploads, &upload)
	}
	return uploads, rows.Err()
}
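
// Sketch of listing the ten most recent uploads (the limit is illustrative):
//
//	recent, err := repo.GetRecentUploads(ctx, 10)
//	if err != nil {
//		return err
//	}
//	for _, u := range recent {
//		fmt.Printf("%s  %d bytes  %d ms\n", u.BlobHash, u.Size, u.DurationMs)
//	}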

// GetUploadStats returns aggregate statistics for uploads recorded at or
// after the given time.
func (r *UploadRepository) GetUploadStats(ctx context.Context, since time.Time) (*UploadStats, error) {
	query := `
		SELECT
			COUNT(*) AS count,
			COALESCE(SUM(size), 0) AS total_size,
			COALESCE(AVG(duration_ms), 0) AS avg_duration_ms,
			COALESCE(MIN(duration_ms), 0) AS min_duration_ms,
			COALESCE(MAX(duration_ms), 0) AS max_duration_ms
		FROM uploads
		WHERE uploaded_at >= ?
	`
	var stats UploadStats
	err := r.conn.QueryRowContext(ctx, query, since).Scan(
		&stats.Count,
		&stats.TotalSize,
		&stats.AvgDurationMs,
		&stats.MinDurationMs,
		&stats.MaxDurationMs,
	)
	if err != nil {
		return nil, err
	}
	return &stats, nil
}
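
// Sketch of reporting stats for a 24-hour window (the window is
// illustrative):
//
//	stats, err := repo.GetUploadStats(ctx, time.Now().Add(-24*time.Hour))
//	if err != nil {
//		return err
//	}
//	fmt.Printf("%d uploads, %d bytes, avg %.1f ms\n",
//		stats.Count, stats.TotalSize, stats.AvgDurationMs)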

// UploadStats contains aggregate upload statistics.
type UploadStats struct {
	Count         int64
	TotalSize     int64
	AvgDurationMs float64
	MinDurationMs int64
	MaxDurationMs int64
}

// GetCountBySnapshot returns the number of uploads recorded for a specific
// snapshot.
func (r *UploadRepository) GetCountBySnapshot(ctx context.Context, snapshotID string) (int64, error) {
	query := `SELECT COUNT(*) FROM uploads WHERE snapshot_id = ?`
	var count int64
	err := r.conn.QueryRowContext(ctx, query, snapshotID).Scan(&count)
	if err != nil {
		return 0, err
	}
	return count, nil
}
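
// Sketch of checking how many new blobs a snapshot uploaded (hypothetical
// snapshot ID):
//
//	n, err := repo.GetCountBySnapshot(ctx, "snap-20250726")
//	if err != nil {
//		return err
//	}
//	fmt.Printf("snapshot uploaded %d new blobs\n", n)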