- Add SQLite database connection management with proper error handling - Implement schema for files, chunks, blobs, and snapshots tables - Create repository pattern for each database table - Add transaction support with proper rollback handling - Integrate database module with fx dependency injection - Make index path configurable via VAULTIK_INDEX_PATH env var - Add fatal error handling for database integrity issues - Update DESIGN.md to clarify file_chunks vs chunk_files distinction - Remove FinalHash from BlobInfo (blobs are content-addressable) - Add file metadata support (mtime, ctime, mode, uid, gid, symlinks)
97 lines
1.8 KiB
Go
97 lines
1.8 KiB
Go
package database
|
|
|
|
import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"
)
// BlobRepository provides data access for rows in the blobs table.
// Blobs are content-addressable, so a row carries only the blob's hash
// and its creation timestamp.
type BlobRepository struct {
	// db is the shared database handle; all queries go through db.conn.
	db *DB
}
func NewBlobRepository(db *DB) *BlobRepository {
|
|
return &BlobRepository{db: db}
|
|
}
|
|
|
|
func (r *BlobRepository) Create(ctx context.Context, tx *sql.Tx, blob *Blob) error {
|
|
query := `
|
|
INSERT INTO blobs (blob_hash, created_ts)
|
|
VALUES (?, ?)
|
|
`
|
|
|
|
var err error
|
|
if tx != nil {
|
|
_, err = tx.ExecContext(ctx, query, blob.BlobHash, blob.CreatedTS.Unix())
|
|
} else {
|
|
_, err = r.db.conn.ExecContext(ctx, query, blob.BlobHash, blob.CreatedTS.Unix())
|
|
}
|
|
|
|
if err != nil {
|
|
return fmt.Errorf("inserting blob: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (r *BlobRepository) GetByHash(ctx context.Context, hash string) (*Blob, error) {
|
|
query := `
|
|
SELECT blob_hash, created_ts
|
|
FROM blobs
|
|
WHERE blob_hash = ?
|
|
`
|
|
|
|
var blob Blob
|
|
var createdTSUnix int64
|
|
|
|
err := r.db.conn.QueryRowContext(ctx, query, hash).Scan(
|
|
&blob.BlobHash,
|
|
&createdTSUnix,
|
|
)
|
|
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying blob: %w", err)
|
|
}
|
|
|
|
blob.CreatedTS = time.Unix(createdTSUnix, 0)
|
|
return &blob, nil
|
|
}
|
|
|
|
func (r *BlobRepository) List(ctx context.Context, limit, offset int) ([]*Blob, error) {
|
|
query := `
|
|
SELECT blob_hash, created_ts
|
|
FROM blobs
|
|
ORDER BY blob_hash
|
|
LIMIT ? OFFSET ?
|
|
`
|
|
|
|
rows, err := r.db.conn.QueryContext(ctx, query, limit, offset)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying blobs: %w", err)
|
|
}
|
|
defer CloseRows(rows)
|
|
|
|
var blobs []*Blob
|
|
for rows.Next() {
|
|
var blob Blob
|
|
var createdTSUnix int64
|
|
|
|
err := rows.Scan(
|
|
&blob.BlobHash,
|
|
&createdTSUnix,
|
|
)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("scanning blob: %w", err)
|
|
}
|
|
|
|
blob.CreatedTS = time.Unix(createdTSUnix, 0)
|
|
blobs = append(blobs, &blob)
|
|
}
|
|
|
|
return blobs, rows.Err()
|
|
}
|