vaultik/internal/database/blob_chunks.go
sneak b2e85d9e76 Implement local SQLite index database with repositories
- Add SQLite database connection management with proper error handling
- Implement schema for files, chunks, blobs, and snapshots tables
- Create repository pattern for each database table
- Add transaction support with proper rollback handling
- Integrate database module with fx dependency injection
- Make index path configurable via VAULTIK_INDEX_PATH env var
- Add fatal error handling for database integrity issues
- Update DESIGN.md to clarify file_chunks vs chunk_files distinction
- Remove FinalHash from BlobInfo (blobs are content-addressable)
- Add file metadata support (mtime, ctime, mode, uid, gid, symlinks)
2025-07-20 10:26:15 +02:00
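The commit message above mentions wiring the database module through fx. The registration presumably resembles the following sketch (requires go.uber.org/fx; the Module variable name and the NewDB constructor are assumptions, not shown in this file):

var Module = fx.Options(
	fx.Provide(
		NewDB, // assumed constructor for *DB
		NewBlobChunkRepository,
	),
)

The repository in this file operates on BlobChunk values. The model type is defined elsewhere in the package; a minimal sketch of what it presumably looks like, with field names taken from the queries below and types inferred from their use as hashes and byte positions:

type BlobChunk struct {
	BlobHash  string // hash of the packed blob that contains the chunk
	ChunkHash string // content hash of the chunk itself
	Offset    int64  // byte offset of the chunk within the blob
	Length    int64  // chunk length in bytes
}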

package database

import (
	"context"
	"database/sql"
	"fmt"
)

// BlobChunkRepository provides access to the blob_chunks table, which records
// which chunks are packed into each blob and where they sit within it.
type BlobChunkRepository struct {
	db *DB
}

// NewBlobChunkRepository returns a repository backed by the given database handle.
func NewBlobChunkRepository(db *DB) *BlobChunkRepository {
	return &BlobChunkRepository{db: db}
}
// Create inserts a blob_chunks row. When tx is non-nil the insert runs inside
// that transaction; otherwise it executes directly on the connection.
func (r *BlobChunkRepository) Create(ctx context.Context, tx *sql.Tx, bc *BlobChunk) error {
	query := `
		INSERT INTO blob_chunks (blob_hash, chunk_hash, offset, length)
		VALUES (?, ?, ?, ?)
	`
	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length)
	} else {
		_, err = r.db.conn.ExecContext(ctx, query, bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length)
	}
	if err != nil {
		return fmt.Errorf("inserting blob_chunk: %w", err)
	}
	return nil
}
// GetByBlobHash returns every chunk entry for the given blob, ordered by offset
// so the blob's contents can be reassembled sequentially.
func (r *BlobChunkRepository) GetByBlobHash(ctx context.Context, blobHash string) ([]*BlobChunk, error) {
	query := `
		SELECT blob_hash, chunk_hash, offset, length
		FROM blob_chunks
		WHERE blob_hash = ?
		ORDER BY offset
	`
	rows, err := r.db.conn.QueryContext(ctx, query, blobHash)
	if err != nil {
		return nil, fmt.Errorf("querying blob chunks: %w", err)
	}
	defer CloseRows(rows)

	var blobChunks []*BlobChunk
	for rows.Next() {
		var bc BlobChunk
		if err := rows.Scan(&bc.BlobHash, &bc.ChunkHash, &bc.Offset, &bc.Length); err != nil {
			return nil, fmt.Errorf("scanning blob chunk: %w", err)
		}
		blobChunks = append(blobChunks, &bc)
	}
	return blobChunks, rows.Err()
}
// GetByChunkHash returns a single blob placement for the given chunk, or nil
// (with a nil error) if the chunk has not been packed into any blob.
func (r *BlobChunkRepository) GetByChunkHash(ctx context.Context, chunkHash string) (*BlobChunk, error) {
	query := `
		SELECT blob_hash, chunk_hash, offset, length
		FROM blob_chunks
		WHERE chunk_hash = ?
		LIMIT 1
	`
	var bc BlobChunk
	err := r.db.conn.QueryRowContext(ctx, query, chunkHash).Scan(
		&bc.BlobHash,
		&bc.ChunkHash,
		&bc.Offset,
		&bc.Length,
	)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying blob chunk: %w", err)
	}
	return &bc, nil
}
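
A minimal usage sketch of the transactional Create path described in the commit message, assuming the caller can reach the underlying *sql.DB; the indexChunk helper is hypothetical and not part of this file:

func indexChunk(ctx context.Context, sqlDB *sql.DB, repo *BlobChunkRepository, bc *BlobChunk) error {
	tx, err := sqlDB.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginning transaction: %w", err)
	}
	// Rollback after a successful Commit just returns ErrTxDone, which is
	// discarded here, so deferring it gives rollback-on-error without extra
	// bookkeeping.
	defer func() { _ = tx.Rollback() }()

	if err := repo.Create(ctx, tx, bc); err != nil {
		return err
	}
	return tx.Commit()
}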