vaultik/internal/database/blob_chunks.go
sneak 8529ae9735 Implement SQLite index database layer
- Add pure Go SQLite driver (modernc.org/sqlite) to avoid CGO dependency
- Implement database connection management with WAL mode
- Add write mutex for serializing concurrent writes
- Create schema for all tables matching DESIGN.md specifications
- Implement repository pattern for all database entities:
  - Files, FileChunks, Chunks, Blobs, BlobChunks, ChunkFiles, Snapshots
- Add transaction support with proper rollback handling
- Add fatal error handling for database integrity issues
- Add snapshot fields for tracking file sizes and compression ratios
- Make index path configurable via VAULTIK_INDEX_PATH environment variable
- Add comprehensive test coverage for all repositories
- Add format check to Makefile to ensure code formatting
2025-07-20 10:56:30 +02:00

89 lines
1.9 KiB
Go

package database
import (
"context"
"database/sql"
"fmt"
)
// BlobChunkRepository provides data access for the blob_chunks table,
// which records which chunks are packed into which blob, together with
// each chunk's byte offset and length inside that blob.
type BlobChunkRepository struct {
db *DB // shared handle; writes are serialized via DB.ExecWithLock
}
// NewBlobChunkRepository constructs a BlobChunkRepository backed by db.
func NewBlobChunkRepository(db *DB) *BlobChunkRepository {
	repo := &BlobChunkRepository{}
	repo.db = db
	return repo
}
// Create inserts one blob->chunk mapping row. When tx is non-nil the
// insert participates in that transaction; otherwise it runs on the
// repository's write-serialized connection.
func (r *BlobChunkRepository) Create(ctx context.Context, tx *sql.Tx, bc *BlobChunk) error {
	query := `
INSERT INTO blob_chunks (blob_hash, chunk_hash, offset, length)
VALUES (?, ?, ?, ?)
`
	args := []any{bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length}

	var err error
	if tx == nil {
		// No caller-supplied transaction: use the locked exec path so
		// concurrent writers do not interleave.
		_, err = r.db.ExecWithLock(ctx, query, args...)
	} else {
		_, err = tx.ExecContext(ctx, query, args...)
	}
	if err != nil {
		return fmt.Errorf("inserting blob_chunk: %w", err)
	}
	return nil
}
// GetByBlobHash returns every chunk record belonging to the blob
// identified by blobHash, ordered by the chunk's offset within the blob.
// A blob with no rows yields a nil slice and a nil error.
func (r *BlobChunkRepository) GetByBlobHash(ctx context.Context, blobHash string) ([]*BlobChunk, error) {
	query := `
SELECT blob_hash, chunk_hash, offset, length
FROM blob_chunks
WHERE blob_hash = ?
ORDER BY offset
`
	rows, err := r.db.conn.QueryContext(ctx, query, blobHash)
	if err != nil {
		return nil, fmt.Errorf("querying blob chunks: %w", err)
	}
	defer CloseRows(rows)

	var result []*BlobChunk
	for rows.Next() {
		entry := &BlobChunk{}
		if err := rows.Scan(&entry.BlobHash, &entry.ChunkHash, &entry.Offset, &entry.Length); err != nil {
			return nil, fmt.Errorf("scanning blob chunk: %w", err)
		}
		result = append(result, entry)
	}
	// Surface any iteration error (e.g. connection failure mid-scan).
	return result, rows.Err()
}
// GetByChunkHash looks up a single blob containing the chunk identified
// by chunkHash (LIMIT 1; a chunk may appear in multiple blobs). It
// returns (nil, nil) when no blob references the chunk — absence is not
// an error for callers.
func (r *BlobChunkRepository) GetByChunkHash(ctx context.Context, chunkHash string) (*BlobChunk, error) {
	query := `
SELECT blob_hash, chunk_hash, offset, length
FROM blob_chunks
WHERE chunk_hash = ?
LIMIT 1
`
	var bc BlobChunk
	err := r.db.conn.QueryRowContext(ctx, query, chunkHash).Scan(
		&bc.BlobHash,
		&bc.ChunkHash,
		&bc.Offset,
		&bc.Length,
	)
	// Use errors.Is rather than ==: drivers and database/sql may wrap
	// ErrNoRows, and Is matches through wrapped chains (Go 1.13+).
	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying blob chunk: %w", err)
	}
	return &bc, nil
}