vaultik/internal/database/chunks.go
sneak e29a995120 Refactor: Move Vaultik struct and methods to internal/vaultik package
- Created new internal/vaultik package with unified Vaultik struct
- Moved all command methods (snapshot, info, prune, verify) from CLI to vaultik package
- Implemented single constructor that handles crypto capabilities automatically
- Added CanDecrypt() method to check if decryption is available
- Updated all CLI commands to use the new vaultik.Vaultik struct
- Removed old fragmented App structs and WithCrypto wrapper
- Fixed context management - Vaultik now owns its context lifecycle
- Cleaned up package imports and dependencies

This creates a cleaner separation between CLI/Cobra code and business logic,
with all vaultik operations now centralized in the internal/vaultik package.
2025-07-26 14:47:26 +02:00
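
The commit message above describes the shape of the new package without showing it. The following is a minimal, purely illustrative sketch of what a unified struct with a single constructor and a CanDecrypt() check could look like. Only the names Vaultik and CanDecrypt and the idea of owning the context lifecycle come from the message; every field, parameter, and the constructor signature here are assumptions, not the actual vaultik API.

// Hypothetical sketch only; field names and signatures are assumptions.
package vaultik

import "context"

// Vaultik bundles the state the CLI previously assembled piecemeal. It owns
// its own context so callers do not have to thread one through.
type Vaultik struct {
	ctx        context.Context
	cancel     context.CancelFunc
	privateKey string // empty when only encryption (no decryption) is configured
}

// New is the single constructor; it determines crypto capabilities from the
// key material it is given (signature is assumed, not the real one).
func New(privateKey string) *Vaultik {
	ctx, cancel := context.WithCancel(context.Background())
	return &Vaultik{ctx: ctx, cancel: cancel, privateKey: privateKey}
}

// CanDecrypt reports whether decryption is available, i.e. whether a
// private key was supplied.
func (v *Vaultik) CanDecrypt() bool {
	return v.privateKey != ""
}

// Close releases the context owned by this Vaultik instance.
func (v *Vaultik) Close() { v.cancel() }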

package database

import (
	"context"
	"database/sql"
	"fmt"

	"git.eeqj.de/sneak/vaultik/internal/log"
)

// ChunkRepository provides access to the chunks table, which tracks
// content-addressed chunks by hash and size.
type ChunkRepository struct {
	db *DB
}

// NewChunkRepository returns a ChunkRepository backed by the given database.
func NewChunkRepository(db *DB) *ChunkRepository {
	return &ChunkRepository{db: db}
}
// Create inserts a chunk row, using ON CONFLICT DO NOTHING so that
// re-inserting an existing hash is a no-op. If tx is non-nil the insert
// runs inside that transaction; otherwise it runs directly on the database.
func (r *ChunkRepository) Create(ctx context.Context, tx *sql.Tx, chunk *Chunk) error {
	query := `
		INSERT INTO chunks (chunk_hash, size)
		VALUES (?, ?)
		ON CONFLICT(chunk_hash) DO NOTHING
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, chunk.ChunkHash, chunk.Size)
	} else {
		_, err = r.db.ExecWithLog(ctx, query, chunk.ChunkHash, chunk.Size)
	}
	if err != nil {
		return fmt.Errorf("inserting chunk: %w", err)
	}
	return nil
}
// GetByHash returns the chunk with the given hash, or (nil, nil) if no
// such chunk exists.
func (r *ChunkRepository) GetByHash(ctx context.Context, hash string) (*Chunk, error) {
	query := `
		SELECT chunk_hash, size
		FROM chunks
		WHERE chunk_hash = ?
	`

	var chunk Chunk
	err := r.db.conn.QueryRowContext(ctx, query, hash).Scan(
		&chunk.ChunkHash,
		&chunk.Size,
	)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying chunk: %w", err)
	}
	return &chunk, nil
}
// GetByHashes returns the chunks matching the given hashes, ordered by
// hash. Hashes with no corresponding row are omitted from the result.
func (r *ChunkRepository) GetByHashes(ctx context.Context, hashes []string) ([]*Chunk, error) {
	if len(hashes) == 0 {
		return nil, nil
	}

	// Build an IN (?, ?, ...) clause with one placeholder per hash.
	query := `
		SELECT chunk_hash, size
		FROM chunks
		WHERE chunk_hash IN (`
	args := make([]interface{}, len(hashes))
	for i, hash := range hashes {
		if i > 0 {
			query += ", "
		}
		query += "?"
		args[i] = hash
	}
	query += ") ORDER BY chunk_hash"

	rows, err := r.db.conn.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("querying chunks: %w", err)
	}
	defer CloseRows(rows)

	var chunks []*Chunk
	for rows.Next() {
		var chunk Chunk
		err := rows.Scan(
			&chunk.ChunkHash,
			&chunk.Size,
		)
		if err != nil {
			return nil, fmt.Errorf("scanning chunk: %w", err)
		}
		chunks = append(chunks, &chunk)
	}
	return chunks, rows.Err()
}
// ListUnpacked returns up to limit chunks that have not yet been packed
// into any blob (no matching row in blob_chunks), ordered by hash.
func (r *ChunkRepository) ListUnpacked(ctx context.Context, limit int) ([]*Chunk, error) {
	query := `
		SELECT c.chunk_hash, c.size
		FROM chunks c
		LEFT JOIN blob_chunks bc ON c.chunk_hash = bc.chunk_hash
		WHERE bc.chunk_hash IS NULL
		ORDER BY c.chunk_hash
		LIMIT ?
	`

	rows, err := r.db.conn.QueryContext(ctx, query, limit)
	if err != nil {
		return nil, fmt.Errorf("querying unpacked chunks: %w", err)
	}
	defer CloseRows(rows)

	var chunks []*Chunk
	for rows.Next() {
		var chunk Chunk
		err := rows.Scan(
			&chunk.ChunkHash,
			&chunk.Size,
		)
		if err != nil {
			return nil, fmt.Errorf("scanning chunk: %w", err)
		}
		chunks = append(chunks, &chunk)
	}
	return chunks, rows.Err()
}
// DeleteOrphaned deletes chunks that are not referenced by any file or blob.
func (r *ChunkRepository) DeleteOrphaned(ctx context.Context) error {
	query := `
		DELETE FROM chunks
		WHERE NOT EXISTS (
			SELECT 1 FROM file_chunks
			WHERE file_chunks.chunk_hash = chunks.chunk_hash
		)
		AND NOT EXISTS (
			SELECT 1 FROM blob_chunks
			WHERE blob_chunks.chunk_hash = chunks.chunk_hash
		)
	`

	result, err := r.db.ExecWithLog(ctx, query)
	if err != nil {
		return fmt.Errorf("deleting orphaned chunks: %w", err)
	}

	rowsAffected, _ := result.RowsAffected()
	if rowsAffected > 0 {
		log.Debug("Deleted orphaned chunks", "count", rowsAffected)
	}
	return nil
}
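
To show how the repository above is meant to be called, here is a short, hedged usage sketch. It relies only on identifiers visible in this file (ChunkRepository, NewChunkRepository, Create, GetByHash, and the Chunk fields); how a *database.DB is opened is not shown here, so the function simply receives one.

// Illustrative usage sketch; not part of chunks.go.
package main

import (
	"context"
	"fmt"

	"git.eeqj.de/sneak/vaultik/internal/database"
)

func storeAndLookupChunk(ctx context.Context, db *database.DB) error {
	repo := database.NewChunkRepository(db)

	// Passing nil for tx runs the insert directly; a *sql.Tx can be passed
	// instead to batch the insert with other writes. The ON CONFLICT clause
	// makes repeating this call for the same hash a no-op.
	chunk := &database.Chunk{ChunkHash: "deadbeef", Size: 4096}
	if err := repo.Create(ctx, nil, chunk); err != nil {
		return err
	}

	// GetByHash returns (nil, nil) when no row matches the hash.
	got, err := repo.GetByHash(ctx, chunk.ChunkHash)
	if err != nil {
		return err
	}
	if got == nil {
		return fmt.Errorf("chunk %s not found after insert", chunk.ChunkHash)
	}
	return nil
}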