- Add internal/types package with type-safe wrappers for IDs, hashes, paths, and credentials (FileID, BlobID, ChunkHash, etc.) - Implement driver.Valuer and sql.Scanner for UUID-based types - Add `vaultik version` command showing version, commit, go version - Add `--verify` flag to restore command that checksums all restored files against expected chunk hashes with progress bar - Remove fetch.go (dead code, functionality in restore) - Clean up TODO.md, remove completed items - Update all database and snapshot code to use new custom types
383 lines
9.3 KiB
Go
383 lines
9.3 KiB
Go
package database
|
|
|
|
import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"strings"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/log"
	"git.eeqj.de/sneak/vaultik/internal/types"
)
|
|
|
|
// FileRepository provides CRUD access to rows of the files table,
// translating between raw SQL column values and the typed File model.
type FileRepository struct {
	db *DB // shared handle; also provides the *WithLog query helpers
}
|
|
|
|
func NewFileRepository(db *DB) *FileRepository {
|
|
return &FileRepository{db: db}
|
|
}
|
|
|
|
func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) error {
|
|
// Generate UUID if not provided
|
|
if file.ID.IsZero() {
|
|
file.ID = types.NewFileID()
|
|
}
|
|
|
|
query := `
|
|
INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target)
|
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
ON CONFLICT(path) DO UPDATE SET
|
|
source_path = excluded.source_path,
|
|
mtime = excluded.mtime,
|
|
ctime = excluded.ctime,
|
|
size = excluded.size,
|
|
mode = excluded.mode,
|
|
uid = excluded.uid,
|
|
gid = excluded.gid,
|
|
link_target = excluded.link_target
|
|
RETURNING id
|
|
`
|
|
|
|
var idStr string
|
|
var err error
|
|
if tx != nil {
|
|
LogSQL("Execute", query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String())
|
|
err = tx.QueryRowContext(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
|
|
} else {
|
|
err = r.db.QueryRowWithLog(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
|
|
}
|
|
|
|
if err != nil {
|
|
return fmt.Errorf("inserting file: %w", err)
|
|
}
|
|
|
|
// Parse the returned ID
|
|
file.ID, err = types.ParseFileID(idStr)
|
|
if err != nil {
|
|
return fmt.Errorf("parsing file ID: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, error) {
|
|
query := `
|
|
SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
|
|
FROM files
|
|
WHERE path = ?
|
|
`
|
|
|
|
file, err := r.scanFile(r.db.conn.QueryRowContext(ctx, query, path))
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying file: %w", err)
|
|
}
|
|
|
|
return file, nil
|
|
}
|
|
|
|
// GetByID retrieves a file by its UUID
|
|
func (r *FileRepository) GetByID(ctx context.Context, id types.FileID) (*File, error) {
|
|
query := `
|
|
SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
|
|
FROM files
|
|
WHERE id = ?
|
|
`
|
|
|
|
file, err := r.scanFile(r.db.conn.QueryRowContext(ctx, query, id.String()))
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying file: %w", err)
|
|
}
|
|
|
|
return file, nil
|
|
}
|
|
|
|
func (r *FileRepository) GetByPathTx(ctx context.Context, tx *sql.Tx, path string) (*File, error) {
|
|
query := `
|
|
SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
|
|
FROM files
|
|
WHERE path = ?
|
|
`
|
|
|
|
LogSQL("GetByPathTx QueryRowContext", query, path)
|
|
file, err := r.scanFile(tx.QueryRowContext(ctx, query, path))
|
|
LogSQL("GetByPathTx Scan complete", query, path)
|
|
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying file: %w", err)
|
|
}
|
|
|
|
return file, nil
|
|
}
|
|
|
|
// scanFile is a helper that scans a single file row
|
|
func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
|
|
var file File
|
|
var idStr, pathStr, sourcePathStr string
|
|
var mtimeUnix, ctimeUnix int64
|
|
var linkTarget sql.NullString
|
|
|
|
err := row.Scan(
|
|
&idStr,
|
|
&pathStr,
|
|
&sourcePathStr,
|
|
&mtimeUnix,
|
|
&ctimeUnix,
|
|
&file.Size,
|
|
&file.Mode,
|
|
&file.UID,
|
|
&file.GID,
|
|
&linkTarget,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
file.ID, err = types.ParseFileID(idStr)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("parsing file ID: %w", err)
|
|
}
|
|
file.Path = types.FilePath(pathStr)
|
|
file.SourcePath = types.SourcePath(sourcePathStr)
|
|
file.MTime = time.Unix(mtimeUnix, 0).UTC()
|
|
file.CTime = time.Unix(ctimeUnix, 0).UTC()
|
|
if linkTarget.Valid {
|
|
file.LinkTarget = types.FilePath(linkTarget.String)
|
|
}
|
|
|
|
return &file, nil
|
|
}
|
|
|
|
// scanFileRows is a helper that scans a file row from rows iterator
|
|
func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
|
|
var file File
|
|
var idStr, pathStr, sourcePathStr string
|
|
var mtimeUnix, ctimeUnix int64
|
|
var linkTarget sql.NullString
|
|
|
|
err := rows.Scan(
|
|
&idStr,
|
|
&pathStr,
|
|
&sourcePathStr,
|
|
&mtimeUnix,
|
|
&ctimeUnix,
|
|
&file.Size,
|
|
&file.Mode,
|
|
&file.UID,
|
|
&file.GID,
|
|
&linkTarget,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
file.ID, err = types.ParseFileID(idStr)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("parsing file ID: %w", err)
|
|
}
|
|
file.Path = types.FilePath(pathStr)
|
|
file.SourcePath = types.SourcePath(sourcePathStr)
|
|
file.MTime = time.Unix(mtimeUnix, 0).UTC()
|
|
file.CTime = time.Unix(ctimeUnix, 0).UTC()
|
|
if linkTarget.Valid {
|
|
file.LinkTarget = types.FilePath(linkTarget.String)
|
|
}
|
|
|
|
return &file, nil
|
|
}
|
|
|
|
func (r *FileRepository) ListModifiedSince(ctx context.Context, since time.Time) ([]*File, error) {
|
|
query := `
|
|
SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
|
|
FROM files
|
|
WHERE mtime >= ?
|
|
ORDER BY path
|
|
`
|
|
|
|
rows, err := r.db.conn.QueryContext(ctx, query, since.Unix())
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying files: %w", err)
|
|
}
|
|
defer CloseRows(rows)
|
|
|
|
var files []*File
|
|
for rows.Next() {
|
|
file, err := r.scanFileRows(rows)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("scanning file: %w", err)
|
|
}
|
|
files = append(files, file)
|
|
}
|
|
|
|
return files, rows.Err()
|
|
}
|
|
|
|
func (r *FileRepository) Delete(ctx context.Context, tx *sql.Tx, path string) error {
|
|
query := `DELETE FROM files WHERE path = ?`
|
|
|
|
var err error
|
|
if tx != nil {
|
|
_, err = tx.ExecContext(ctx, query, path)
|
|
} else {
|
|
_, err = r.db.ExecWithLog(ctx, query, path)
|
|
}
|
|
|
|
if err != nil {
|
|
return fmt.Errorf("deleting file: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// DeleteByID deletes a file by its UUID
|
|
func (r *FileRepository) DeleteByID(ctx context.Context, tx *sql.Tx, id types.FileID) error {
|
|
query := `DELETE FROM files WHERE id = ?`
|
|
|
|
var err error
|
|
if tx != nil {
|
|
_, err = tx.ExecContext(ctx, query, id.String())
|
|
} else {
|
|
_, err = r.db.ExecWithLog(ctx, query, id.String())
|
|
}
|
|
|
|
if err != nil {
|
|
return fmt.Errorf("deleting file: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (r *FileRepository) ListByPrefix(ctx context.Context, prefix string) ([]*File, error) {
|
|
query := `
|
|
SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
|
|
FROM files
|
|
WHERE path LIKE ? || '%'
|
|
ORDER BY path
|
|
`
|
|
|
|
rows, err := r.db.conn.QueryContext(ctx, query, prefix)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying files: %w", err)
|
|
}
|
|
defer CloseRows(rows)
|
|
|
|
var files []*File
|
|
for rows.Next() {
|
|
file, err := r.scanFileRows(rows)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("scanning file: %w", err)
|
|
}
|
|
files = append(files, file)
|
|
}
|
|
|
|
return files, rows.Err()
|
|
}
|
|
|
|
// ListAll returns all files in the database
|
|
func (r *FileRepository) ListAll(ctx context.Context) ([]*File, error) {
|
|
query := `
|
|
SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
|
|
FROM files
|
|
ORDER BY path
|
|
`
|
|
|
|
rows, err := r.db.conn.QueryContext(ctx, query)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("querying files: %w", err)
|
|
}
|
|
defer CloseRows(rows)
|
|
|
|
var files []*File
|
|
for rows.Next() {
|
|
file, err := r.scanFileRows(rows)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("scanning file: %w", err)
|
|
}
|
|
files = append(files, file)
|
|
}
|
|
|
|
return files, rows.Err()
|
|
}
|
|
|
|
// CreateBatch inserts or updates multiple files in a single statement for efficiency.
|
|
// File IDs must be pre-generated before calling this method.
|
|
func (r *FileRepository) CreateBatch(ctx context.Context, tx *sql.Tx, files []*File) error {
|
|
if len(files) == 0 {
|
|
return nil
|
|
}
|
|
|
|
// Each File has 10 values, so batch at 100 to be safe with SQLite's variable limit
|
|
const batchSize = 100
|
|
|
|
for i := 0; i < len(files); i += batchSize {
|
|
end := i + batchSize
|
|
if end > len(files) {
|
|
end = len(files)
|
|
}
|
|
batch := files[i:end]
|
|
|
|
query := `INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target) VALUES `
|
|
args := make([]interface{}, 0, len(batch)*10)
|
|
for j, f := range batch {
|
|
if j > 0 {
|
|
query += ", "
|
|
}
|
|
query += "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
|
|
args = append(args, f.ID.String(), f.Path.String(), f.SourcePath.String(), f.MTime.Unix(), f.CTime.Unix(), f.Size, f.Mode, f.UID, f.GID, f.LinkTarget.String())
|
|
}
|
|
query += ` ON CONFLICT(path) DO UPDATE SET
|
|
source_path = excluded.source_path,
|
|
mtime = excluded.mtime,
|
|
ctime = excluded.ctime,
|
|
size = excluded.size,
|
|
mode = excluded.mode,
|
|
uid = excluded.uid,
|
|
gid = excluded.gid,
|
|
link_target = excluded.link_target`
|
|
|
|
var err error
|
|
if tx != nil {
|
|
_, err = tx.ExecContext(ctx, query, args...)
|
|
} else {
|
|
_, err = r.db.ExecWithLog(ctx, query, args...)
|
|
}
|
|
if err != nil {
|
|
return fmt.Errorf("batch inserting files: %w", err)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// DeleteOrphaned deletes files that are not referenced by any snapshot
|
|
func (r *FileRepository) DeleteOrphaned(ctx context.Context) error {
|
|
query := `
|
|
DELETE FROM files
|
|
WHERE NOT EXISTS (
|
|
SELECT 1 FROM snapshot_files
|
|
WHERE snapshot_files.file_id = files.id
|
|
)
|
|
`
|
|
|
|
result, err := r.db.ExecWithLog(ctx, query)
|
|
if err != nil {
|
|
return fmt.Errorf("deleting orphaned files: %w", err)
|
|
}
|
|
|
|
rowsAffected, _ := result.RowsAffected()
|
|
if rowsAffected > 0 {
|
|
log.Debug("Deleted orphaned files", "count", rowsAffected)
|
|
}
|
|
|
|
return nil
|
|
}
|