Move schema_migrations table creation into 000.sql with INTEGER version column #58

Open
clawbot wants to merge 2 commits from feat/migration-bootstrap-000sql into main
5 changed files with 185 additions and 28 deletions
Showing only changes of commit eb1d5dd561 - Show all commits

View File

@@ -6,24 +6,32 @@
// multiple source files. Blobs are content-addressed, meaning their filename
// is derived from their SHA256 hash after compression and encryption.
//
// The database does not support migrations. If the schema changes, delete
// the local database and perform a full backup to recreate it.
// Schema is managed via numbered SQL migrations embedded in the schema/
// directory. Migration 000.sql bootstraps the schema_migrations tracking
// table; subsequent migrations (001, 002, …) are applied in order.
package database
import (
"context"
"database/sql"
_ "embed"
"embed"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"git.eeqj.de/sneak/vaultik/internal/log"
_ "modernc.org/sqlite"
)
//go:embed schema.sql
var schemaSQL string
//go:embed schema/*.sql
var schemaFS embed.FS
// bootstrapVersion is the migration that creates the schema_migrations
// table itself. It is applied before the normal migration loop.
const bootstrapVersion = 0
// DB represents the Vaultik local index database connection.
// It uses SQLite to track file metadata, content-defined chunks, and blob associations.
@@ -35,6 +43,46 @@ type DB struct {
path string
}
// ParseMigrationVersion extracts the numeric version prefix from a migration
// filename. Accepted forms are "<version>.sql" and "<version>_<description>.sql",
// where <version> consists solely of ASCII digits (zero padding allowed,
// e.g. "001", "002"). It returns the parsed integer version, or an error
// when the filename does not match the expected pattern.
func ParseMigrationVersion(filename string) (int, error) {
	stem := strings.TrimSuffix(filename, filepath.Ext(filename))
	if stem == "" {
		return 0, fmt.Errorf("invalid migration filename %q: empty name", filename)
	}
	// Everything before the first underscore (or the whole stem when there
	// is none) is treated as the version string.
	versionStr, _, _ := strings.Cut(stem, "_")
	if versionStr == "" {
		return 0, fmt.Errorf("invalid migration filename %q: empty version prefix", filename)
	}
	// Reject anything that is not an ASCII digit before handing off to
	// strconv, so signs and spaces that Atoi tolerates are disallowed.
	for _, r := range versionStr {
		if r < '0' || r > '9' {
			return 0, fmt.Errorf(
				"invalid migration filename %q: version %q contains non-numeric character %q",
				filename, versionStr, string(r),
			)
		}
	}
	// Atoi can still fail here on overflow (absurdly long version strings).
	version, err := strconv.Atoi(versionStr)
	if err != nil {
		return 0, fmt.Errorf("invalid migration filename %q: %w", filename, err)
	}
	return version, nil
}
// New creates a new database connection at the specified path.
// It creates the schema if needed and configures SQLite with WAL mode for
// better concurrency. SQLite handles crash recovery automatically when
@@ -72,9 +120,9 @@ func New(ctx context.Context, path string) (*DB, error) {
}
db := &DB{conn: conn, path: path}
if err := db.createSchema(ctx); err != nil {
if err := applyMigrations(ctx, conn); err != nil {
_ = conn.Close()
return nil, fmt.Errorf("creating schema: %w", err)
return nil, fmt.Errorf("applying migrations: %w", err)
}
return db, nil
}
@@ -125,9 +173,9 @@ func New(ctx context.Context, path string) (*DB, error) {
}
db := &DB{conn: conn, path: path}
if err := db.createSchema(ctx); err != nil {
if err := applyMigrations(ctx, conn); err != nil {
_ = conn.Close()
return nil, fmt.Errorf("creating schema: %w", err)
return nil, fmt.Errorf("applying migrations: %w", err)
}
log.Debug("Database connection established successfully", "path", path)
@@ -198,9 +246,120 @@ func (db *DB) QueryRowWithLog(
return db.conn.QueryRowContext(ctx, query, args...)
}
func (db *DB) createSchema(ctx context.Context) error {
_, err := db.conn.ExecContext(ctx, schemaSQL)
// collectMigrations lists the *.sql files embedded under schema/ and
// returns their names sorted lexicographically. The ordering is correct
// only because migration versions are zero-padded (e.g. "000", "001");
// an unpadded "10" would sort before "2".
func collectMigrations() ([]string, error) {
	entries, err := schemaFS.ReadDir("schema")
	if err != nil {
		return nil, fmt.Errorf("failed to read schema directory: %w", err)
	}
	var names []string
	for _, entry := range entries {
		// Skip subdirectories and anything that is not a .sql file.
		if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".sql") {
			continue
		}
		names = append(names, entry.Name())
	}
	sort.Strings(names)
	return names, nil
}
// bootstrapMigrationsTable makes sure the schema_migrations tracking table
// is present, executing the embedded bootstrap migration schema/000.sql
// when it is not. It runs before the normal migration loop, which needs
// the table to record applied versions.
func bootstrapMigrationsTable(ctx context.Context, db *sql.DB) error {
	const probe = "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='schema_migrations'"
	var found int
	if err := db.QueryRowContext(ctx, probe).Scan(&found); err != nil {
		return fmt.Errorf("failed to check for migrations table: %w", err)
	}
	if found > 0 {
		// Table already exists; nothing to bootstrap.
		return nil
	}
	sqlText, err := schemaFS.ReadFile("schema/000.sql")
	if err != nil {
		return fmt.Errorf("failed to read bootstrap migration 000.sql: %w", err)
	}
	log.Info("applying bootstrap migration", "version", bootstrapVersion)
	if _, err := db.ExecContext(ctx, string(sqlText)); err != nil {
		return fmt.Errorf("failed to apply bootstrap migration: %w", err)
	}
	return nil
}
// applyMigrations brings the database schema up to date. It first ensures
// the schema_migrations table exists (bootstrapping via 000.sql when
// needed), then applies every embedded migration whose version is not yet
// recorded, in ascending filename order.
//
// Each pending migration and its schema_migrations bookkeeping row are
// executed inside a single transaction (see applyOneMigration), so a
// failure cannot record a migration that did not run, and a crash between
// apply and record cannot leave an applied-but-unrecorded migration.
func applyMigrations(ctx context.Context, db *sql.DB) error {
	if err := bootstrapMigrationsTable(ctx, db); err != nil {
		return err
	}
	migrations, err := collectMigrations()
	if err != nil {
		return err
	}
	for _, migration := range migrations {
		version, err := ParseMigrationVersion(migration)
		if err != nil {
			return err
		}
		// Skip migrations already recorded as applied.
		var count int
		if err := db.QueryRowContext(ctx,
			"SELECT COUNT(*) FROM schema_migrations WHERE version = ?",
			version,
		).Scan(&count); err != nil {
			return fmt.Errorf("failed to check migration status: %w", err)
		}
		if count > 0 {
			log.Debug("migration already applied", "version", version)
			continue
		}
		// embed.FS paths are always slash-separated (io/fs rules);
		// filepath.Join would produce backslashes on Windows and fail
		// to resolve the embedded file.
		content, err := schemaFS.ReadFile("schema/" + migration)
		if err != nil {
			return fmt.Errorf("failed to read migration %s: %w", migration, err)
		}
		if err := applyOneMigration(ctx, db, migration, version, string(content)); err != nil {
			return err
		}
		log.Info("migration applied successfully", "version", version)
	}
	return nil
}

// applyOneMigration executes a single migration's SQL and records it in
// schema_migrations atomically: both happen in one transaction that is
// rolled back on any failure. SQLite rolls DDL back with the transaction,
// so this is safe for schema-changing migrations.
func applyOneMigration(ctx context.Context, db *sql.DB, name string, version int, sqlText string) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction for migration %s: %w", name, err)
	}
	// Rollback after a successful Commit is a harmless no-op.
	defer func() { _ = tx.Rollback() }()
	log.Info("applying migration", "version", version)
	if _, err := tx.ExecContext(ctx, sqlText); err != nil {
		return fmt.Errorf("failed to apply migration %s: %w", name, err)
	}
	if _, err := tx.ExecContext(ctx,
		"INSERT INTO schema_migrations (version) VALUES (?)",
		version,
	); err != nil {
		return fmt.Errorf("failed to record migration %s: %w", name, err)
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("failed to commit migration %s: %w", name, err)
	}
	return nil
}
// NewTestDB creates an in-memory SQLite database for testing purposes.

View File

@@ -26,9 +26,10 @@ func TestDatabase(t *testing.T) {
t.Fatal("database connection is nil")
}
// Test schema creation (already done in New)
// Test schema creation (already done in New via migrations)
// Verify tables exist
tables := []string{
"schema_migrations",
"files", "file_chunks", "chunks", "blobs",
"blob_chunks", "chunk_files", "snapshots",
}

View File

@@ -0,0 +1,9 @@
-- Migration 000: Schema migrations tracking table
-- Applied as a bootstrap step before the normal migration loop.
-- version is the integer migration number; applied_at records when the
-- migration ran.
CREATE TABLE IF NOT EXISTS schema_migrations (
version INTEGER PRIMARY KEY,
applied_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
-- Record this bootstrap migration itself (version 0) so the migration
-- loop skips 000.sql on subsequent runs; OR IGNORE keeps re-runs idempotent.
INSERT OR IGNORE INTO schema_migrations (version) VALUES (0);

View File

@@ -1,6 +1,5 @@
-- Vaultik Database Schema
-- Note: This database does not support migrations. If the schema changes,
-- delete the local database and perform a full backup to recreate it.
-- Migration 001: Initial Vaultik schema
-- All core tables for tracking files, chunks, blobs, snapshots, and uploads.
-- Files table: stores metadata about files in the filesystem
CREATE TABLE IF NOT EXISTS files (

View File

@@ -1,11 +0,0 @@
-- Track blob upload metrics
CREATE TABLE IF NOT EXISTS uploads (
blob_hash TEXT PRIMARY KEY,
uploaded_at TIMESTAMP NOT NULL,
size INTEGER NOT NULL,
duration_ms INTEGER NOT NULL,
FOREIGN KEY (blob_hash) REFERENCES blobs(blob_hash)
);
CREATE INDEX idx_uploads_uploaded_at ON uploads(uploaded_at);
CREATE INDEX idx_uploads_duration ON uploads(duration_ms);