- Changed blob table to use ID (UUID) as primary key instead of hash - Blob records are now created at packing start, enabling immediate chunk associations - Implemented streaming chunking to process large files without memory exhaustion - Fixed blob manifest generation to include all referenced blobs - Updated all foreign key references from blob_hash to blob_id - Added progress reporting and improved error handling - Enforced encryption requirement for all blob packing - Updated tests to use test encryption keys - Added Cyrillic transliteration to README
288 lines
8.3 KiB
Go
288 lines
8.3 KiB
Go
package cli
|
|
|
|
import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"git.eeqj.de/sneak/vaultik/internal/backup"
	"git.eeqj.de/sneak/vaultik/internal/config"
	"git.eeqj.de/sneak/vaultik/internal/crypto"
	"git.eeqj.de/sneak/vaultik/internal/database"
	"git.eeqj.de/sneak/vaultik/internal/globals"
	"git.eeqj.de/sneak/vaultik/internal/log"
	"git.eeqj.de/sneak/vaultik/internal/s3"

	"github.com/spf13/cobra"
	"go.uber.org/fx"
)
|
|
|
|
// BackupOptions contains options for the backup command
type BackupOptions struct {
	// ConfigPath is the config file location, resolved from the --config
	// flag, the VAULTIK_CONFIG environment variable, or the built-in default.
	ConfigPath string
	// Daemon enables continuous operation with inotify monitoring
	// (not yet implemented; see runBackup).
	Daemon bool
	// Cron enables cron mode: progress reporting is disabled and output is
	// suppressed unless an error occurs.
	Cron bool
	// Prune requests deletion of previous snapshots and unreferenced blobs
	// after the backup completes (not yet implemented; see runBackup).
	Prune bool
}
|
|
|
|
// BackupApp contains all dependencies needed for running backups
type BackupApp struct {
	// Globals carries build information (version, commit) for logging
	// and snapshot metadata.
	Globals *globals.Globals
	// Config is the loaded vaultik configuration.
	Config *config.Config
	// Repositories provides access to the local index database tables.
	Repositories *database.Repositories
	// ScannerFactory constructs a directory scanner for a backup run.
	ScannerFactory backup.ScannerFactory
	// S3Client uploads blobs and metadata to object storage.
	S3Client *s3.Client
	// DB is the underlying index database handle.
	DB *database.DB
	// Lifecycle is the fx lifecycle this app is registered with.
	Lifecycle fx.Lifecycle
	// Shutdowner stops the fx app once the backup run finishes.
	Shutdowner fx.Shutdowner
}
|
|
|
|
// NewBackupCommand creates the backup command
|
|
func NewBackupCommand() *cobra.Command {
|
|
opts := &BackupOptions{}
|
|
|
|
cmd := &cobra.Command{
|
|
Use: "backup",
|
|
Short: "Perform incremental backup",
|
|
Long: `Backup configured directories using incremental deduplication and encryption.
|
|
|
|
Config is located at /etc/vaultik/config.yml, but can be overridden by specifying
|
|
a path using --config or by setting VAULTIK_CONFIG to a path.`,
|
|
Args: cobra.NoArgs,
|
|
RunE: func(cmd *cobra.Command, args []string) error {
|
|
// If --config not specified, check environment variable
|
|
if opts.ConfigPath == "" {
|
|
opts.ConfigPath = os.Getenv("VAULTIK_CONFIG")
|
|
}
|
|
// If still not specified, use default
|
|
if opts.ConfigPath == "" {
|
|
defaultConfig := "/etc/vaultik/config.yml"
|
|
if _, err := os.Stat(defaultConfig); err == nil {
|
|
opts.ConfigPath = defaultConfig
|
|
} else {
|
|
return fmt.Errorf("no config file specified, VAULTIK_CONFIG not set, and %s not found", defaultConfig)
|
|
}
|
|
}
|
|
return runBackup(cmd.Context(), opts)
|
|
},
|
|
}
|
|
|
|
cmd.Flags().StringVar(&opts.ConfigPath, "config", "", "Path to config file")
|
|
cmd.Flags().BoolVar(&opts.Daemon, "daemon", false, "Run in daemon mode with inotify monitoring")
|
|
cmd.Flags().BoolVar(&opts.Cron, "cron", false, "Run in cron mode (silent unless error)")
|
|
cmd.Flags().BoolVar(&opts.Prune, "prune", false, "Delete all previous snapshots and unreferenced blobs after backup")
|
|
|
|
return cmd
|
|
}
|
|
|
|
func runBackup(ctx context.Context, opts *BackupOptions) error {
|
|
rootFlags := GetRootFlags()
|
|
return RunWithApp(ctx, AppOptions{
|
|
ConfigPath: opts.ConfigPath,
|
|
LogOptions: log.LogOptions{
|
|
Verbose: rootFlags.Verbose,
|
|
Debug: rootFlags.Debug,
|
|
Cron: opts.Cron,
|
|
},
|
|
Modules: []fx.Option{
|
|
backup.Module,
|
|
s3.Module,
|
|
fx.Provide(fx.Annotate(
|
|
func(g *globals.Globals, cfg *config.Config, repos *database.Repositories,
|
|
scannerFactory backup.ScannerFactory, s3Client *s3.Client, db *database.DB,
|
|
lc fx.Lifecycle, shutdowner fx.Shutdowner) *BackupApp {
|
|
return &BackupApp{
|
|
Globals: g,
|
|
Config: cfg,
|
|
Repositories: repos,
|
|
ScannerFactory: scannerFactory,
|
|
S3Client: s3Client,
|
|
DB: db,
|
|
Lifecycle: lc,
|
|
Shutdowner: shutdowner,
|
|
}
|
|
},
|
|
)),
|
|
},
|
|
Invokes: []fx.Option{
|
|
fx.Invoke(func(app *BackupApp, lc fx.Lifecycle) {
|
|
// Create a cancellable context for the backup
|
|
backupCtx, backupCancel := context.WithCancel(context.Background())
|
|
|
|
lc.Append(fx.Hook{
|
|
OnStart: func(ctx context.Context) error {
|
|
// Start the backup in a goroutine
|
|
go func() {
|
|
// Run the backup
|
|
if err := app.runBackup(backupCtx, opts); err != nil {
|
|
if err != context.Canceled {
|
|
log.Error("Backup failed", "error", err)
|
|
}
|
|
}
|
|
|
|
// Shutdown the app when backup completes
|
|
if err := app.Shutdowner.Shutdown(); err != nil {
|
|
log.Error("Failed to shutdown", "error", err)
|
|
}
|
|
}()
|
|
return nil
|
|
},
|
|
OnStop: func(ctx context.Context) error {
|
|
log.Debug("Stopping backup")
|
|
// Cancel the backup context
|
|
backupCancel()
|
|
return nil
|
|
},
|
|
})
|
|
}),
|
|
},
|
|
})
|
|
}
|
|
|
|
// runBackup executes the backup operation
|
|
func (app *BackupApp) runBackup(ctx context.Context, opts *BackupOptions) error {
|
|
log.Info("Starting backup",
|
|
"config", opts.ConfigPath,
|
|
"version", app.Globals.Version,
|
|
"commit", app.Globals.Commit,
|
|
"index_path", app.Config.IndexPath,
|
|
)
|
|
|
|
if opts.Daemon {
|
|
log.Info("Running in daemon mode")
|
|
// TODO: Implement daemon mode with inotify
|
|
return fmt.Errorf("daemon mode not yet implemented")
|
|
}
|
|
|
|
// Resolve source directories to absolute paths
|
|
resolvedDirs := make([]string, 0, len(app.Config.SourceDirs))
|
|
for _, dir := range app.Config.SourceDirs {
|
|
absPath, err := filepath.Abs(dir)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to resolve absolute path for %s: %w", dir, err)
|
|
}
|
|
|
|
// Resolve symlinks
|
|
resolvedPath, err := filepath.EvalSymlinks(absPath)
|
|
if err != nil {
|
|
// If the path doesn't exist yet, use the absolute path
|
|
if os.IsNotExist(err) {
|
|
resolvedPath = absPath
|
|
} else {
|
|
return fmt.Errorf("failed to resolve symlinks for %s: %w", absPath, err)
|
|
}
|
|
}
|
|
|
|
resolvedDirs = append(resolvedDirs, resolvedPath)
|
|
}
|
|
|
|
// Create scanner with progress enabled (unless in cron mode)
|
|
scanner := app.ScannerFactory(backup.ScannerParams{
|
|
EnableProgress: !opts.Cron,
|
|
})
|
|
|
|
// Perform a single backup run
|
|
log.Notice("Starting backup", "source_dirs", len(resolvedDirs))
|
|
for i, dir := range resolvedDirs {
|
|
log.Info("Source directory", "index", i+1, "path", dir)
|
|
}
|
|
|
|
totalFiles := 0
|
|
totalBytes := int64(0)
|
|
totalChunks := 0
|
|
totalBlobs := 0
|
|
|
|
// Create a new snapshot at the beginning of backup
|
|
hostname := app.Config.Hostname
|
|
if hostname == "" {
|
|
hostname, _ = os.Hostname()
|
|
}
|
|
|
|
// Create encryptor if age recipients are configured
|
|
var encryptor backup.Encryptor
|
|
if len(app.Config.AgeRecipients) > 0 {
|
|
cryptoEncryptor, err := crypto.NewEncryptor(app.Config.AgeRecipients)
|
|
if err != nil {
|
|
return fmt.Errorf("creating encryptor: %w", err)
|
|
}
|
|
encryptor = cryptoEncryptor
|
|
}
|
|
|
|
snapshotManager := backup.NewSnapshotManager(app.Repositories, app.S3Client, encryptor)
|
|
snapshotID, err := snapshotManager.CreateSnapshot(ctx, hostname, app.Globals.Version)
|
|
if err != nil {
|
|
return fmt.Errorf("creating snapshot: %w", err)
|
|
}
|
|
log.Info("Created snapshot", "snapshot_id", snapshotID)
|
|
|
|
for _, dir := range resolvedDirs {
|
|
// Check if context is cancelled
|
|
select {
|
|
case <-ctx.Done():
|
|
log.Info("Backup cancelled")
|
|
return ctx.Err()
|
|
default:
|
|
}
|
|
|
|
log.Info("Scanning directory", "path", dir)
|
|
result, err := scanner.Scan(ctx, dir, snapshotID)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to scan %s: %w", dir, err)
|
|
}
|
|
|
|
totalFiles += result.FilesScanned
|
|
totalBytes += result.BytesScanned
|
|
totalChunks += result.ChunksCreated
|
|
totalBlobs += result.BlobsCreated
|
|
|
|
log.Info("Directory scan complete",
|
|
"path", dir,
|
|
"files", result.FilesScanned,
|
|
"files_skipped", result.FilesSkipped,
|
|
"bytes", result.BytesScanned,
|
|
"bytes_skipped", result.BytesSkipped,
|
|
"chunks", result.ChunksCreated,
|
|
"blobs", result.BlobsCreated,
|
|
"duration", result.EndTime.Sub(result.StartTime))
|
|
}
|
|
|
|
// Update snapshot statistics
|
|
stats := backup.BackupStats{
|
|
FilesScanned: totalFiles,
|
|
BytesScanned: totalBytes,
|
|
ChunksCreated: totalChunks,
|
|
BlobsCreated: totalBlobs,
|
|
BytesUploaded: totalBytes, // TODO: Track actual uploaded bytes
|
|
}
|
|
|
|
if err := snapshotManager.UpdateSnapshotStats(ctx, snapshotID, stats); err != nil {
|
|
return fmt.Errorf("updating snapshot stats: %w", err)
|
|
}
|
|
|
|
// Mark snapshot as complete
|
|
if err := snapshotManager.CompleteSnapshot(ctx, snapshotID); err != nil {
|
|
return fmt.Errorf("completing snapshot: %w", err)
|
|
}
|
|
|
|
// Export snapshot metadata
|
|
// Export snapshot metadata without closing the database
|
|
// The export function should handle its own database connection
|
|
if err := snapshotManager.ExportSnapshotMetadata(ctx, app.Config.IndexPath, snapshotID); err != nil {
|
|
return fmt.Errorf("exporting snapshot metadata: %w", err)
|
|
}
|
|
|
|
log.Notice("Backup complete",
|
|
"snapshot_id", snapshotID,
|
|
"total_files", totalFiles,
|
|
"total_bytes", totalBytes,
|
|
"total_chunks", totalChunks,
|
|
"total_blobs", totalBlobs)
|
|
|
|
if opts.Prune {
|
|
log.Info("Pruning enabled - will delete old snapshots after backup")
|
|
// TODO: Implement pruning
|
|
}
|
|
|
|
return nil
|
|
}
|