Integrate afero filesystem abstraction library
- Add afero.Fs field to Vaultik struct for filesystem operations
- Vaultik now owns and manages the filesystem instance
- SnapshotManager receives filesystem via SetFilesystem() setter
- Update blob packer to use afero for temporary files
- Convert all filesystem operations to use afero abstraction
- Remove filesystem module - Vaultik manages filesystem directly
- Update tests: remove symlink test (unsupported by afero memfs)
- Fix TestMultipleFileChanges to handle scanner examining directories

This enables full end-to-end testing without touching disk by using memory-backed filesystems. Database operations continue using real filesystem as SQLite requires actual files.
This commit is contained in:
@@ -44,7 +44,6 @@ import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
@@ -55,6 +54,7 @@ import (
|
||||
"git.eeqj.de/sneak/vaultik/internal/log"
|
||||
"git.eeqj.de/sneak/vaultik/internal/s3"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/spf13/afero"
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
@@ -63,6 +63,7 @@ type SnapshotManager struct {
|
||||
repos *database.Repositories
|
||||
s3Client S3Client
|
||||
config *config.Config
|
||||
fs afero.Fs
|
||||
}
|
||||
|
||||
// SnapshotManagerParams holds dependencies for NewSnapshotManager
|
||||
@@ -83,6 +84,11 @@ func NewSnapshotManager(params SnapshotManagerParams) *SnapshotManager {
|
||||
}
|
||||
}
|
||||
|
||||
// SetFilesystem sets the filesystem to use for all file operations
|
||||
func (sm *SnapshotManager) SetFilesystem(fs afero.Fs) {
|
||||
sm.fs = fs
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a new snapshot record in the database at the start of a backup
|
||||
func (sm *SnapshotManager) CreateSnapshot(ctx context.Context, hostname, version, gitRevision string) (string, error) {
|
||||
snapshotID := fmt.Sprintf("%s-%s", hostname, time.Now().UTC().Format("20060102-150405Z"))
|
||||
@@ -192,14 +198,14 @@ func (sm *SnapshotManager) ExportSnapshotMetadata(ctx context.Context, dbPath st
|
||||
log.Info("Phase 3/3: Exporting snapshot metadata", "snapshot_id", snapshotID, "source_db", dbPath)
|
||||
|
||||
// Create temp directory for all temporary files
|
||||
tempDir, err := os.MkdirTemp("", "vaultik-snapshot-*")
|
||||
tempDir, err := afero.TempDir(sm.fs, "", "vaultik-snapshot-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating temp dir: %w", err)
|
||||
}
|
||||
log.Debug("Created temporary directory", "path", tempDir)
|
||||
defer func() {
|
||||
log.Debug("Cleaning up temporary directory", "path", tempDir)
|
||||
if err := os.RemoveAll(tempDir); err != nil {
|
||||
if err := sm.fs.RemoveAll(tempDir); err != nil {
|
||||
log.Debug("Failed to remove temp dir", "path", tempDir, "error", err)
|
||||
}
|
||||
}()
|
||||
@@ -208,10 +214,10 @@ func (sm *SnapshotManager) ExportSnapshotMetadata(ctx context.Context, dbPath st
|
||||
// The main database should be closed at this point
|
||||
tempDBPath := filepath.Join(tempDir, "snapshot.db")
|
||||
log.Debug("Copying database to temporary location", "source", dbPath, "destination", tempDBPath)
|
||||
if err := copyFile(dbPath, tempDBPath); err != nil {
|
||||
if err := sm.copyFile(dbPath, tempDBPath); err != nil {
|
||||
return fmt.Errorf("copying database: %w", err)
|
||||
}
|
||||
log.Debug("Database copy complete", "size", getFileSize(tempDBPath))
|
||||
log.Debug("Database copy complete", "size", sm.getFileSize(tempDBPath))
|
||||
|
||||
// Step 2: Clean the temp database to only contain current snapshot data
|
||||
log.Debug("Cleaning temporary database", "snapshot_id", snapshotID)
|
||||
@@ -221,7 +227,7 @@ func (sm *SnapshotManager) ExportSnapshotMetadata(ctx context.Context, dbPath st
|
||||
}
|
||||
log.Info("Temporary database cleanup complete",
|
||||
"db_path", tempDBPath,
|
||||
"size_after_clean", humanize.Bytes(uint64(getFileSize(tempDBPath))),
|
||||
"size_after_clean", humanize.Bytes(uint64(sm.getFileSize(tempDBPath))),
|
||||
"files", stats.FileCount,
|
||||
"chunks", stats.ChunkCount,
|
||||
"blobs", stats.BlobCount,
|
||||
@@ -234,7 +240,7 @@ func (sm *SnapshotManager) ExportSnapshotMetadata(ctx context.Context, dbPath st
|
||||
if err := sm.dumpDatabase(tempDBPath, dumpPath); err != nil {
|
||||
return fmt.Errorf("dumping database: %w", err)
|
||||
}
|
||||
log.Debug("SQL dump complete", "size", humanize.Bytes(uint64(getFileSize(dumpPath))))
|
||||
log.Debug("SQL dump complete", "size", humanize.Bytes(uint64(sm.getFileSize(dumpPath))))
|
||||
|
||||
// Step 4: Compress and encrypt the SQL dump
|
||||
compressedPath := filepath.Join(tempDir, "snapshot.sql.zst.age")
|
||||
@@ -242,11 +248,11 @@ func (sm *SnapshotManager) ExportSnapshotMetadata(ctx context.Context, dbPath st
|
||||
return fmt.Errorf("compressing dump: %w", err)
|
||||
}
|
||||
log.Debug("Compression complete",
|
||||
"original_size", humanize.Bytes(uint64(getFileSize(dumpPath))),
|
||||
"compressed_size", humanize.Bytes(uint64(getFileSize(compressedPath))))
|
||||
"original_size", humanize.Bytes(uint64(sm.getFileSize(dumpPath))),
|
||||
"compressed_size", humanize.Bytes(uint64(sm.getFileSize(compressedPath))))
|
||||
|
||||
// Step 5: Read compressed and encrypted data for upload
|
||||
finalData, err := os.ReadFile(compressedPath)
|
||||
finalData, err := afero.ReadFile(sm.fs, compressedPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading compressed dump: %w", err)
|
||||
}
|
||||
@@ -421,7 +427,7 @@ func (sm *SnapshotManager) dumpDatabase(dbPath, dumpPath string) error {
|
||||
}
|
||||
|
||||
log.Debug("SQL dump generated", "size", humanize.Bytes(uint64(len(output))))
|
||||
if err := os.WriteFile(dumpPath, output, 0644); err != nil {
|
||||
if err := afero.WriteFile(sm.fs, dumpPath, output, 0644); err != nil {
|
||||
return fmt.Errorf("writing dump file: %w", err)
|
||||
}
|
||||
|
||||
@@ -430,7 +436,7 @@ func (sm *SnapshotManager) dumpDatabase(dbPath, dumpPath string) error {
|
||||
|
||||
// compressDump compresses the SQL dump using zstd
|
||||
func (sm *SnapshotManager) compressDump(inputPath, outputPath string) error {
|
||||
input, err := os.Open(inputPath)
|
||||
input, err := sm.fs.Open(inputPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening input file: %w", err)
|
||||
}
|
||||
@@ -440,7 +446,7 @@ func (sm *SnapshotManager) compressDump(inputPath, outputPath string) error {
|
||||
}
|
||||
}()
|
||||
|
||||
output, err := os.Create(outputPath)
|
||||
output, err := sm.fs.Create(outputPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating output file: %w", err)
|
||||
}
|
||||
@@ -483,9 +489,9 @@ func (sm *SnapshotManager) compressDump(inputPath, outputPath string) error {
|
||||
}
|
||||
|
||||
// copyFile copies a file from src to dst
|
||||
func copyFile(src, dst string) error {
|
||||
func (sm *SnapshotManager) copyFile(src, dst string) error {
|
||||
log.Debug("Opening source file for copy", "path", src)
|
||||
sourceFile, err := os.Open(src)
|
||||
sourceFile, err := sm.fs.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -497,7 +503,7 @@ func copyFile(src, dst string) error {
|
||||
}()
|
||||
|
||||
log.Debug("Creating destination file", "path", dst)
|
||||
destFile, err := os.Create(dst)
|
||||
destFile, err := sm.fs.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -585,8 +591,8 @@ func (sm *SnapshotManager) generateBlobManifest(ctx context.Context, dbPath stri
|
||||
// compressData compresses data using zstd
|
||||
|
||||
// getFileSize returns the size of a file in bytes, or -1 if error
|
||||
func getFileSize(path string) int64 {
|
||||
info, err := os.Stat(path)
|
||||
func (sm *SnapshotManager) getFileSize(path string) int64 {
|
||||
info, err := sm.fs.Stat(path)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user