Integrate afero filesystem abstraction library

- Add afero.Fs field to Vaultik struct for filesystem operations
- Vaultik now owns and manages the filesystem instance
- SnapshotManager receives filesystem via SetFilesystem() setter
- Update blob packer to use afero for temporary files
- Convert all filesystem operations to use afero abstraction
- Remove filesystem module - Vaultik manages filesystem directly
- Update tests: remove symlink test (unsupported by afero memfs)
- Fix TestMultipleFileChanges to handle scanner examining directories

This enables full end-to-end testing without touching disk by using
memory-backed filesystems. Database operations continue to use the real
filesystem, as SQLite requires actual on-disk files.
This commit is contained in:
2025-07-26 15:33:18 +02:00
parent e29a995120
commit bb38f8c5d6
9 changed files with 78 additions and 146 deletions

View File

@@ -20,7 +20,6 @@ import (
"encoding/hex"
"fmt"
"io"
"os"
"sync"
"time"
@@ -28,6 +27,7 @@ import (
"git.eeqj.de/sneak/vaultik/internal/database"
"git.eeqj.de/sneak/vaultik/internal/log"
"github.com/google/uuid"
"github.com/spf13/afero"
)
// BlobHandler is a callback function invoked when a blob is finalized and ready for upload.
@@ -44,6 +44,7 @@ type PackerConfig struct {
Recipients []string // Age recipients for encryption
Repositories *database.Repositories // Database repositories for tracking blob metadata
BlobHandler BlobHandler // Optional callback when blob is ready for upload
Fs afero.Fs // Filesystem for temporary files
}
// Packer accumulates chunks and packs them into blobs.
@@ -55,6 +56,7 @@ type Packer struct {
recipients []string // Age recipients for encryption
blobHandler BlobHandler // Called when blob is ready
repos *database.Repositories // For creating blob records
fs afero.Fs // Filesystem for temporary files
// Mutex for thread-safe blob creation
mu sync.Mutex
@@ -69,7 +71,7 @@ type blobInProgress struct {
id string // UUID of the blob
chunks []*chunkInfo // Track chunk metadata
chunkSet map[string]bool // Track unique chunks in this blob
tempFile *os.File // Temporary file for encrypted compressed data
tempFile afero.File // Temporary file for encrypted compressed data
writer *blobgen.Writer // Unified compression/encryption/hashing writer
startTime time.Time
size int64 // Current uncompressed size
@@ -113,7 +115,7 @@ type BlobChunkRef struct {
type BlobWithReader struct {
*FinishedBlob
Reader io.ReadSeeker
TempFile *os.File // Optional, only set for disk-based blobs
TempFile afero.File // Optional, only set for disk-based blobs
}
// NewPacker creates a new blob packer that accumulates chunks into blobs.
@@ -126,12 +128,16 @@ func NewPacker(cfg PackerConfig) (*Packer, error) {
if cfg.MaxBlobSize <= 0 {
return nil, fmt.Errorf("max blob size must be positive")
}
if cfg.Fs == nil {
return nil, fmt.Errorf("filesystem is required")
}
return &Packer{
maxBlobSize: cfg.MaxBlobSize,
compressionLevel: cfg.CompressionLevel,
recipients: cfg.Recipients,
blobHandler: cfg.BlobHandler,
repos: cfg.Repositories,
fs: cfg.Fs,
finishedBlobs: make([]*FinishedBlob, 0),
}, nil
}
@@ -255,7 +261,7 @@ func (p *Packer) startNewBlob() error {
}
// Create temporary file
tempFile, err := os.CreateTemp("", "vaultik-blob-*.tmp")
tempFile, err := afero.TempFile(p.fs, "", "vaultik-blob-*.tmp")
if err != nil {
return fmt.Errorf("creating temp file: %w", err)
}
@@ -264,7 +270,7 @@ func (p *Packer) startNewBlob() error {
writer, err := blobgen.NewWriter(tempFile, p.compressionLevel, p.recipients)
if err != nil {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
_ = p.fs.Remove(tempFile.Name())
return fmt.Errorf("creating blobgen writer: %w", err)
}
@@ -469,7 +475,7 @@ func (p *Packer) cleanupTempFile() {
if p.currentBlob != nil && p.currentBlob.tempFile != nil {
name := p.currentBlob.tempFile.Name()
_ = p.currentBlob.tempFile.Close()
_ = os.Remove(name)
_ = p.fs.Remove(name)
}
}

View File

@@ -13,6 +13,7 @@ import (
"git.eeqj.de/sneak/vaultik/internal/database"
"git.eeqj.de/sneak/vaultik/internal/log"
"github.com/klauspost/compress/zstd"
"github.com/spf13/afero"
)
const (
@@ -45,6 +46,7 @@ func TestPacker(t *testing.T) {
CompressionLevel: 3,
Recipients: []string{testPublicKey},
Repositories: repos,
Fs: afero.NewMemMapFs(),
}
packer, err := NewPacker(cfg)
if err != nil {
@@ -134,6 +136,7 @@ func TestPacker(t *testing.T) {
CompressionLevel: 3,
Recipients: []string{testPublicKey},
Repositories: repos,
Fs: afero.NewMemMapFs(),
}
packer, err := NewPacker(cfg)
if err != nil {
@@ -216,6 +219,7 @@ func TestPacker(t *testing.T) {
CompressionLevel: 3,
Recipients: []string{testPublicKey},
Repositories: repos,
Fs: afero.NewMemMapFs(),
}
packer, err := NewPacker(cfg)
if err != nil {
@@ -304,6 +308,7 @@ func TestPacker(t *testing.T) {
CompressionLevel: 3,
Recipients: []string{testPublicKey},
Repositories: repos,
Fs: afero.NewMemMapFs(),
}
packer, err := NewPacker(cfg)
if err != nil {