vaultik/internal/vaultik/integration_test.go
sneak d7cd9aac27 Add end-to-end integration tests for Vaultik
- Create comprehensive integration tests with mock S3 client
- Add in-memory filesystem and SQLite database support for testing
- Test full backup workflow including chunking, packing, and uploading
- Add test to verify encrypted blob content
- Fix scanner to use afero filesystem for temp file cleanup
- Demonstrate successful backup and verification with mock dependencies
2025-07-26 15:52:23 +02:00

package vaultik_test

import (
	"bytes"
	"context"
	"database/sql"
	"fmt"
	"io"
	"strings"
	"sync"
	"testing"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/config"
	"git.eeqj.de/sneak/vaultik/internal/database"
	"git.eeqj.de/sneak/vaultik/internal/log"
	"git.eeqj.de/sneak/vaultik/internal/s3"
	"git.eeqj.de/sneak/vaultik/internal/snapshot"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// MockS3Client implements a mock S3 client for testing
type MockS3Client struct {
	mu      sync.Mutex
	storage map[string][]byte
	calls   []string
}

func NewMockS3Client() *MockS3Client {
	return &MockS3Client{
		storage: make(map[string][]byte),
		calls:   make([]string, 0),
	}
}

func (m *MockS3Client) PutObject(ctx context.Context, key string, reader io.Reader) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.calls = append(m.calls, "PutObject:"+key)
	data, err := io.ReadAll(reader)
	if err != nil {
		return err
	}
	m.storage[key] = data
	return nil
}

func (m *MockS3Client) PutObjectWithProgress(ctx context.Context, key string, reader io.Reader, size int64, progress s3.ProgressCallback) error {
	// For testing, just call PutObject
	return m.PutObject(ctx, key, reader)
}

func (m *MockS3Client) GetObject(ctx context.Context, key string) (io.ReadCloser, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.calls = append(m.calls, "GetObject:"+key)
	data, exists := m.storage[key]
	if !exists {
		return nil, fmt.Errorf("key not found: %s", key)
	}
	return io.NopCloser(bytes.NewReader(data)), nil
}

func (m *MockS3Client) StatObject(ctx context.Context, key string) (*s3.ObjectInfo, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.calls = append(m.calls, "StatObject:"+key)
	data, exists := m.storage[key]
	if !exists {
		return nil, fmt.Errorf("key not found: %s", key)
	}
	return &s3.ObjectInfo{
		Key:  key,
		Size: int64(len(data)),
	}, nil
}

func (m *MockS3Client) DeleteObject(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.calls = append(m.calls, "DeleteObject:"+key)
	delete(m.storage, key)
	return nil
}

func (m *MockS3Client) ListObjects(ctx context.Context, prefix string) ([]*s3.ObjectInfo, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.calls = append(m.calls, "ListObjects:"+prefix)
	var objects []*s3.ObjectInfo
	for key, data := range m.storage {
		if strings.HasPrefix(key, prefix) {
			objects = append(objects, &s3.ObjectInfo{
				Key:  key,
				Size: int64(len(data)),
			})
		}
	}
	return objects, nil
}

// GetCalls returns the list of S3 operations that were called
func (m *MockS3Client) GetCalls() []string {
	m.mu.Lock()
	defer m.mu.Unlock()
	calls := make([]string, len(m.calls))
	copy(calls, m.calls)
	return calls
}

// GetStorageSize returns the number of objects in storage
func (m *MockS3Client) GetStorageSize() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	return len(m.storage)
}
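
// Note: the scanner below consumes this mock through its S3Client field. If
// that dependency is an interface (the exact interface name in the s3/snapshot
// packages is assumed here, not confirmed), a compile-time assertion would
// keep the mock from drifting out of sync with it, along the lines of:
//
//	var _ snapshot.S3Client = (*MockS3Client)(nil)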

// TestEndToEndBackup tests the full backup workflow with mocked dependencies
func TestEndToEndBackup(t *testing.T) {
	// Initialize logger
	log.Initialize(log.Config{})

	// Create in-memory filesystem
	fs := afero.NewMemMapFs()

	// Create test directory structure and files
	testFiles := map[string]string{
		"/home/user/documents/file1.txt": "This is file 1 content",
		"/home/user/documents/file2.txt": "This is file 2 content with more data",
		"/home/user/pictures/photo1.jpg": "Binary photo data here...",
		"/home/user/code/main.go":        "package main\n\nfunc main() {\n\tprintln(\"Hello, World!\")\n}",
	}

	// Create all directories first
	dirs := []string{
		"/home/user/documents",
		"/home/user/pictures",
		"/home/user/code",
	}
	for _, dir := range dirs {
		if err := fs.MkdirAll(dir, 0755); err != nil {
			t.Fatalf("failed to create directory %s: %v", dir, err)
		}
	}

	// Create test files
	for path, content := range testFiles {
		if err := afero.WriteFile(fs, path, []byte(content), 0644); err != nil {
			t.Fatalf("failed to create test file %s: %v", path, err)
		}
	}

	// Create mock S3 client
	mockS3 := NewMockS3Client()

	// Create test configuration
	cfg := &config.Config{
		SourceDirs:       []string{"/home/user"},
		Exclude:          []string{"*.tmp", "*.log"},
		ChunkSize:        config.Size(16 * 1024),  // 16KB chunks
		BlobSizeLimit:    config.Size(100 * 1024), // 100KB blobs
		CompressionLevel: 3,
		AgeRecipients:    []string{"age1ezrjmfpwsc95svdg0y54mums3zevgzu0x0ecq2f7tp8a05gl0sjq9q9wjg"}, // Test public key
		AgeSecretKey:     "AGE-SECRET-KEY-19CR5YSFW59HM4TLD6GXVEDMZFTVVF7PPHKUT68TXSFPK7APHXA2QS2NJA5", // Test private key
		S3: config.S3Config{
			Endpoint:        "http://localhost:9000", // MinIO endpoint for testing
			Region:          "us-east-1",
			Bucket:          "test-bucket",
			AccessKeyID:     "test-access",
			SecretAccessKey: "test-secret",
		},
		IndexPath: ":memory:", // In-memory SQLite database
	}
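
	// The age keypair above is a throwaway key used only by this test. If a
	// fresh pair were ever needed, filippo.io/age could generate one; a
	// minimal sketch (the age import and this code are not part of the test):
	//
	//	id, err := age.GenerateX25519Identity()
	//	// id.Recipient().String() -> "age1..." value for AgeRecipients
	//	// id.String()             -> "AGE-SECRET-KEY-..." value for AgeSecretKey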

	// Rather than wiring up the full application, this test exercises the core
	// backup logic by driving the scanner directly with the mock S3 client.
	ctx := context.Background()

	// Create in-memory database
	db, err := database.New(ctx, ":memory:")
	require.NoError(t, err)
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}()

	repos := database.NewRepositories(db)

	// Create scanner with mock S3 client
	scanner := snapshot.NewScanner(snapshot.ScannerConfig{
		FS:               fs,
		ChunkSize:        cfg.ChunkSize.Int64(),
		Repositories:     repos,
		S3Client:         mockS3,
		MaxBlobSize:      cfg.BlobSizeLimit.Int64(),
		CompressionLevel: cfg.CompressionLevel,
		AgeRecipients:    cfg.AgeRecipients,
		EnableProgress:   false,
	})

	// Create a snapshot record
	snapshotID := "test-snapshot-001"
	err = repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		// Named snap to avoid shadowing the snapshot package.
		snap := &database.Snapshot{
			ID:             snapshotID,
			Hostname:       "test-host",
			VaultikVersion: "test-version",
			StartedAt:      time.Now(),
		}
		return repos.Snapshots.Create(ctx, tx, snap)
	})
	require.NoError(t, err)

	// Run the backup scan
	result, err := scanner.Scan(ctx, "/home/user", snapshotID)
	require.NoError(t, err)

	// Verify scan results. The scanner counts directories as well as regular
	// files, so the total covers the 4 test files plus the directories it
	// walks under /home/user.
	assert.GreaterOrEqual(t, result.FilesScanned, 4, "Should scan at least 4 files")
	assert.Greater(t, result.BytesScanned, int64(0), "Should scan some bytes")
	assert.Greater(t, result.ChunksCreated, 0, "Should create chunks")
	assert.Greater(t, result.BlobsCreated, 0, "Should create blobs")

	// Verify S3 operations
	calls := mockS3.GetCalls()
	t.Logf("S3 operations performed: %v", calls)

	// Should have uploaded at least one blob
	blobUploads := 0
	for _, call := range calls {
		if strings.HasPrefix(call, "PutObject:blobs/") {
			blobUploads++
		}
	}
	assert.Greater(t, blobUploads, 0, "Should upload at least one blob")

	// Verify files in database
	files, err := repos.Files.ListByPrefix(ctx, "/home/user")
	require.NoError(t, err)

	// Count only regular files (not directories); 0x80000000 is the fs.ModeDir bit
	regularFiles := 0
	for _, f := range files {
		if f.Mode&0x80000000 == 0 {
			regularFiles++
		}
	}
	assert.Equal(t, 4, regularFiles, "Should have 4 regular files in database")

	// Verify chunks were created by checking a specific file
	fileChunks, err := repos.FileChunks.GetByPath(ctx, "/home/user/documents/file1.txt")
	require.NoError(t, err)
	assert.Greater(t, len(fileChunks), 0, "Should have chunks for file1.txt")

	// Verify blobs were uploaded to S3
	assert.Greater(t, mockS3.GetStorageSize(), 0, "Should have blobs in S3 storage")

	// Completing the snapshot is not exercised here; in a fuller integration
	// test we would also update the snapshot record.

	// Create a snapshot manager to test metadata export
	snapshotManager := &snapshot.SnapshotManager{}
	snapshotManager.SetFilesystem(fs)

	// Note: We can't fully test snapshot metadata export without a proper S3
	// client mock that implements all required methods. This would require
	// refactoring the S3 client interface to be more testable.
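
	// A narrower, test-friendly interface might look like the sketch below
	// (hypothetical names, not the current s3 package API):
	//
	//	type BlobStore interface {
	//		PutObject(ctx context.Context, key string, r io.Reader) error
	//		GetObject(ctx context.Context, key string) (io.ReadCloser, error)
	//		ListObjects(ctx context.Context, prefix string) ([]*s3.ObjectInfo, error)
	//	}
	//
	// If SnapshotManager accepted something like this, MockS3Client could also
	// stand in for the metadata export path.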

	t.Logf("Backup completed successfully:")
	t.Logf(" Files scanned: %d", result.FilesScanned)
	t.Logf(" Bytes scanned: %d", result.BytesScanned)
	t.Logf(" Chunks created: %d", result.ChunksCreated)
	t.Logf(" Blobs created: %d", result.BlobsCreated)
	t.Logf(" S3 storage size: %d objects", mockS3.GetStorageSize())
}

// TestBackupAndVerify tests backing up files and verifying the blobs
func TestBackupAndVerify(t *testing.T) {
	// Initialize logger
	log.Initialize(log.Config{})

	// Create in-memory filesystem
	fs := afero.NewMemMapFs()

	// Create test files
	testContent := "This is a test file with some content that should be backed up"
	err := fs.MkdirAll("/data", 0755)
	require.NoError(t, err)
	err = afero.WriteFile(fs, "/data/test.txt", []byte(testContent), 0644)
	require.NoError(t, err)

	// Create mock S3 client
	mockS3 := NewMockS3Client()

	// Create test database
	ctx := context.Background()
	db, err := database.New(ctx, ":memory:")
	require.NoError(t, err)
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}()

	repos := database.NewRepositories(db)

	// Create scanner
	scanner := snapshot.NewScanner(snapshot.ScannerConfig{
		FS:               fs,
		ChunkSize:        int64(1024 * 16), // 16KB chunks
		Repositories:     repos,
		S3Client:         mockS3,
		MaxBlobSize:      int64(1024 * 1024), // 1MB blobs
		CompressionLevel: 3,
		AgeRecipients:    []string{"age1ezrjmfpwsc95svdg0y54mums3zevgzu0x0ecq2f7tp8a05gl0sjq9q9wjg"}, // Test public key
	})

	// Create a snapshot
	snapshotID := "test-snapshot-001"
	err = repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		// Named snap to avoid shadowing the snapshot package.
		snap := &database.Snapshot{
			ID:             snapshotID,
			Hostname:       "test-host",
			VaultikVersion: "test-version",
			StartedAt:      time.Now(),
		}
		return repos.Snapshots.Create(ctx, tx, snap)
	})
	require.NoError(t, err)

	// Run the backup
	result, err := scanner.Scan(ctx, "/data", snapshotID)
	require.NoError(t, err)

	// Verify backup created blobs
	assert.Greater(t, result.BlobsCreated, 0, "Should create at least one blob")
	assert.Equal(t, result.BlobsCreated, mockS3.GetStorageSize(), "S3 should have the blobs")

	// Verify we can retrieve the blobs from S3
	objects, err := mockS3.ListObjects(ctx, "blobs/")
	require.NoError(t, err)
	assert.Len(t, objects, result.BlobsCreated, "Should have correct number of blobs in S3")

	// Get the first blob and verify it exists
	if len(objects) > 0 {
		blobKey := objects[0].Key
		t.Logf("Verifying blob: %s", blobKey)

		// Get blob info
		blobInfo, err := mockS3.StatObject(ctx, blobKey)
		require.NoError(t, err)
		assert.Greater(t, blobInfo.Size, int64(0), "Blob should have content")

		// Get blob content
		reader, err := mockS3.GetObject(ctx, blobKey)
		require.NoError(t, err)
		defer func() { _ = reader.Close() }()

		// Verify blob data is encrypted (should not contain plaintext)
		blobData, err := io.ReadAll(reader)
		require.NoError(t, err)
		assert.NotContains(t, string(blobData), testContent, "Blob should be encrypted")
		assert.Greater(t, len(blobData), 0, "Blob should have data")
	}
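
	// A fuller round-trip check could decrypt the blob read above with the
	// matching AGE-SECRET-KEY and confirm the chunk plaintext is recoverable.
	// Sketch only: the blob layout (age encryption over compressed chunk data)
	// is assumed here rather than taken from the format code, and
	// testSecretKey stands in for the test identity string.
	//
	//	id, _ := age.ParseX25519Identity(testSecretKey)
	//	plain, err := age.Decrypt(bytes.NewReader(blobData), id)
	//	// ...then decompress and check the output contains testContent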

	t.Logf("Backup and verify test completed successfully")
}