Add deterministic deduplication, rclone backend, and database purge command
- Implement deterministic blob hashing using double SHA256 of uncompressed plaintext data, enabling deduplication even after the local DB is cleared
- Add Stat() check before blob upload to skip existing blobs in storage
- Add rclone storage backend for additional remote storage options
- Add 'vaultik database purge' command to erase local state DB
- Add 'vaultik remote check' command to verify remote connectivity
- Show configured snapshots in 'vaultik snapshot list' output
- Skip macOS resource fork files (._*) when listing remote snapshots
- Use multi-threaded zstd compression (CPUs - 2 threads)
- Add writer tests for double hashing behavior
This commit is contained in:
@@ -5,6 +5,8 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -15,6 +17,7 @@ import (
|
||||
"git.eeqj.de/sneak/vaultik/internal/snapshot"
|
||||
"git.eeqj.de/sneak/vaultik/internal/storage"
|
||||
"git.eeqj.de/sneak/vaultik/internal/types"
|
||||
"git.eeqj.de/sneak/vaultik/internal/vaultik"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -403,3 +406,138 @@ func TestBackupAndVerify(t *testing.T) {
|
||||
|
||||
t.Logf("Backup and verify test completed successfully")
|
||||
}
|
||||
|
||||
// TestBackupAndRestore tests the full backup and restore workflow
|
||||
// This test verifies that the restore code correctly handles the binary SQLite
|
||||
// database format that is exported by the snapshot manager.
|
||||
func TestBackupAndRestore(t *testing.T) {
|
||||
// Initialize logger
|
||||
log.Initialize(log.Config{})
|
||||
|
||||
// Create real temp directory for the database (SQLite needs real filesystem)
|
||||
realTempDir, err := os.MkdirTemp("", "vaultik-test-")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(realTempDir) }()
|
||||
|
||||
// Use real OS filesystem for this test
|
||||
fs := afero.NewOsFs()
|
||||
|
||||
// Create test directory structure and files
|
||||
dataDir := filepath.Join(realTempDir, "data")
|
||||
testFiles := map[string]string{
|
||||
filepath.Join(dataDir, "file1.txt"): "This is file 1 content",
|
||||
filepath.Join(dataDir, "file2.txt"): "This is file 2 content with more data",
|
||||
filepath.Join(dataDir, "subdir", "file3.txt"): "This is file 3 in a subdirectory",
|
||||
}
|
||||
|
||||
// Create directories and files
|
||||
for path, content := range testFiles {
|
||||
dir := filepath.Dir(path)
|
||||
if err := fs.MkdirAll(dir, 0755); err != nil {
|
||||
t.Fatalf("failed to create directory %s: %v", dir, err)
|
||||
}
|
||||
if err := afero.WriteFile(fs, path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("failed to create test file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create mock storage
|
||||
mockStorage := NewMockStorer()
|
||||
|
||||
// Test keypair
|
||||
agePublicKey := "age1ezrjmfpwsc95svdg0y54mums3zevgzu0x0ecq2f7tp8a05gl0sjq9q9wjg"
|
||||
ageSecretKey := "AGE-SECRET-KEY-19CR5YSFW59HM4TLD6GXVEDMZFTVVF7PPHKUT68TXSFPK7APHXA2QS2NJA5"
|
||||
|
||||
// Create database file
|
||||
dbPath := filepath.Join(realTempDir, "test.db")
|
||||
db, err := database.New(ctx, dbPath)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = db.Close() }()
|
||||
|
||||
repos := database.NewRepositories(db)
|
||||
|
||||
// Create config for snapshot manager
|
||||
cfg := &config.Config{
|
||||
AgeSecretKey: ageSecretKey,
|
||||
AgeRecipients: []string{agePublicKey},
|
||||
CompressionLevel: 3,
|
||||
}
|
||||
|
||||
// Create snapshot manager
|
||||
sm := snapshot.NewSnapshotManager(snapshot.SnapshotManagerParams{
|
||||
Repos: repos,
|
||||
Storage: mockStorage,
|
||||
Config: cfg,
|
||||
})
|
||||
sm.SetFilesystem(fs)
|
||||
|
||||
// Create scanner
|
||||
scanner := snapshot.NewScanner(snapshot.ScannerConfig{
|
||||
FS: fs,
|
||||
Storage: mockStorage,
|
||||
ChunkSize: int64(16 * 1024),
|
||||
MaxBlobSize: int64(100 * 1024),
|
||||
CompressionLevel: 3,
|
||||
AgeRecipients: []string{agePublicKey},
|
||||
Repositories: repos,
|
||||
})
|
||||
|
||||
// Create a snapshot
|
||||
snapshotID, err := sm.CreateSnapshot(ctx, "test-host", "test-version", "test-git")
|
||||
require.NoError(t, err)
|
||||
t.Logf("Created snapshot: %s", snapshotID)
|
||||
|
||||
// Run the backup (scan)
|
||||
result, err := scanner.Scan(ctx, dataDir, snapshotID)
|
||||
require.NoError(t, err)
|
||||
t.Logf("Scan complete: %d files, %d blobs", result.FilesScanned, result.BlobsCreated)
|
||||
|
||||
// Complete the snapshot
|
||||
err = sm.CompleteSnapshot(ctx, snapshotID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Export snapshot metadata (this uploads db.zst.age and manifest.json.zst)
|
||||
err = sm.ExportSnapshotMetadata(ctx, dbPath, snapshotID)
|
||||
require.NoError(t, err)
|
||||
t.Logf("Exported snapshot metadata")
|
||||
|
||||
// Verify metadata was uploaded
|
||||
keys, err := mockStorage.List(ctx, "metadata/")
|
||||
require.NoError(t, err)
|
||||
t.Logf("Metadata keys: %v", keys)
|
||||
assert.GreaterOrEqual(t, len(keys), 2, "Should have at least db.zst.age and manifest.json.zst")
|
||||
|
||||
// Close the source database
|
||||
err = db.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create Vaultik instance for restore
|
||||
vaultikApp := &vaultik.Vaultik{
|
||||
Config: cfg,
|
||||
Storage: mockStorage,
|
||||
Fs: fs,
|
||||
Stdout: io.Discard,
|
||||
Stderr: io.Discard,
|
||||
}
|
||||
vaultikApp.SetContext(ctx)
|
||||
|
||||
// Try to restore - this should work with binary SQLite format
|
||||
restoreDir := filepath.Join(realTempDir, "restored")
|
||||
err = vaultikApp.Restore(&vaultik.RestoreOptions{
|
||||
SnapshotID: snapshotID,
|
||||
TargetDir: restoreDir,
|
||||
})
|
||||
require.NoError(t, err, "Restore should succeed with binary SQLite database format")
|
||||
|
||||
// Verify restored files match originals
|
||||
for origPath, expectedContent := range testFiles {
|
||||
restoredPath := filepath.Join(restoreDir, origPath)
|
||||
restoredContent, err := afero.ReadFile(fs, restoredPath)
|
||||
require.NoError(t, err, "Should be able to read restored file: %s", restoredPath)
|
||||
assert.Equal(t, expectedContent, string(restoredContent), "Restored content should match original for: %s", origPath)
|
||||
}
|
||||
|
||||
t.Log("Backup and restore test completed successfully")
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user