vaultik/internal/vaultik/verify.go

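// verify.go checks a snapshot against the S3 bucket layout implied by the
// path construction in this file (inferred from the fmt.Sprintf calls
// below, not from a separate spec):
//
//	metadata/<snapshot-id>/manifest.json.zst  zstd-compressed manifest
//	metadata/<snapshot-id>/db.zst.age         age-encrypted, zstd-compressed SQLite database
//	blobs/<aa>/<bb>/<hash>                    age-encrypted, zstd-compressed blob, sharded by hash prefix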

package vaultik

import (
	"crypto/sha256"
	"database/sql"
	"encoding/hex"
	"fmt"
	"io"
	"os"

	"git.eeqj.de/sneak/vaultik/internal/log"
	"git.eeqj.de/sneak/vaultik/internal/snapshot"
	"github.com/dustin/go-humanize"
	"github.com/klauspost/compress/zstd"
	_ "github.com/mattn/go-sqlite3"
)

// VerifyOptions contains options for the verify command.
type VerifyOptions struct {
	// Deep, when true, downloads every blob and verifies its chunk
	// hashes; when false, only blob existence and sizes are checked.
	Deep bool
}

// RunDeepVerify verifies a snapshot against the S3 bucket. It always
// checks the manifest, the metadata database, and blob existence; with
// opts.Deep set it additionally downloads and verifies blob contents.
func (v *Vaultik) RunDeepVerify(snapshotID string, opts *VerifyOptions) error {
	// Decryption capability is required even for shallow verification,
	// because the snapshot metadata database is age-encrypted.
	if !v.CanDecrypt() {
		return fmt.Errorf("age_secret_key missing from config - required for verification")
	}

	mode := "shallow"
	if opts.Deep {
		mode = "deep"
	}
	log.Info("Starting snapshot verification",
		"snapshot_id", snapshotID,
		"mode", mode,
	)

	// Step 1: Download manifest
	manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)
	log.Info("Downloading manifest", "path", manifestPath)
	manifestReader, err := v.S3Client.GetObject(v.ctx, manifestPath)
	if err != nil {
		return fmt.Errorf("failed to download manifest: %w", err)
	}
	defer func() { _ = manifestReader.Close() }()

	// Decompress and decode manifest
	manifest, err := snapshot.DecodeManifest(manifestReader)
	if err != nil {
		return fmt.Errorf("failed to decode manifest: %w", err)
	}
	log.Info("Manifest loaded",
		"blob_count", manifest.BlobCount,
		"total_size", humanize.Bytes(uint64(manifest.TotalCompressedSize)),
	)

	// Step 2: Download and decrypt database
	dbPath := fmt.Sprintf("metadata/%s/db.zst.age", snapshotID)
	log.Info("Downloading encrypted database", "path", dbPath)
	dbReader, err := v.S3Client.GetObject(v.ctx, dbPath)
	if err != nil {
		return fmt.Errorf("failed to download database: %w", err)
	}
	defer func() { _ = dbReader.Close() }()

	// Decrypt and decompress database
	tempDB, err := v.decryptAndLoadDatabase(dbReader)
	if err != nil {
		return fmt.Errorf("failed to decrypt database: %w", err)
	}
	defer func() {
		if tempDB != nil {
			_ = tempDB.Close()
		}
	}()

	// Step 3: Compare blob lists
	if err := v.verifyBlobLists(snapshotID, manifest, tempDB.DB); err != nil {
		return err
	}

	// Step 4: Verify blob existence
	if err := v.verifyBlobExistence(manifest); err != nil {
		return err
	}

	// Step 5: Deep verification if requested
	if opts.Deep {
		if err := v.performDeepVerification(manifest, tempDB.DB); err != nil {
			return err
		}
	}

	log.Info("✓ Verification completed successfully",
		"snapshot_id", snapshotID,
		"mode", mode,
	)
	return nil
}
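
// Example invocation (hypothetical; actual snapshot IDs and CLI wiring
// live elsewhere in vaultik):
//
//	opts := &VerifyOptions{Deep: true}
//	if err := v.RunDeepVerify("<snapshot-id>", opts); err != nil {
//		return fmt.Errorf("verify failed: %w", err)
//	}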

// tempDB wraps sql.DB with cleanup of the backing temporary file.
type tempDB struct {
	*sql.DB
	tempPath string
}

// Close closes the database and removes the temporary file.
func (t *tempDB) Close() error {
	err := t.DB.Close()
	_ = os.Remove(t.tempPath)
	return err
}

// decryptAndLoadDatabase decrypts the age-encrypted stream, decompresses
// it, and opens the resulting SQLite database. The data is spooled to a
// temporary file because the sqlite3 driver opens databases from a
// filesystem path, not from a stream; tempDB.Close removes the file.
func (v *Vaultik) decryptAndLoadDatabase(reader io.ReadCloser) (*tempDB, error) {
	// Get decryptor
	decryptor, err := v.GetDecryptor()
	if err != nil {
		return nil, fmt.Errorf("failed to get decryptor: %w", err)
	}

	// Decrypt the stream
	decryptedReader, err := decryptor.DecryptStream(reader)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt database: %w", err)
	}

	// Decompress the database
	decompressor, err := zstd.NewReader(decryptedReader)
	if err != nil {
		return nil, fmt.Errorf("failed to create decompressor: %w", err)
	}
	defer decompressor.Close()

	// Create temporary file for database
	tempFile, err := os.CreateTemp("", "vaultik-verify-*.db")
	if err != nil {
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}
	tempPath := tempFile.Name()

	// Copy decompressed data to temp file
	if _, err := io.Copy(tempFile, decompressor); err != nil {
		_ = tempFile.Close()
		_ = os.Remove(tempPath)
		return nil, fmt.Errorf("failed to write database: %w", err)
	}

	// Close temp file before opening with sqlite
	if err := tempFile.Close(); err != nil {
		_ = os.Remove(tempPath)
		return nil, fmt.Errorf("failed to close temp file: %w", err)
	}

	// Open the database
	db, err := sql.Open("sqlite3", tempPath)
	if err != nil {
		_ = os.Remove(tempPath)
		return nil, fmt.Errorf("failed to open database: %w", err)
	}

	return &tempDB{
		DB:       db,
		tempPath: tempPath,
	}, nil
}

// verifyBlobLists compares the blob lists between manifest and database.
func (v *Vaultik) verifyBlobLists(snapshotID string, manifest *snapshot.Manifest, db *sql.DB) error {
	log.Info("Verifying blob lists match between manifest and database")

	// Get blobs from database
	query := `
		SELECT b.blob_hash, b.compressed_size
		FROM snapshot_blobs sb
		JOIN blobs b ON sb.blob_hash = b.blob_hash
		WHERE sb.snapshot_id = ?
		ORDER BY b.blob_hash
	`
	rows, err := db.QueryContext(v.ctx, query, snapshotID)
	if err != nil {
		return fmt.Errorf("failed to query snapshot blobs: %w", err)
	}
	defer func() { _ = rows.Close() }()

	// Build map of database blobs
	dbBlobs := make(map[string]int64)
	for rows.Next() {
		var hash string
		var size int64
		if err := rows.Scan(&hash, &size); err != nil {
			return fmt.Errorf("failed to scan blob row: %w", err)
		}
		dbBlobs[hash] = size
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("error iterating snapshot blobs: %w", err)
	}

	// Build map of manifest blobs
	manifestBlobs := make(map[string]int64)
	for _, blob := range manifest.Blobs {
		manifestBlobs[blob.Hash] = blob.CompressedSize
	}

	// Compare counts
	if len(dbBlobs) != len(manifestBlobs) {
		return fmt.Errorf("blob count mismatch: database has %d blobs, manifest has %d blobs",
			len(dbBlobs), len(manifestBlobs))
	}

	// Check each blob exists in both, with matching sizes
	for hash, dbSize := range dbBlobs {
		manifestSize, exists := manifestBlobs[hash]
		if !exists {
			return fmt.Errorf("blob %s exists in database but not in manifest", hash)
		}
		if dbSize != manifestSize {
			return fmt.Errorf("blob %s size mismatch: database has %d bytes, manifest has %d bytes",
				hash, dbSize, manifestSize)
		}
	}
	for hash := range manifestBlobs {
		if _, exists := dbBlobs[hash]; !exists {
			return fmt.Errorf("blob %s exists in manifest but not in database", hash)
		}
	}

	log.Info("✓ Blob lists match", "blob_count", len(dbBlobs))
	return nil
}
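
// blobPath returns the S3 object key for a blob, sharded two levels deep
// by hash prefix: blobs/<aa>/<bb>/<full-hash>. The hash is expected to be
// a lowercase hex string at least four characters long.
func blobPath(hash string) string {
	return fmt.Sprintf("blobs/%s/%s/%s", hash[:2], hash[2:4], hash)
}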

// verifyBlobExistence checks that every blob in the manifest exists in S3
// with the expected size. This is a shallow check: content is not read,
// so corruption within a blob is only caught by deep verification.
func (v *Vaultik) verifyBlobExistence(manifest *snapshot.Manifest) error {
	log.Info("Verifying blob existence in S3", "blob_count", len(manifest.Blobs))
	for i, blob := range manifest.Blobs {
		// Check blob exists with a HEAD request
		stat, err := v.S3Client.StatObject(v.ctx, blobPath(blob.Hash))
		if err != nil {
			return fmt.Errorf("blob %s missing from S3: %w", blob.Hash, err)
		}

		// Verify size matches
		if stat.Size != blob.CompressedSize {
			return fmt.Errorf("blob %s size mismatch: S3 has %d bytes, manifest has %d bytes",
				blob.Hash, stat.Size, blob.CompressedSize)
		}

		// Progress update every 100 blobs
		if (i+1)%100 == 0 || i == len(manifest.Blobs)-1 {
			log.Info("Blob existence check progress",
				"checked", i+1,
				"total", len(manifest.Blobs),
				"percent", fmt.Sprintf("%.1f%%", float64(i+1)/float64(len(manifest.Blobs))*100),
			)
		}
	}
	log.Info("✓ All blobs exist in S3")
	return nil
}

// performDeepVerification downloads and verifies the content of each blob.
func (v *Vaultik) performDeepVerification(manifest *snapshot.Manifest, db *sql.DB) error {
	log.Info("Starting deep verification - downloading and verifying all blobs")
	totalBytes := int64(0)
	for i, blobInfo := range manifest.Blobs {
		// Verify individual blob
		if err := v.verifyBlob(blobInfo, db); err != nil {
			return fmt.Errorf("blob %s verification failed: %w", blobInfo.Hash, err)
		}
		totalBytes += blobInfo.CompressedSize

		// Progress update
		log.Info("Deep verification progress",
			"blob", fmt.Sprintf("%d/%d", i+1, len(manifest.Blobs)),
			"total_downloaded", humanize.Bytes(uint64(totalBytes)),
			"percent", fmt.Sprintf("%.1f%%", float64(i+1)/float64(len(manifest.Blobs))*100),
		)
	}
	log.Info("✓ Deep verification completed successfully",
		"blobs_verified", len(manifest.Blobs),
		"total_size", humanize.Bytes(uint64(totalBytes)),
	)
	return nil
}

// verifyBlob downloads a single blob, decrypts and decompresses it, and
// verifies every chunk hash recorded for it in the metadata database.
func (v *Vaultik) verifyBlob(blobInfo snapshot.BlobInfo, db *sql.DB) error {
	// Download blob
	reader, err := v.S3Client.GetObject(v.ctx, blobPath(blobInfo.Hash))
	if err != nil {
		return fmt.Errorf("failed to download: %w", err)
	}
	defer func() { _ = reader.Close() }()

	// Get decryptor
	decryptor, err := v.GetDecryptor()
	if err != nil {
		return fmt.Errorf("failed to get decryptor: %w", err)
	}

	// Decrypt blob
	decryptedReader, err := decryptor.DecryptStream(reader)
	if err != nil {
		return fmt.Errorf("failed to decrypt: %w", err)
	}

	// Decompress blob
	decompressor, err := zstd.NewReader(decryptedReader)
	if err != nil {
		return fmt.Errorf("failed to decompress: %w", err)
	}
	defer decompressor.Close()

	// Query blob chunks from database to get offsets and lengths. Offsets
	// are positions within the decompressed blob stream, so reading the
	// chunks in offset order lets us verify them in a single pass.
	query := `
		SELECT bc.chunk_hash, bc.offset, bc.length
		FROM blob_chunks bc
		JOIN blobs b ON bc.blob_id = b.id
		WHERE b.blob_hash = ?
		ORDER BY bc.offset
	`
	rows, err := db.QueryContext(v.ctx, query, blobInfo.Hash)
	if err != nil {
		return fmt.Errorf("failed to query blob chunks: %w", err)
	}
	defer func() { _ = rows.Close() }()

	var lastOffset int64 = -1
	chunkCount := 0
	totalRead := int64(0)

	// Verify each chunk in the blob
	for rows.Next() {
		var chunkHash string
		var offset, length int64
		if err := rows.Scan(&chunkHash, &offset, &length); err != nil {
			return fmt.Errorf("failed to scan chunk row: %w", err)
		}

		// Verify chunk ordering (chunks are assumed non-overlapping)
		if offset <= lastOffset {
			return fmt.Errorf("chunks out of order: offset %d after %d", offset, lastOffset)
		}
		lastOffset = offset

		// Skip any gap before this chunk; the zstd reader is sequential,
		// so gaps are discarded rather than seeked over
		if offset > totalRead {
			skipBytes := offset - totalRead
			if _, err := io.CopyN(io.Discard, decompressor, skipBytes); err != nil {
				return fmt.Errorf("failed to skip to offset %d: %w", offset, err)
			}
			totalRead = offset
		}

		// Read chunk data
		chunkData := make([]byte, length)
		if _, err := io.ReadFull(decompressor, chunkData); err != nil {
			return fmt.Errorf("failed to read chunk at offset %d: %w", offset, err)
		}
		totalRead += length

		// Verify chunk hash
		sum := sha256.Sum256(chunkData)
		calculatedHash := hex.EncodeToString(sum[:])
		if calculatedHash != chunkHash {
			return fmt.Errorf("chunk hash mismatch at offset %d: calculated %s, expected %s",
				offset, calculatedHash, chunkHash)
		}
		chunkCount++
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("error iterating blob chunks: %w", err)
	}

	log.Debug("Blob verified",
		"hash", blobInfo.Hash,
		"chunks", chunkCount,
		"size", humanize.Bytes(uint64(blobInfo.CompressedSize)),
	)
	return nil
}