vaultik/internal/blob/packer_test.go

package blob

import (
	"bytes"
	"context"
	"crypto/sha256"
	"database/sql"
	"encoding/hex"
	"io"
	"testing"

	"filippo.io/age"
	"git.eeqj.de/sneak/vaultik/internal/database"
	"git.eeqj.de/sneak/vaultik/internal/log"
	"github.com/klauspost/compress/zstd"
)

const (
	// Test key from test/insecure-integration-test.key
	testPrivateKey = "AGE-SECRET-KEY-19CR5YSFW59HM4TLD6GXVEDMZFTVVF7PPHKUT68TXSFPK7APHXA2QS2NJA5"
	testPublicKey  = "age1ezrjmfpwsc95svdg0y54mums3zevgzu0x0ecq2f7tp8a05gl0sjq9q9wjg"
)
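
// TestPacker covers the blob packer: packing one chunk, packing many
// chunks into a single blob, enforcing the configured blob size limit,
// and round-tripping blob contents through age decryption and zstd
// decompression.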
func TestPacker(t *testing.T) {
	// Initialize logger for tests
	log.Initialize(log.Config{})

	// Parse test identity
	identity, err := age.ParseX25519Identity(testPrivateKey)
	if err != nil {
		t.Fatalf("failed to parse test identity: %v", err)
	}

t.Run("single chunk creates single blob", func(t *testing.T) {
// Create test database
db, err := database.NewTestDB()
if err != nil {
t.Fatalf("failed to create test db: %v", err)
}
defer func() { _ = db.Close() }()
repos := database.NewRepositories(db)
cfg := PackerConfig{
MaxBlobSize: 10 * 1024 * 1024, // 10MB
CompressionLevel: 3,
Recipients: []string{testPublicKey},
Repositories: repos,
}
packer, err := NewPacker(cfg)
if err != nil {
t.Fatalf("failed to create packer: %v", err)
}
// Create a chunk
data := []byte("Hello, World!")
hash := sha256.Sum256(data)
hashStr := hex.EncodeToString(hash[:])
// Create chunk in database first
dbChunk := &database.Chunk{
ChunkHash: hashStr,
Size: int64(len(data)),
}
err = repos.WithTx(context.Background(), func(ctx context.Context, tx *sql.Tx) error {
return repos.Chunks.Create(ctx, tx, dbChunk)
})
if err != nil {
t.Fatalf("failed to create chunk in db: %v", err)
}
chunk := &ChunkRef{
Hash: hashStr,
Data: data,
}
// Add chunk
if err := packer.AddChunk(chunk); err != nil {
t.Fatalf("failed to add chunk: %v", err)
}
// Flush
if err := packer.Flush(); err != nil {
t.Fatalf("failed to flush: %v", err)
}
// Get finished blobs
blobs := packer.GetFinishedBlobs()
if len(blobs) != 1 {
t.Fatalf("expected 1 blob, got %d", len(blobs))
}
blob := blobs[0]
if len(blob.Chunks) != 1 {
t.Errorf("expected 1 chunk in blob, got %d", len(blob.Chunks))
}
// Note: Very small data may not compress well
t.Logf("Compression: %d -> %d bytes", blob.Uncompressed, blob.Compressed)
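
		// Blobs are zstd-compressed and then age-encrypted, so reading
		// one back reverses that order: decrypt first, then decompress.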
		// Decrypt the blob data
		decrypted, err := age.Decrypt(bytes.NewReader(blob.Data), identity)
		if err != nil {
			t.Fatalf("failed to decrypt blob: %v", err)
		}

		// Decompress the decrypted data
		reader, err := zstd.NewReader(decrypted)
		if err != nil {
			t.Fatalf("failed to create decompressor: %v", err)
		}
		defer reader.Close()

		var decompressed bytes.Buffer
		if _, err := io.Copy(&decompressed, reader); err != nil {
			t.Fatalf("failed to decompress: %v", err)
		}

		if !bytes.Equal(decompressed.Bytes(), data) {
			t.Error("decompressed data doesn't match original")
		}
	})

t.Run("multiple chunks packed together", func(t *testing.T) {
// Create test database
db, err := database.NewTestDB()
if err != nil {
t.Fatalf("failed to create test db: %v", err)
}
defer func() { _ = db.Close() }()
repos := database.NewRepositories(db)
cfg := PackerConfig{
MaxBlobSize: 10 * 1024 * 1024, // 10MB
CompressionLevel: 3,
Recipients: []string{testPublicKey},
Repositories: repos,
}
packer, err := NewPacker(cfg)
if err != nil {
t.Fatalf("failed to create packer: %v", err)
}
// Create multiple small chunks
chunks := make([]*ChunkRef, 10)
for i := 0; i < 10; i++ {
data := bytes.Repeat([]byte{byte(i)}, 1000)
hash := sha256.Sum256(data)
hashStr := hex.EncodeToString(hash[:])
// Create chunk in database first
dbChunk := &database.Chunk{
ChunkHash: hashStr,
Size: int64(len(data)),
}
err = repos.WithTx(context.Background(), func(ctx context.Context, tx *sql.Tx) error {
return repos.Chunks.Create(ctx, tx, dbChunk)
})
if err != nil {
t.Fatalf("failed to create chunk in db: %v", err)
}
chunks[i] = &ChunkRef{
Hash: hashStr,
Data: data,
}
}
// Add all chunks
for _, chunk := range chunks {
err := packer.AddChunk(chunk)
if err != nil {
t.Fatalf("failed to add chunk: %v", err)
}
}
// Flush
if err := packer.Flush(); err != nil {
t.Fatalf("failed to flush: %v", err)
}
// Should have one blob with all chunks
blobs := packer.GetFinishedBlobs()
if len(blobs) != 1 {
t.Fatalf("expected 1 blob, got %d", len(blobs))
}
if len(blobs[0].Chunks) != 10 {
t.Errorf("expected 10 chunks in blob, got %d", len(blobs[0].Chunks))
}
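
		// Chunks are laid out back to back in the blob, so each
		// chunk's offset is the running total of the preceding lengths.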
		// Verify offsets are correct
		expectedOffset := int64(0)
		for i, chunkRef := range blobs[0].Chunks {
			if chunkRef.Offset != expectedOffset {
				t.Errorf("chunk %d: expected offset %d, got %d", i, expectedOffset, chunkRef.Offset)
			}
			if chunkRef.Length != 1000 {
				t.Errorf("chunk %d: expected length 1000, got %d", i, chunkRef.Length)
			}
			expectedOffset += chunkRef.Length
		}
	})

t.Run("blob size limit enforced", func(t *testing.T) {
// Create test database
db, err := database.NewTestDB()
if err != nil {
t.Fatalf("failed to create test db: %v", err)
}
defer func() { _ = db.Close() }()
repos := database.NewRepositories(db)
// Small blob size limit to force multiple blobs
cfg := PackerConfig{
MaxBlobSize: 5000, // 5KB max
CompressionLevel: 3,
Recipients: []string{testPublicKey},
Repositories: repos,
}
packer, err := NewPacker(cfg)
if err != nil {
t.Fatalf("failed to create packer: %v", err)
}
// Create chunks that will exceed the limit
chunks := make([]*ChunkRef, 10)
for i := 0; i < 10; i++ {
data := bytes.Repeat([]byte{byte(i)}, 1000) // 1KB each
hash := sha256.Sum256(data)
hashStr := hex.EncodeToString(hash[:])
// Create chunk in database first
dbChunk := &database.Chunk{
ChunkHash: hashStr,
Size: int64(len(data)),
}
err = repos.WithTx(context.Background(), func(ctx context.Context, tx *sql.Tx) error {
return repos.Chunks.Create(ctx, tx, dbChunk)
})
if err != nil {
t.Fatalf("failed to create chunk in db: %v", err)
}
chunks[i] = &ChunkRef{
Hash: hashStr,
Data: data,
}
}
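
		// AddChunk signals ErrBlobSizeLimitExceeded when a chunk would
		// push the current blob past MaxBlobSize; the caller finalizes
		// the current blob and then retries the chunk, which is the
		// protocol this loop exercises.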
		blobCount := 0

		// Add chunks and handle size limit errors
		for _, chunk := range chunks {
			err := packer.AddChunk(chunk)
			if err == ErrBlobSizeLimitExceeded {
				// Finalize current blob
				if err := packer.FinalizeBlob(); err != nil {
					t.Fatalf("failed to finalize blob: %v", err)
				}
				blobCount++

				// Retry adding the chunk
				if err := packer.AddChunk(chunk); err != nil {
					t.Fatalf("failed to add chunk after finalize: %v", err)
				}
			} else if err != nil {
				t.Fatalf("failed to add chunk: %v", err)
			}
		}

		// Flush remaining
		if err := packer.Flush(); err != nil {
			t.Fatalf("failed to flush: %v", err)
		}

		// Get all blobs
		blobs := packer.GetFinishedBlobs()
		totalBlobs := blobCount + len(blobs)

		// Should have multiple blobs due to size limit
		if totalBlobs < 2 {
			t.Errorf("expected multiple blobs due to size limit, got %d", totalBlobs)
		}

		// Verify each blob respects size limit (approximately)
		for _, blob := range blobs {
			if blob.Compressed > 6000 { // Allow some overhead
				t.Errorf("blob size %d exceeds limit", blob.Compressed)
			}
		}
	})

t.Run("with encryption", func(t *testing.T) {
// Create test database
db, err := database.NewTestDB()
if err != nil {
t.Fatalf("failed to create test db: %v", err)
}
defer func() { _ = db.Close() }()
repos := database.NewRepositories(db)
		// Reuse the test identity parsed at the top of TestPacker
		cfg := PackerConfig{
			MaxBlobSize:      10 * 1024 * 1024, // 10MB
			CompressionLevel: 3,
			Recipients:       []string{testPublicKey},
			Repositories:     repos,
		}
		packer, err := NewPacker(cfg)
		if err != nil {
			t.Fatalf("failed to create packer: %v", err)
		}

		// Create test data
		data := bytes.Repeat([]byte("Test data for encryption!"), 100)
		hash := sha256.Sum256(data)
		hashStr := hex.EncodeToString(hash[:])

		// Create chunk in database first
		dbChunk := &database.Chunk{
			ChunkHash: hashStr,
			Size:      int64(len(data)),
		}
		err = repos.WithTx(context.Background(), func(ctx context.Context, tx *sql.Tx) error {
			return repos.Chunks.Create(ctx, tx, dbChunk)
		})
		if err != nil {
			t.Fatalf("failed to create chunk in db: %v", err)
		}

		chunk := &ChunkRef{
			Hash: hashStr,
			Data: data,
		}

		// Add chunk and flush
		if err := packer.AddChunk(chunk); err != nil {
			t.Fatalf("failed to add chunk: %v", err)
		}
		if err := packer.Flush(); err != nil {
			t.Fatalf("failed to flush: %v", err)
		}

		// Get blob
		blobs := packer.GetFinishedBlobs()
		if len(blobs) != 1 {
			t.Fatalf("expected 1 blob, got %d", len(blobs))
		}
		blob := blobs[0]

		// Decrypt the blob
		decrypted, err := age.Decrypt(bytes.NewReader(blob.Data), identity)
		if err != nil {
			t.Fatalf("failed to decrypt blob: %v", err)
		}
		var decryptedData bytes.Buffer
		if _, err := decryptedData.ReadFrom(decrypted); err != nil {
			t.Fatalf("failed to read decrypted data: %v", err)
		}

		// Decompress
		reader, err := zstd.NewReader(&decryptedData)
		if err != nil {
			t.Fatalf("failed to create decompressor: %v", err)
		}
		defer reader.Close()

		var decompressed bytes.Buffer
		if _, err := decompressed.ReadFrom(reader); err != nil {
			t.Fatalf("failed to decompress: %v", err)
		}

		// Verify data
		if !bytes.Equal(decompressed.Bytes(), data) {
			t.Error("decrypted and decompressed data doesn't match original")
		}
	})
}