diff --git a/internal/blobgen/compress_test.go b/internal/blobgen/compress_test.go
new file mode 100644
index 0000000..6d1240c
--- /dev/null
+++ b/internal/blobgen/compress_test.go
@@ -0,0 +1,64 @@
+package blobgen
+
+import (
+	"bytes"
+	"crypto/rand"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// testRecipient is a static age recipient for tests.
+const testRecipient = "age1cplgrwj77ta54dnmydvvmzn64ltk83ankxl5sww04mrtmu62kv3s89gmvv"
+
+// TestCompressStreamNoDoubleClose is a regression test for issue #28.
+// It verifies that CompressStream does not panic or return an error due to
+// double-closing the underlying blobgen.Writer. Before the fix in PR #33,
+// the explicit Close() on the happy path combined with defer Close() would
+// cause a double close.
+func TestCompressStreamNoDoubleClose(t *testing.T) {
+	input := []byte("regression test data for issue #28 double-close fix")
+	var buf bytes.Buffer
+
+	written, hash, err := CompressStream(&buf, bytes.NewReader(input), 3, []string{testRecipient})
+	require.NoError(t, err, "CompressStream should not return an error")
+	assert.True(t, written > 0, "expected bytes written > 0")
+	assert.NotEmpty(t, hash, "expected non-empty hash")
+	assert.True(t, buf.Len() > 0, "expected non-empty output")
+}
+
+// TestCompressStreamLargeInput exercises CompressStream with a larger payload
+// to ensure no double-close issues surface under heavier I/O.
+func TestCompressStreamLargeInput(t *testing.T) {
+	data := make([]byte, 512*1024) // 512 KiB
+	_, err := rand.Read(data)
+	require.NoError(t, err)
+
+	var buf bytes.Buffer
+	written, hash, err := CompressStream(&buf, bytes.NewReader(data), 3, []string{testRecipient})
+	require.NoError(t, err)
+	assert.True(t, written > 0)
+	assert.NotEmpty(t, hash)
+}
+
+// TestCompressStreamEmptyInput verifies CompressStream handles empty input
+// without double-close issues.
+func TestCompressStreamEmptyInput(t *testing.T) {
+	var buf bytes.Buffer
+	_, hash, err := CompressStream(&buf, strings.NewReader(""), 3, []string{testRecipient})
+	require.NoError(t, err)
+	assert.NotEmpty(t, hash)
+}
+
+// TestCompressDataNoDoubleClose mirrors the stream test for CompressData,
+// ensuring the explicit Close + error-path Close pattern is also safe.
+func TestCompressDataNoDoubleClose(t *testing.T) {
+	input := []byte("CompressData regression test for double-close")
+	result, err := CompressData(input, 3, []string{testRecipient})
+	require.NoError(t, err)
+	assert.True(t, result.CompressedSize > 0)
+	assert.Equal(t, int64(len(input)), result.UncompressedSize)
+	assert.NotEmpty(t, result.SHA256)
+}
diff --git a/internal/vaultik/blobcache.go b/internal/vaultik/blobcache.go
index 50b5565..cdcee69 100644
--- a/internal/vaultik/blobcache.go
+++ b/internal/vaultik/blobcache.go
@@ -7,9 +7,6 @@ import (
 	"sync"
 )
 
-// defaultMaxBlobCacheBytes is the default maximum size of the disk blob cache (10 GB).
-const defaultMaxBlobCacheBytes = 10 << 30 // 10 GiB
-
 // blobDiskCacheEntry tracks a cached blob on disk.
 type blobDiskCacheEntry struct {
 	key string
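
The pattern the compress tests above exercise is a close-once guard: the writer's Close must be safe to call from both the happy path and a deferred cleanup. Below is a minimal sketch of such a guard, assuming only that the writer has a one-shot finalizer; the onceCloser type and its f field are hypothetical illustrations, not the actual blobgen.Writer implementation.

package blobgen

import "sync"

// onceCloser makes Close idempotent: the finalizer runs exactly once,
// and every later Close returns the first call's result. With this
// guard, an explicit Close on the happy path plus a deferred Close
// for error paths cannot double-close (the bug from issue #28).
type onceCloser struct {
	once sync.Once
	err  error
	f    func() error // finalizer to run exactly once (hypothetical)
}

func (c *onceCloser) Close() error {
	c.once.Do(func() { c.err = c.f() })
	return c.err
}

With a guard like this, defer w.Close() can stay in place for early returns while the success path still calls Close explicitly to observe any flush/seal error.
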
diff --git a/internal/vaultik/restore.go b/internal/vaultik/restore.go
index d5ac3a7..afe58b7 100644
--- a/internal/vaultik/restore.go
+++ b/internal/vaultik/restore.go
@@ -109,7 +109,7 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
 
 	// Step 5: Restore files
 	result := &RestoreResult{}
-	blobCache, err := newBlobDiskCache(defaultMaxBlobCacheBytes)
+	blobCache, err := newBlobDiskCache(4 * v.Config.BlobSizeLimit.Int64())
 	if err != nil {
 		return fmt.Errorf("creating blob cache: %w", err)
 	}
@@ -122,6 +122,8 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
 		if err := v.restoreFile(v.ctx, repos, file, opts.TargetDir, identity, chunkToBlobMap, blobCache, result); err != nil {
 			log.Error("Failed to restore file", "path", file.Path, "error", err)
+			result.FilesFailed++
+			result.FailedFiles = append(result.FailedFiles, file.Path.String())
 			// Continue with other files
 			continue
 		}
@@ -151,6 +153,13 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
 		result.Duration.Round(time.Second),
 	)
 
+	if result.FilesFailed > 0 {
+		_, _ = fmt.Fprintf(v.Stdout, "\nWARNING: %d file(s) failed to restore:\n", result.FilesFailed)
+		for _, path := range result.FailedFiles {
+			_, _ = fmt.Fprintf(v.Stdout, "  - %s\n", path)
+		}
+	}
+
 	// Run verification if requested
 	if opts.Verify {
 		if err := v.verifyRestoredFiles(v.ctx, repos, files, opts.TargetDir, result); err != nil {
@@ -171,6 +180,10 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
 	)
 
+	if result.FilesFailed > 0 {
+		return fmt.Errorf("%d file(s) failed to restore", result.FilesFailed)
+	}
+
 	return nil
 }
diff --git a/internal/vaultik/snapshot.go b/internal/vaultik/snapshot.go
index 94df29b..e0d93b2 100644
--- a/internal/vaultik/snapshot.go
+++ b/internal/vaultik/snapshot.go
@@ -1017,16 +1017,16 @@ func (v *Vaultik) deleteSnapshotFromLocalDB(snapshotID string) error {
 	// Delete related records first to avoid foreign key constraints
 	if err := v.Repositories.Snapshots.DeleteSnapshotFiles(v.ctx, snapshotID); err != nil {
-		log.Error("Failed to delete snapshot files", "snapshot_id", snapshotID, "error", err)
+		return fmt.Errorf("deleting snapshot files for %s: %w", snapshotID, err)
 	}
 	if err := v.Repositories.Snapshots.DeleteSnapshotBlobs(v.ctx, snapshotID); err != nil {
-		log.Error("Failed to delete snapshot blobs", "snapshot_id", snapshotID, "error", err)
+		return fmt.Errorf("deleting snapshot blobs for %s: %w", snapshotID, err)
 	}
 	if err := v.Repositories.Snapshots.DeleteSnapshotUploads(v.ctx, snapshotID); err != nil {
-		log.Error("Failed to delete snapshot uploads", "snapshot_id", snapshotID, "error", err)
+		return fmt.Errorf("deleting snapshot uploads for %s: %w", snapshotID, err)
 	}
 	if err := v.Repositories.Snapshots.Delete(v.ctx, snapshotID); err != nil {
-		log.Error("Failed to delete snapshot record", "snapshot_id", snapshotID, "error", err)
+		return fmt.Errorf("deleting snapshot record %s: %w", snapshotID, err)
 	}
 
 	return nil
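
For context, the restore.go hunks above read and write two failure-tracking fields on RestoreResult. A minimal sketch of the assumed shape, with field names taken from the diff and all pre-existing fields elided:

// RestoreResult accumulates per-run restore statistics. Only the
// failure-tracking fields used by the diff above are shown here.
type RestoreResult struct {
	FilesFailed int      // number of files that could not be restored
	FailedFiles []string // paths of those files, printed in the warning summary
	// ... existing fields such as Duration elided
}

Tracking failures this way lets the restore loop continue past a bad file, print a per-path warning summary, and still return an error via the final FilesFailed check. The snapshot.go change is the complementary fix: deleteSnapshotFromLocalDB now fails fast with a wrapped error instead of logging and continuing, so a partially deleted snapshot surfaces to the caller rather than being silently ignored.
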