Compare commits


1 Commit

SHA1: b13368a68a
Date: 2026-02-15 21:08:46 -08:00

Add CompressStream double-close regression test (closes #35)

Adds regression tests for issue #28 (fixed in PR #33) to prevent
reintroduction of the double-close bug in CompressStream.

Tests cover:
- CompressStream with normal input
- CompressStream with large (512KB) input
- CompressStream with empty input
- CompressData close correctness
2 changed files with 134 additions and 79 deletions
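
For context on the bug class these tests guard against (issue #28): an explicit Close() on the
success path combined with an unconditional defer Close() runs Close twice, and when the wrapped
writer's Close is not idempotent the second call surfaces as a spurious error. The sketch below
reproduces that shape with a hypothetical onceCloser type; it illustrates the general pattern only
and is not vaultik's blobgen code. A matching close-exactly-once sketch follows the test file below.

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
)

// onceCloser is a hypothetical writer whose Close is not idempotent,
// standing in for any wrapped writer that must be closed exactly once.
type onceCloser struct {
    w      io.Writer
    closed bool
}

func (o *onceCloser) Write(p []byte) (int, error) { return o.w.Write(p) }

func (o *onceCloser) Close() error {
    if o.closed {
        return errors.New("writer already closed")
    }
    o.closed = true
    return nil
}

// compressBuggy mirrors the double-close shape described in the commit:
// the deferred Close fires after the explicit Close on the happy path,
// and its error clobbers an otherwise successful result.
func compressBuggy(dst io.Writer, src io.Reader) (err error) {
    w := &onceCloser{w: dst}
    defer func() {
        if cerr := w.Close(); cerr != nil && err == nil {
            err = cerr // second Close: reports "writer already closed"
        }
    }()
    if _, err = io.Copy(w, src); err != nil {
        return err
    }
    return w.Close() // first Close: succeeds
}

func main() {
    var buf bytes.Buffer
    // Prints a non-nil error even though the copy and the first Close succeeded.
    fmt.Println(compressBuggy(&buf, bytes.NewReader([]byte("hello"))))
}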

View File (new test file, package blobgen)

@@ -0,0 +1,64 @@
package blobgen

import (
    "bytes"
    "crypto/rand"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// testRecipient is a static age recipient for tests.
const testRecipient = "age1cplgrwj77ta54dnmydvvmzn64ltk83ankxl5sww04mrtmu62kv3s89gmvv"

// TestCompressStreamNoDoubleClose is a regression test for issue #28.
// It verifies that CompressStream does not panic or return an error due to
// double-closing the underlying blobgen.Writer. Before the fix in PR #33,
// the explicit Close() on the happy path combined with defer Close() would
// cause a double close.
func TestCompressStreamNoDoubleClose(t *testing.T) {
    input := []byte("regression test data for issue #28 double-close fix")
    var buf bytes.Buffer

    written, hash, err := CompressStream(&buf, bytes.NewReader(input), 3, []string{testRecipient})
    require.NoError(t, err, "CompressStream should not return an error")
    assert.True(t, written > 0, "expected bytes written > 0")
    assert.NotEmpty(t, hash, "expected non-empty hash")
    assert.True(t, buf.Len() > 0, "expected non-empty output")
}

// TestCompressStreamLargeInput exercises CompressStream with a larger payload
// to ensure no double-close issues surface under heavier I/O.
func TestCompressStreamLargeInput(t *testing.T) {
    data := make([]byte, 512*1024) // 512 KB
    _, err := rand.Read(data)
    require.NoError(t, err)

    var buf bytes.Buffer
    written, hash, err := CompressStream(&buf, bytes.NewReader(data), 3, []string{testRecipient})
    require.NoError(t, err)
    assert.True(t, written > 0)
    assert.NotEmpty(t, hash)
}

// TestCompressStreamEmptyInput verifies CompressStream handles empty input
// without double-close issues.
func TestCompressStreamEmptyInput(t *testing.T) {
    var buf bytes.Buffer
    _, hash, err := CompressStream(&buf, strings.NewReader(""), 3, []string{testRecipient})
    require.NoError(t, err)
    assert.NotEmpty(t, hash)
}

// TestCompressDataNoDoubleClose mirrors the stream test for CompressData,
// ensuring the explicit Close + error-path Close pattern is also safe.
func TestCompressDataNoDoubleClose(t *testing.T) {
    input := []byte("CompressData regression test for double-close")
    result, err := CompressData(input, 3, []string{testRecipient})
    require.NoError(t, err)
    assert.True(t, result.CompressedSize > 0)
    assert.True(t, result.UncompressedSize == int64(len(input)))
    assert.NotEmpty(t, result.SHA256)
}
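
The doc comments above credit PR #33 with the fix, and that change itself is not part of this diff.
For reference, the sketch below shows one common way to guarantee exactly one Close on every path,
which is the property TestCompressStreamNoDoubleClose asserts. The function name and signature are
illustrative, not the project's.

package sketch

import "io"

// copyAndClose guarantees a single Close on every path: the deferred
// Close runs only if the explicit happy-path Close was never reached.
// This is a general pattern, not the actual CompressStream change.
func copyAndClose(w io.WriteCloser, src io.Reader) (written int64, err error) {
    closed := false
    defer func() {
        if !closed {
            _ = w.Close() // error path: close once, discard the error
        }
    }()

    written, err = io.Copy(w, src)
    if err != nil {
        return 0, err
    }

    closed = true
    return written, w.Close() // happy path: the only Close that runs
}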

View File (snapshot operations source file)

@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"os"
"regexp"
"path/filepath"
"sort"
"strings"
@@ -87,7 +86,7 @@ func (v *Vaultik) CreateSnapshot(opts *SnapshotCreateOptions) error {
// Print overall summary if multiple snapshots
if len(snapshotNames) > 1 {
v.printfStdout("\nAll %d snapshots completed in %s\n", len(snapshotNames), time.Since(overallStartTime).Round(time.Second))
_, _ = fmt.Fprintf(v.Stdout, "\nAll %d snapshots completed in %s\n", len(snapshotNames), time.Since(overallStartTime).Round(time.Second))
}
return nil
@@ -100,7 +99,7 @@ func (v *Vaultik) createNamedSnapshot(opts *SnapshotCreateOptions, hostname, sna
snapConfig := v.Config.Snapshots[snapName]
if total > 1 {
v.printfStdout("\n=== Snapshot %d/%d: %s ===\n", idx, total, snapName)
_, _ = fmt.Fprintf(v.Stdout, "\n=== Snapshot %d/%d: %s ===\n", idx, total, snapName)
}
// Resolve source directories to absolute paths
@@ -153,7 +152,7 @@ func (v *Vaultik) createNamedSnapshot(opts *SnapshotCreateOptions, hostname, sna
return fmt.Errorf("creating snapshot: %w", err)
}
log.Info("Beginning snapshot", "snapshot_id", snapshotID, "name", snapName)
v.printfStdout("Beginning snapshot: %s\n", snapshotID)
_, _ = fmt.Fprintf(v.Stdout, "Beginning snapshot: %s\n", snapshotID)
for i, dir := range resolvedDirs {
// Check if context is cancelled
@@ -165,7 +164,7 @@ func (v *Vaultik) createNamedSnapshot(opts *SnapshotCreateOptions, hostname, sna
}
log.Info("Scanning directory", "path", dir)
v.printfStdout("Beginning directory scan (%d/%d): %s\n", i+1, len(resolvedDirs), dir)
_, _ = fmt.Fprintf(v.Stdout, "Beginning directory scan (%d/%d): %s\n", i+1, len(resolvedDirs), dir)
result, err := scanner.Scan(v.ctx, dir, snapshotID)
if err != nil {
return fmt.Errorf("failed to scan %s: %w", dir, err)
@@ -276,35 +275,35 @@ func (v *Vaultik) createNamedSnapshot(opts *SnapshotCreateOptions, hostname, sna
}
// Print comprehensive summary
v.printfStdout("=== Snapshot Complete ===\n")
v.printfStdout("ID: %s\n", snapshotID)
v.printfStdout("Files: %s examined, %s to process, %s unchanged",
_, _ = fmt.Fprintf(v.Stdout, "=== Snapshot Complete ===\n")
_, _ = fmt.Fprintf(v.Stdout, "ID: %s\n", snapshotID)
_, _ = fmt.Fprintf(v.Stdout, "Files: %s examined, %s to process, %s unchanged",
formatNumber(totalFiles),
formatNumber(totalFilesChanged),
formatNumber(totalFilesSkipped))
if totalFilesDeleted > 0 {
v.printfStdout(", %s deleted", formatNumber(totalFilesDeleted))
_, _ = fmt.Fprintf(v.Stdout, ", %s deleted", formatNumber(totalFilesDeleted))
}
v.printlnStdout()
v.printfStdout("Data: %s total (%s to process)",
_, _ = fmt.Fprintln(v.Stdout)
_, _ = fmt.Fprintf(v.Stdout, "Data: %s total (%s to process)",
humanize.Bytes(uint64(totalBytesAll)),
humanize.Bytes(uint64(totalBytesChanged)))
if totalBytesDeleted > 0 {
v.printfStdout(", %s deleted", humanize.Bytes(uint64(totalBytesDeleted)))
_, _ = fmt.Fprintf(v.Stdout, ", %s deleted", humanize.Bytes(uint64(totalBytesDeleted)))
}
v.printlnStdout()
_, _ = fmt.Fprintln(v.Stdout)
if totalBlobsUploaded > 0 {
v.printfStdout("Storage: %s compressed from %s (%.2fx)\n",
_, _ = fmt.Fprintf(v.Stdout, "Storage: %s compressed from %s (%.2fx)\n",
humanize.Bytes(uint64(totalBlobSizeCompressed)),
humanize.Bytes(uint64(totalBlobSizeUncompressed)),
compressionRatio)
v.printfStdout("Upload: %d blobs, %s in %s (%s)\n",
_, _ = fmt.Fprintf(v.Stdout, "Upload: %d blobs, %s in %s (%s)\n",
totalBlobsUploaded,
humanize.Bytes(uint64(totalBytesUploaded)),
formatDuration(uploadDuration),
avgUploadSpeed)
}
v.printfStdout("Duration: %s\n", formatDuration(snapshotDuration))
_, _ = fmt.Fprintf(v.Stdout, "Duration: %s\n", formatDuration(snapshotDuration))
if opts.Prune {
log.Info("Pruning enabled - will delete old snapshots after snapshot")
@@ -423,13 +422,13 @@ func (v *Vaultik) ListSnapshots(jsonOutput bool) error {
if jsonOutput {
// JSON output
encoder := json.NewEncoder(v.Stdout)
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
return encoder.Encode(snapshots)
}
// Table output
w := tabwriter.NewWriter(v.Stdout, 0, 0, 3, ' ', 0)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
// Show configured snapshots from config file
if _, err := fmt.Fprintln(w, "CONFIGURED SNAPSHOTS:"); err != nil {
@@ -528,14 +527,14 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
}
if len(toDelete) == 0 {
v.printlnStdout("No snapshots to delete")
fmt.Println("No snapshots to delete")
return nil
}
// Show what will be deleted
v.printfStdout("The following snapshots will be deleted:\n\n")
fmt.Printf("The following snapshots will be deleted:\n\n")
for _, snap := range toDelete {
v.printfStdout(" %s (%s, %s)\n",
fmt.Printf(" %s (%s, %s)\n",
snap.ID,
snap.Timestamp.Format("2006-01-02 15:04:05"),
formatBytes(snap.CompressedSize))
@@ -543,19 +542,19 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
// Confirm unless --force is used
if !force {
v.printfStdout("\nDelete %d snapshot(s)? [y/N] ", len(toDelete))
fmt.Printf("\nDelete %d snapshot(s)? [y/N] ", len(toDelete))
var confirm string
if _, err := fmt.Scanln(&confirm); err != nil {
// Treat EOF or error as "no"
v.printlnStdout("Cancelled")
fmt.Println("Cancelled")
return nil
}
if strings.ToLower(confirm) != "y" {
v.printlnStdout("Cancelled")
fmt.Println("Cancelled")
return nil
}
} else {
v.printfStdout("\nDeleting %d snapshot(s) (--force specified)\n", len(toDelete))
fmt.Printf("\nDeleting %d snapshot(s) (--force specified)\n", len(toDelete))
}
// Delete snapshots (both local and remote)
@@ -570,10 +569,10 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
}
}
v.printfStdout("Deleted %d snapshot(s)\n", len(toDelete))
fmt.Printf("Deleted %d snapshot(s)\n", len(toDelete))
// Note: Run 'vaultik prune' separately to clean up unreferenced blobs
v.printlnStdout("\nNote: Run 'vaultik prune' to clean up unreferenced blobs.")
fmt.Println("\nNote: Run 'vaultik prune' to clean up unreferenced blobs.")
return nil
}
@@ -614,11 +613,11 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
}
if !opts.JSON {
v.printfStdout("Verifying snapshot %s\n", snapshotID)
fmt.Printf("Verifying snapshot %s\n", snapshotID)
if !snapshotTime.IsZero() {
v.printfStdout("Snapshot time: %s\n", snapshotTime.Format("2006-01-02 15:04:05 MST"))
fmt.Printf("Snapshot time: %s\n", snapshotTime.Format("2006-01-02 15:04:05 MST"))
}
v.printlnStdout()
fmt.Println()
}
// Download and parse manifest
@@ -636,18 +635,18 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
result.TotalSize = manifest.TotalCompressedSize
if !opts.JSON {
v.printfStdout("Snapshot information:\n")
v.printfStdout(" Blob count: %d\n", manifest.BlobCount)
v.printfStdout(" Total size: %s\n", humanize.Bytes(uint64(manifest.TotalCompressedSize)))
fmt.Printf("Snapshot information:\n")
fmt.Printf(" Blob count: %d\n", manifest.BlobCount)
fmt.Printf(" Total size: %s\n", humanize.Bytes(uint64(manifest.TotalCompressedSize)))
if manifest.Timestamp != "" {
if t, err := time.Parse(time.RFC3339, manifest.Timestamp); err == nil {
v.printfStdout(" Created: %s\n", t.Format("2006-01-02 15:04:05 MST"))
fmt.Printf(" Created: %s\n", t.Format("2006-01-02 15:04:05 MST"))
}
}
v.printlnStdout()
fmt.Println()
// Check each blob exists
v.printfStdout("Checking blob existence...\n")
fmt.Printf("Checking blob existence...\n")
}
missing := 0
@@ -661,7 +660,7 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
_, err := v.Storage.Stat(v.ctx, blobPath)
if err != nil {
if !opts.JSON {
v.printfStdout(" Missing: %s (%s)\n", blob.Hash, humanize.Bytes(uint64(blob.CompressedSize)))
fmt.Printf(" Missing: %s (%s)\n", blob.Hash, humanize.Bytes(uint64(blob.CompressedSize)))
}
missing++
missingSize += blob.CompressedSize
@@ -684,20 +683,20 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
return v.outputVerifyJSON(result)
}
v.printfStdout("\nVerification complete:\n")
v.printfStdout(" Verified: %d blobs (%s)\n", verified,
fmt.Printf("\nVerification complete:\n")
fmt.Printf(" Verified: %d blobs (%s)\n", verified,
humanize.Bytes(uint64(manifest.TotalCompressedSize-missingSize)))
if missing > 0 {
v.printfStdout(" Missing: %d blobs (%s)\n", missing, humanize.Bytes(uint64(missingSize)))
fmt.Printf(" Missing: %d blobs (%s)\n", missing, humanize.Bytes(uint64(missingSize)))
} else {
v.printfStdout(" Missing: 0 blobs\n")
fmt.Printf(" Missing: 0 blobs\n")
}
v.printfStdout(" Status: ")
fmt.Printf(" Status: ")
if missing > 0 {
v.printfStdout("FAILED - %d blobs are missing\n", missing)
fmt.Printf("FAILED - %d blobs are missing\n", missing)
return fmt.Errorf("%d blobs are missing", missing)
} else {
v.printfStdout("OK - All blobs verified\n")
fmt.Printf("OK - All blobs verified\n")
}
return nil
@@ -705,7 +704,7 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
// outputVerifyJSON outputs the verification result as JSON
func (v *Vaultik) outputVerifyJSON(result *VerifyResult) error {
encoder := json.NewEncoder(v.Stdout)
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
if err := encoder.Encode(result); err != nil {
return fmt.Errorf("encoding JSON: %w", err)
@@ -831,11 +830,11 @@ func (v *Vaultik) RemoveSnapshot(snapshotID string, opts *RemoveOptions) (*Remov
if opts.DryRun {
result.DryRun = true
if !opts.JSON {
v.printfStdout("Would remove snapshot: %s\n", snapshotID)
_, _ = fmt.Fprintf(v.Stdout, "Would remove snapshot: %s\n", snapshotID)
if opts.Remote {
v.printlnStdout("Would also remove from remote storage")
_, _ = fmt.Fprintln(v.Stdout, "Would also remove from remote storage")
}
v.printlnStdout("[Dry run - no changes made]")
_, _ = fmt.Fprintln(v.Stdout, "[Dry run - no changes made]")
}
if opts.JSON {
return result, v.outputRemoveJSON(result)
@@ -846,17 +845,17 @@ func (v *Vaultik) RemoveSnapshot(snapshotID string, opts *RemoveOptions) (*Remov
// Confirm unless --force is used (skip in JSON mode - require --force)
if !opts.Force && !opts.JSON {
if opts.Remote {
v.printfStdout("Remove snapshot '%s' from local database and remote storage? [y/N] ", snapshotID)
_, _ = fmt.Fprintf(v.Stdout, "Remove snapshot '%s' from local database and remote storage? [y/N] ", snapshotID)
} else {
v.printfStdout("Remove snapshot '%s' from local database? [y/N] ", snapshotID)
_, _ = fmt.Fprintf(v.Stdout, "Remove snapshot '%s' from local database? [y/N] ", snapshotID)
}
var confirm string
if err := v.scanlnStdin(&confirm); err != nil {
v.printlnStdout("Cancelled")
if _, err := fmt.Fscanln(v.Stdin, &confirm); err != nil {
_, _ = fmt.Fprintln(v.Stdout, "Cancelled")
return result, nil
}
if strings.ToLower(confirm) != "y" {
v.printlnStdout("Cancelled")
_, _ = fmt.Fprintln(v.Stdout, "Cancelled")
return result, nil
}
}
@@ -883,10 +882,10 @@ func (v *Vaultik) RemoveSnapshot(snapshotID string, opts *RemoveOptions) (*Remov
}
// Print summary
v.printfStdout("Removed snapshot '%s' from local database\n", snapshotID)
_, _ = fmt.Fprintf(v.Stdout, "Removed snapshot '%s' from local database\n", snapshotID)
if opts.Remote {
v.printlnStdout("Removed snapshot metadata from remote storage")
v.printlnStdout("\nNote: Blobs were not removed. Run 'vaultik prune' to remove orphaned blobs.")
_, _ = fmt.Fprintln(v.Stdout, "Removed snapshot metadata from remote storage")
_, _ = fmt.Fprintln(v.Stdout, "\nNote: Blobs were not removed. Run 'vaultik prune' to remove orphaned blobs.")
}
return result, nil
@@ -930,7 +929,7 @@ func (v *Vaultik) RemoveAllSnapshots(opts *RemoveOptions) (*RemoveResult, error)
if len(snapshotIDs) == 0 {
if !opts.JSON {
v.printlnStdout("No snapshots found")
_, _ = fmt.Fprintln(v.Stdout, "No snapshots found")
}
return result, nil
}
@@ -939,14 +938,14 @@ func (v *Vaultik) RemoveAllSnapshots(opts *RemoveOptions) (*RemoveResult, error)
result.DryRun = true
result.SnapshotsRemoved = snapshotIDs
if !opts.JSON {
v.printfStdout("Would remove %d snapshot(s):\n", len(snapshotIDs))
_, _ = fmt.Fprintf(v.Stdout, "Would remove %d snapshot(s):\n", len(snapshotIDs))
for _, id := range snapshotIDs {
v.printfStdout(" %s\n", id)
_, _ = fmt.Fprintf(v.Stdout, " %s\n", id)
}
if opts.Remote {
v.printlnStdout("Would also remove from remote storage")
_, _ = fmt.Fprintln(v.Stdout, "Would also remove from remote storage")
}
v.printlnStdout("[Dry run - no changes made]")
_, _ = fmt.Fprintln(v.Stdout, "[Dry run - no changes made]")
}
if opts.JSON {
return result, v.outputRemoveJSON(result)
@@ -987,10 +986,10 @@ func (v *Vaultik) RemoveAllSnapshots(opts *RemoveOptions) (*RemoveResult, error)
return result, v.outputRemoveJSON(result)
}
v.printfStdout("Removed %d snapshot(s)\n", len(result.SnapshotsRemoved))
_, _ = fmt.Fprintf(v.Stdout, "Removed %d snapshot(s)\n", len(result.SnapshotsRemoved))
if opts.Remote {
v.printlnStdout("Removed snapshot metadata from remote storage")
v.printlnStdout("\nNote: Blobs were not removed. Run 'vaultik prune' to remove orphaned blobs.")
_, _ = fmt.Fprintln(v.Stdout, "Removed snapshot metadata from remote storage")
_, _ = fmt.Fprintln(v.Stdout, "\nNote: Blobs were not removed. Run 'vaultik prune' to remove orphaned blobs.")
}
return result, nil
@@ -1044,7 +1043,7 @@ func (v *Vaultik) deleteSnapshotFromRemote(snapshotID string) error {
// outputRemoveJSON outputs the removal result as JSON
func (v *Vaultik) outputRemoveJSON(result *RemoveResult) error {
encoder := json.NewEncoder(v.Stdout)
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
return encoder.Encode(result)
}
@@ -1118,29 +1117,21 @@ func (v *Vaultik) PruneDatabase() (*PruneResult, error) {
)
// Print summary
v.printfStdout("Local database prune complete:\n")
v.printfStdout(" Incomplete snapshots removed: %d\n", result.SnapshotsDeleted)
v.printfStdout(" Orphaned files removed: %d\n", result.FilesDeleted)
v.printfStdout(" Orphaned chunks removed: %d\n", result.ChunksDeleted)
v.printfStdout(" Orphaned blobs removed: %d\n", result.BlobsDeleted)
_, _ = fmt.Fprintf(v.Stdout, "Local database prune complete:\n")
_, _ = fmt.Fprintf(v.Stdout, " Incomplete snapshots removed: %d\n", result.SnapshotsDeleted)
_, _ = fmt.Fprintf(v.Stdout, " Orphaned files removed: %d\n", result.FilesDeleted)
_, _ = fmt.Fprintf(v.Stdout, " Orphaned chunks removed: %d\n", result.ChunksDeleted)
_, _ = fmt.Fprintf(v.Stdout, " Orphaned blobs removed: %d\n", result.BlobsDeleted)
return result, nil
}
// validTableNameRe matches table names containing only lowercase alphanumeric characters and underscores.
var validTableNameRe = regexp.MustCompile(`^[a-z0-9_]+$`)
// getTableCount returns the count of rows in a table.
// The tableName is sanitized to only allow [a-z0-9_] characters to prevent SQL injection.
// getTableCount returns the count of rows in a table
func (v *Vaultik) getTableCount(tableName string) (int64, error) {
if v.DB == nil {
return 0, nil
}
if !validTableNameRe.MatchString(tableName) {
return 0, fmt.Errorf("invalid table name: %q", tableName)
}
var count int64
query := fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName)
err := v.DB.Conn().QueryRowContext(v.ctx, query).Scan(&count)