1 Commits

Author SHA1 Message Date
clawbot
441c441eca fix: prevent double-close of blobgen.Writer in CompressStream
CompressStream had both a defer w.Close() and an explicit w.Close() call,
causing the compressor and encryptor to be closed twice. The second close
on the zstd encoder returns an error, and the age encryptor may write
duplicate finalization bytes, potentially corrupting the output stream.

Use a closed flag to prevent the deferred close from running after the
explicit close succeeds.
2026-02-08 12:03:36 -08:00
4 changed files with 107 additions and 99 deletions

View File

@@ -51,7 +51,13 @@ func CompressStream(dst io.Writer, src io.Reader, compressionLevel int, recipien
if err != nil {
return 0, "", fmt.Errorf("creating writer: %w", err)
}
defer func() { _ = w.Close() }()
closed := false
defer func() {
if !closed {
_ = w.Close()
}
}()
// Copy data
if _, err := io.Copy(w, src); err != nil {
@@ -62,6 +68,7 @@ func CompressStream(dst io.Writer, src io.Reader, compressionLevel int, recipien
if err := w.Close(); err != nil {
return 0, "", fmt.Errorf("closing writer: %w", err)
}
closed = true
return w.BytesWritten(), hex.EncodeToString(w.Sum256()), nil
}

View File

@@ -15,99 +15,99 @@ import (
// ShowInfo displays system and configuration information
func (v *Vaultik) ShowInfo() error {
// System Information
_, _ = fmt.Fprintf(v.Stdout, "=== System Information ===\n")
_, _ = fmt.Fprintf(v.Stdout, "OS/Architecture: %s/%s\n", runtime.GOOS, runtime.GOARCH)
_, _ = fmt.Fprintf(v.Stdout, "Version: %s\n", v.Globals.Version)
_, _ = fmt.Fprintf(v.Stdout, "Commit: %s\n", v.Globals.Commit)
_, _ = fmt.Fprintf(v.Stdout, "Go Version: %s\n", runtime.Version())
_, _ = fmt.Fprintln(v.Stdout)
fmt.Printf("=== System Information ===\n")
fmt.Printf("OS/Architecture: %s/%s\n", runtime.GOOS, runtime.GOARCH)
fmt.Printf("Version: %s\n", v.Globals.Version)
fmt.Printf("Commit: %s\n", v.Globals.Commit)
fmt.Printf("Go Version: %s\n", runtime.Version())
fmt.Println()
// Storage Configuration
_, _ = fmt.Fprintf(v.Stdout, "=== Storage Configuration ===\n")
_, _ = fmt.Fprintf(v.Stdout, "S3 Bucket: %s\n", v.Config.S3.Bucket)
fmt.Printf("=== Storage Configuration ===\n")
fmt.Printf("S3 Bucket: %s\n", v.Config.S3.Bucket)
if v.Config.S3.Prefix != "" {
_, _ = fmt.Fprintf(v.Stdout, "S3 Prefix: %s\n", v.Config.S3.Prefix)
fmt.Printf("S3 Prefix: %s\n", v.Config.S3.Prefix)
}
_, _ = fmt.Fprintf(v.Stdout, "S3 Endpoint: %s\n", v.Config.S3.Endpoint)
_, _ = fmt.Fprintf(v.Stdout, "S3 Region: %s\n", v.Config.S3.Region)
_, _ = fmt.Fprintln(v.Stdout)
fmt.Printf("S3 Endpoint: %s\n", v.Config.S3.Endpoint)
fmt.Printf("S3 Region: %s\n", v.Config.S3.Region)
fmt.Println()
// Backup Settings
_, _ = fmt.Fprintf(v.Stdout, "=== Backup Settings ===\n")
fmt.Printf("=== Backup Settings ===\n")
// Show configured snapshots
_, _ = fmt.Fprintf(v.Stdout, "Snapshots:\n")
fmt.Printf("Snapshots:\n")
for _, name := range v.Config.SnapshotNames() {
snap := v.Config.Snapshots[name]
_, _ = fmt.Fprintf(v.Stdout, " %s:\n", name)
fmt.Printf(" %s:\n", name)
for _, path := range snap.Paths {
_, _ = fmt.Fprintf(v.Stdout, " - %s\n", path)
fmt.Printf(" - %s\n", path)
}
if len(snap.Exclude) > 0 {
_, _ = fmt.Fprintf(v.Stdout, " exclude: %s\n", strings.Join(snap.Exclude, ", "))
fmt.Printf(" exclude: %s\n", strings.Join(snap.Exclude, ", "))
}
}
// Global exclude patterns
if len(v.Config.Exclude) > 0 {
_, _ = fmt.Fprintf(v.Stdout, "Global Exclude: %s\n", strings.Join(v.Config.Exclude, ", "))
fmt.Printf("Global Exclude: %s\n", strings.Join(v.Config.Exclude, ", "))
}
_, _ = fmt.Fprintf(v.Stdout, "Compression: zstd level %d\n", v.Config.CompressionLevel)
_, _ = fmt.Fprintf(v.Stdout, "Chunk Size: %s\n", humanize.Bytes(uint64(v.Config.ChunkSize)))
_, _ = fmt.Fprintf(v.Stdout, "Blob Size Limit: %s\n", humanize.Bytes(uint64(v.Config.BlobSizeLimit)))
_, _ = fmt.Fprintln(v.Stdout)
fmt.Printf("Compression: zstd level %d\n", v.Config.CompressionLevel)
fmt.Printf("Chunk Size: %s\n", humanize.Bytes(uint64(v.Config.ChunkSize)))
fmt.Printf("Blob Size Limit: %s\n", humanize.Bytes(uint64(v.Config.BlobSizeLimit)))
fmt.Println()
// Encryption Configuration
_, _ = fmt.Fprintf(v.Stdout, "=== Encryption Configuration ===\n")
_, _ = fmt.Fprintf(v.Stdout, "Recipients:\n")
fmt.Printf("=== Encryption Configuration ===\n")
fmt.Printf("Recipients:\n")
for _, recipient := range v.Config.AgeRecipients {
_, _ = fmt.Fprintf(v.Stdout, " - %s\n", recipient)
fmt.Printf(" - %s\n", recipient)
}
_, _ = fmt.Fprintln(v.Stdout)
fmt.Println()
// Daemon Settings (if applicable)
if v.Config.BackupInterval > 0 || v.Config.MinTimeBetweenRun > 0 {
_, _ = fmt.Fprintf(v.Stdout, "=== Daemon Settings ===\n")
fmt.Printf("=== Daemon Settings ===\n")
if v.Config.BackupInterval > 0 {
_, _ = fmt.Fprintf(v.Stdout, "Backup Interval: %s\n", v.Config.BackupInterval)
fmt.Printf("Backup Interval: %s\n", v.Config.BackupInterval)
}
if v.Config.MinTimeBetweenRun > 0 {
_, _ = fmt.Fprintf(v.Stdout, "Minimum Time: %s\n", v.Config.MinTimeBetweenRun)
fmt.Printf("Minimum Time: %s\n", v.Config.MinTimeBetweenRun)
}
_, _ = fmt.Fprintln(v.Stdout)
fmt.Println()
}
// Local Database
_, _ = fmt.Fprintf(v.Stdout, "=== Local Database ===\n")
_, _ = fmt.Fprintf(v.Stdout, "Index Path: %s\n", v.Config.IndexPath)
fmt.Printf("=== Local Database ===\n")
fmt.Printf("Index Path: %s\n", v.Config.IndexPath)
// Check if index file exists and get its size
if info, err := v.Fs.Stat(v.Config.IndexPath); err == nil {
_, _ = fmt.Fprintf(v.Stdout, "Index Size: %s\n", humanize.Bytes(uint64(info.Size())))
fmt.Printf("Index Size: %s\n", humanize.Bytes(uint64(info.Size())))
// Get snapshot count from database
query := `SELECT COUNT(*) FROM snapshots WHERE completed_at IS NOT NULL`
var snapshotCount int
if err := v.DB.Conn().QueryRowContext(v.ctx, query).Scan(&snapshotCount); err == nil {
_, _ = fmt.Fprintf(v.Stdout, "Snapshots: %d\n", snapshotCount)
fmt.Printf("Snapshots: %d\n", snapshotCount)
}
// Get blob count from database
query = `SELECT COUNT(*) FROM blobs`
var blobCount int
if err := v.DB.Conn().QueryRowContext(v.ctx, query).Scan(&blobCount); err == nil {
_, _ = fmt.Fprintf(v.Stdout, "Blobs: %d\n", blobCount)
fmt.Printf("Blobs: %d\n", blobCount)
}
// Get file count from database
query = `SELECT COUNT(*) FROM files`
var fileCount int
if err := v.DB.Conn().QueryRowContext(v.ctx, query).Scan(&fileCount); err == nil {
_, _ = fmt.Fprintf(v.Stdout, "Files: %d\n", fileCount)
fmt.Printf("Files: %d\n", fileCount)
}
} else {
_, _ = fmt.Fprintf(v.Stdout, "Index Size: (not created)\n")
fmt.Printf("Index Size: (not created)\n")
}
return nil
@@ -157,15 +157,15 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
result.StorageLocation = storageInfo.Location
if !jsonOutput {
_, _ = fmt.Fprintf(v.Stdout, "=== Remote Storage ===\n")
_, _ = fmt.Fprintf(v.Stdout, "Type: %s\n", storageInfo.Type)
_, _ = fmt.Fprintf(v.Stdout, "Location: %s\n", storageInfo.Location)
_, _ = fmt.Fprintln(v.Stdout)
fmt.Printf("=== Remote Storage ===\n")
fmt.Printf("Type: %s\n", storageInfo.Type)
fmt.Printf("Location: %s\n", storageInfo.Location)
fmt.Println()
}
// List all snapshot metadata
if !jsonOutput {
_, _ = fmt.Fprintf(v.Stdout, "Scanning snapshot metadata...\n")
fmt.Printf("Scanning snapshot metadata...\n")
}
snapshotMetadata := make(map[string]*SnapshotMetadataInfo)
@@ -210,7 +210,7 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
// Download and parse all manifests to get referenced blobs
if !jsonOutput {
_, _ = fmt.Fprintf(v.Stdout, "Downloading %d manifest(s)...\n", len(snapshotIDs))
fmt.Printf("Downloading %d manifest(s)...\n", len(snapshotIDs))
}
referencedBlobs := make(map[string]int64) // hash -> compressed size
@@ -260,7 +260,7 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
// List all blobs on remote
if !jsonOutput {
_, _ = fmt.Fprintf(v.Stdout, "Scanning blobs...\n")
fmt.Printf("Scanning blobs...\n")
}
allBlobs := make(map[string]int64) // hash -> size from storage
@@ -298,14 +298,14 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
}
// Human-readable output
_, _ = fmt.Fprintf(v.Stdout, "\n=== Snapshot Metadata ===\n")
fmt.Printf("\n=== Snapshot Metadata ===\n")
if len(result.Snapshots) == 0 {
_, _ = fmt.Fprintf(v.Stdout, "No snapshots found\n")
fmt.Printf("No snapshots found\n")
} else {
_, _ = fmt.Fprintf(v.Stdout, "%-45s %12s %12s %12s %10s %12s\n", "SNAPSHOT", "MANIFEST", "DATABASE", "TOTAL", "BLOBS", "BLOB SIZE")
_, _ = fmt.Fprintf(v.Stdout, "%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
fmt.Printf("%-45s %12s %12s %12s %10s %12s\n", "SNAPSHOT", "MANIFEST", "DATABASE", "TOTAL", "BLOBS", "BLOB SIZE")
fmt.Printf("%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
for _, info := range result.Snapshots {
_, _ = fmt.Fprintf(v.Stdout, "%-45s %12s %12s %12s %10s %12s\n",
fmt.Printf("%-45s %12s %12s %12s %10s %12s\n",
truncateString(info.SnapshotID, 45),
humanize.Bytes(uint64(info.ManifestSize)),
humanize.Bytes(uint64(info.DatabaseSize)),
@@ -314,23 +314,23 @@ func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
humanize.Bytes(uint64(info.BlobsSize)),
)
}
_, _ = fmt.Fprintf(v.Stdout, "%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
_, _ = fmt.Fprintf(v.Stdout, "%-45s %12s %12s %12s\n", fmt.Sprintf("Total (%d snapshots)", result.TotalMetadataCount), "", "", humanize.Bytes(uint64(result.TotalMetadataSize)))
fmt.Printf("%-45s %12s %12s %12s %10s %12s\n", strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))
fmt.Printf("%-45s %12s %12s %12s\n", fmt.Sprintf("Total (%d snapshots)", result.TotalMetadataCount), "", "", humanize.Bytes(uint64(result.TotalMetadataSize)))
}
_, _ = fmt.Fprintf(v.Stdout, "\n=== Blob Storage ===\n")
_, _ = fmt.Fprintf(v.Stdout, "Total blobs on remote: %s (%s)\n",
fmt.Printf("\n=== Blob Storage ===\n")
fmt.Printf("Total blobs on remote: %s (%s)\n",
humanize.Comma(int64(result.TotalBlobCount)),
humanize.Bytes(uint64(result.TotalBlobSize)))
_, _ = fmt.Fprintf(v.Stdout, "Referenced by snapshots: %s (%s)\n",
fmt.Printf("Referenced by snapshots: %s (%s)\n",
humanize.Comma(int64(result.ReferencedBlobCount)),
humanize.Bytes(uint64(result.ReferencedBlobSize)))
_, _ = fmt.Fprintf(v.Stdout, "Orphaned (unreferenced): %s (%s)\n",
fmt.Printf("Orphaned (unreferenced): %s (%s)\n",
humanize.Comma(int64(result.OrphanedBlobCount)),
humanize.Bytes(uint64(result.OrphanedBlobSize)))
if result.OrphanedBlobCount > 0 {
_, _ = fmt.Fprintf(v.Stdout, "\nRun 'vaultik prune --remote' to remove orphaned blobs.\n")
fmt.Printf("\nRun 'vaultik prune --remote' to remove orphaned blobs.\n")
}
return nil

View File

@@ -3,6 +3,7 @@ package vaultik
import (
"encoding/json"
"fmt"
"os"
"strings"
"git.eeqj.de/sneak/vaultik/internal/log"
@@ -120,29 +121,29 @@ func (v *Vaultik) PruneBlobs(opts *PruneOptions) error {
if len(unreferencedBlobs) == 0 {
log.Info("No unreferenced blobs found")
if opts.JSON {
return v.outputPruneBlobsJSON(result)
return outputPruneBlobsJSON(result)
}
_, _ = fmt.Fprintln(v.Stdout, "No unreferenced blobs to remove.")
fmt.Println("No unreferenced blobs to remove.")
return nil
}
// Show what will be deleted
log.Info("Found unreferenced blobs", "count", len(unreferencedBlobs), "total_size", humanize.Bytes(uint64(totalSize)))
if !opts.JSON {
_, _ = fmt.Fprintf(v.Stdout, "Found %d unreferenced blob(s) totaling %s\n", len(unreferencedBlobs), humanize.Bytes(uint64(totalSize)))
fmt.Printf("Found %d unreferenced blob(s) totaling %s\n", len(unreferencedBlobs), humanize.Bytes(uint64(totalSize)))
}
// Confirm unless --force is used (skip in JSON mode - require --force)
if !opts.Force && !opts.JSON {
_, _ = fmt.Fprintf(v.Stdout, "\nDelete %d unreferenced blob(s)? [y/N] ", len(unreferencedBlobs))
fmt.Printf("\nDelete %d unreferenced blob(s)? [y/N] ", len(unreferencedBlobs))
var confirm string
if _, err := fmt.Fscanln(v.Stdin, &confirm); err != nil {
if _, err := fmt.Scanln(&confirm); err != nil {
// Treat EOF or error as "no"
_, _ = fmt.Fprintln(v.Stdout, "Cancelled")
fmt.Println("Cancelled")
return nil
}
if strings.ToLower(confirm) != "y" {
_, _ = fmt.Fprintln(v.Stdout, "Cancelled")
fmt.Println("Cancelled")
return nil
}
}
@@ -184,20 +185,20 @@ func (v *Vaultik) PruneBlobs(opts *PruneOptions) error {
)
if opts.JSON {
return v.outputPruneBlobsJSON(result)
return outputPruneBlobsJSON(result)
}
_, _ = fmt.Fprintf(v.Stdout, "\nDeleted %d blob(s) totaling %s\n", deletedCount, humanize.Bytes(uint64(deletedSize)))
fmt.Printf("\nDeleted %d blob(s) totaling %s\n", deletedCount, humanize.Bytes(uint64(deletedSize)))
if deletedCount < len(unreferencedBlobs) {
_, _ = fmt.Fprintf(v.Stdout, "Failed to delete %d blob(s)\n", len(unreferencedBlobs)-deletedCount)
fmt.Printf("Failed to delete %d blob(s)\n", len(unreferencedBlobs)-deletedCount)
}
return nil
}
// outputPruneBlobsJSON outputs the prune result as JSON
func (v *Vaultik) outputPruneBlobsJSON(result *PruneBlobsResult) error {
encoder := json.NewEncoder(v.Stdout)
func outputPruneBlobsJSON(result *PruneBlobsResult) error {
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
return encoder.Encode(result)
}

View File

@@ -422,13 +422,13 @@ func (v *Vaultik) ListSnapshots(jsonOutput bool) error {
if jsonOutput {
// JSON output
encoder := json.NewEncoder(v.Stdout)
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
return encoder.Encode(snapshots)
}
// Table output
w := tabwriter.NewWriter(v.Stdout, 0, 0, 3, ' ', 0)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
// Show configured snapshots from config file
if _, err := fmt.Fprintln(w, "CONFIGURED SNAPSHOTS:"); err != nil {
@@ -527,14 +527,14 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
}
if len(toDelete) == 0 {
_, _ = fmt.Fprintln(v.Stdout, "No snapshots to delete")
fmt.Println("No snapshots to delete")
return nil
}
// Show what will be deleted
_, _ = fmt.Fprintf(v.Stdout, "The following snapshots will be deleted:\n\n")
fmt.Printf("The following snapshots will be deleted:\n\n")
for _, snap := range toDelete {
_, _ = fmt.Fprintf(v.Stdout, " %s (%s, %s)\n",
fmt.Printf(" %s (%s, %s)\n",
snap.ID,
snap.Timestamp.Format("2006-01-02 15:04:05"),
formatBytes(snap.CompressedSize))
@@ -542,19 +542,19 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
// Confirm unless --force is used
if !force {
_, _ = fmt.Fprintf(v.Stdout, "\nDelete %d snapshot(s)? [y/N] ", len(toDelete))
fmt.Printf("\nDelete %d snapshot(s)? [y/N] ", len(toDelete))
var confirm string
if _, err := fmt.Fscanln(v.Stdin, &confirm); err != nil {
if _, err := fmt.Scanln(&confirm); err != nil {
// Treat EOF or error as "no"
_, _ = fmt.Fprintln(v.Stdout, "Cancelled")
fmt.Println("Cancelled")
return nil
}
if strings.ToLower(confirm) != "y" {
_, _ = fmt.Fprintln(v.Stdout, "Cancelled")
fmt.Println("Cancelled")
return nil
}
} else {
_, _ = fmt.Fprintf(v.Stdout, "\nDeleting %d snapshot(s) (--force specified)\n", len(toDelete))
fmt.Printf("\nDeleting %d snapshot(s) (--force specified)\n", len(toDelete))
}
// Delete snapshots (both local and remote)
@@ -569,10 +569,10 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
}
}
_, _ = fmt.Fprintf(v.Stdout, "Deleted %d snapshot(s)\n", len(toDelete))
fmt.Printf("Deleted %d snapshot(s)\n", len(toDelete))
// Note: Run 'vaultik prune' separately to clean up unreferenced blobs
_, _ = fmt.Fprintln(v.Stdout, "\nNote: Run 'vaultik prune' to clean up unreferenced blobs.")
fmt.Println("\nNote: Run 'vaultik prune' to clean up unreferenced blobs.")
return nil
}
@@ -613,9 +613,9 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
}
if !opts.JSON {
_, _ = fmt.Fprintf(v.Stdout, "Verifying snapshot %s\n", snapshotID)
fmt.Printf("Verifying snapshot %s\n", snapshotID)
if !snapshotTime.IsZero() {
_, _ = fmt.Fprintf(v.Stdout, "Snapshot time: %s\n", snapshotTime.Format("2006-01-02 15:04:05 MST"))
fmt.Printf("Snapshot time: %s\n", snapshotTime.Format("2006-01-02 15:04:05 MST"))
}
fmt.Println()
}
@@ -635,18 +635,18 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
result.TotalSize = manifest.TotalCompressedSize
if !opts.JSON {
_, _ = fmt.Fprintf(v.Stdout, "Snapshot information:\n")
_, _ = fmt.Fprintf(v.Stdout, " Blob count: %d\n", manifest.BlobCount)
_, _ = fmt.Fprintf(v.Stdout, " Total size: %s\n", humanize.Bytes(uint64(manifest.TotalCompressedSize)))
fmt.Printf("Snapshot information:\n")
fmt.Printf(" Blob count: %d\n", manifest.BlobCount)
fmt.Printf(" Total size: %s\n", humanize.Bytes(uint64(manifest.TotalCompressedSize)))
if manifest.Timestamp != "" {
if t, err := time.Parse(time.RFC3339, manifest.Timestamp); err == nil {
_, _ = fmt.Fprintf(v.Stdout, " Created: %s\n", t.Format("2006-01-02 15:04:05 MST"))
fmt.Printf(" Created: %s\n", t.Format("2006-01-02 15:04:05 MST"))
}
}
_, _ = fmt.Fprintln(v.Stdout)
fmt.Println()
// Check each blob exists
_, _ = fmt.Fprintf(v.Stdout, "Checking blob existence...\n")
fmt.Printf("Checking blob existence...\n")
}
missing := 0
@@ -660,7 +660,7 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
_, err := v.Storage.Stat(v.ctx, blobPath)
if err != nil {
if !opts.JSON {
_, _ = fmt.Fprintf(v.Stdout, " Missing: %s (%s)\n", blob.Hash, humanize.Bytes(uint64(blob.CompressedSize)))
fmt.Printf(" Missing: %s (%s)\n", blob.Hash, humanize.Bytes(uint64(blob.CompressedSize)))
}
missing++
missingSize += blob.CompressedSize
@@ -683,20 +683,20 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
return v.outputVerifyJSON(result)
}
_, _ = fmt.Fprintf(v.Stdout, "\nVerification complete:\n")
_, _ = fmt.Fprintf(v.Stdout, " Verified: %d blobs (%s)\n", verified,
fmt.Printf("\nVerification complete:\n")
fmt.Printf(" Verified: %d blobs (%s)\n", verified,
humanize.Bytes(uint64(manifest.TotalCompressedSize-missingSize)))
if missing > 0 {
_, _ = fmt.Fprintf(v.Stdout, " Missing: %d blobs (%s)\n", missing, humanize.Bytes(uint64(missingSize)))
fmt.Printf(" Missing: %d blobs (%s)\n", missing, humanize.Bytes(uint64(missingSize)))
} else {
_, _ = fmt.Fprintf(v.Stdout, " Missing: 0 blobs\n")
fmt.Printf(" Missing: 0 blobs\n")
}
_, _ = fmt.Fprintf(v.Stdout, " Status: ")
fmt.Printf(" Status: ")
if missing > 0 {
_, _ = fmt.Fprintf(v.Stdout, "FAILED - %d blobs are missing\n", missing)
fmt.Printf("FAILED - %d blobs are missing\n", missing)
return fmt.Errorf("%d blobs are missing", missing)
} else {
_, _ = fmt.Fprintf(v.Stdout, "OK - All blobs verified\n")
fmt.Printf("OK - All blobs verified\n")
}
return nil
@@ -704,7 +704,7 @@ func (v *Vaultik) VerifySnapshotWithOptions(snapshotID string, opts *VerifyOptio
// outputVerifyJSON outputs the verification result as JSON
func (v *Vaultik) outputVerifyJSON(result *VerifyResult) error {
encoder := json.NewEncoder(v.Stdout)
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
if err := encoder.Encode(result); err != nil {
return fmt.Errorf("encoding JSON: %w", err)
@@ -1043,7 +1043,7 @@ func (v *Vaultik) deleteSnapshotFromRemote(snapshotID string) error {
// outputRemoveJSON outputs the removal result as JSON
func (v *Vaultik) outputRemoveJSON(result *RemoveResult) error {
encoder := json.NewEncoder(v.Stdout)
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
return encoder.Encode(result)
}