vaultik/internal/vaultik/info.go
clawbot 197e3c0641 fix: use v.Stdout/v.Stdin instead of os.Stdout for all user-facing output
Multiple methods wrote directly to os.Stdout instead of using the injectable
v.Stdout writer, breaking the TestVaultik testing infrastructure and making
output impossible to capture or redirect.

Fixed in: ListSnapshots, PurgeSnapshots, VerifySnapshotWithOptions,
PruneBlobs, outputPruneBlobsJSON, outputRemoveJSON, ShowInfo, RemoteInfo.
2026-02-20 00:17:31 -08:00

349 lines
11 KiB
Go

package vaultik
import (
"encoding/json"
"fmt"
"runtime"
"sort"
"strings"
"git.eeqj.de/sneak/vaultik/internal/log"
"git.eeqj.de/sneak/vaultik/internal/snapshot"
"github.com/dustin/go-humanize"
)
// ShowInfo displays system and configuration information on v.Stdout:
// build/runtime details, storage and backup configuration, encryption
// recipients, daemon settings (when configured), and local index stats.
// It always returns nil; database count queries are best-effort and are
// silently skipped on error.
func (v *Vaultik) ShowInfo() error {
	// System Information
	_, _ = fmt.Fprintln(v.Stdout, "=== System Information ===")
	_, _ = fmt.Fprintf(v.Stdout, "OS/Architecture: %s/%s\n", runtime.GOOS, runtime.GOARCH)
	_, _ = fmt.Fprintf(v.Stdout, "Version: %s\n", v.Globals.Version)
	_, _ = fmt.Fprintf(v.Stdout, "Commit: %s\n", v.Globals.Commit)
	_, _ = fmt.Fprintf(v.Stdout, "Go Version: %s\n", runtime.Version())
	_, _ = fmt.Fprintln(v.Stdout)

	// Storage Configuration
	_, _ = fmt.Fprintln(v.Stdout, "=== Storage Configuration ===")
	_, _ = fmt.Fprintf(v.Stdout, "S3 Bucket: %s\n", v.Config.S3.Bucket)
	if v.Config.S3.Prefix != "" {
		_, _ = fmt.Fprintf(v.Stdout, "S3 Prefix: %s\n", v.Config.S3.Prefix)
	}
	_, _ = fmt.Fprintf(v.Stdout, "S3 Endpoint: %s\n", v.Config.S3.Endpoint)
	_, _ = fmt.Fprintf(v.Stdout, "S3 Region: %s\n", v.Config.S3.Region)
	_, _ = fmt.Fprintln(v.Stdout)

	// Backup Settings: configured snapshots with their paths and excludes.
	_, _ = fmt.Fprintln(v.Stdout, "=== Backup Settings ===")
	_, _ = fmt.Fprintf(v.Stdout, "Snapshots:\n")
	for _, name := range v.Config.SnapshotNames() {
		snap := v.Config.Snapshots[name]
		_, _ = fmt.Fprintf(v.Stdout, " %s:\n", name)
		for _, path := range snap.Paths {
			_, _ = fmt.Fprintf(v.Stdout, " - %s\n", path)
		}
		if len(snap.Exclude) > 0 {
			_, _ = fmt.Fprintf(v.Stdout, " exclude: %s\n", strings.Join(snap.Exclude, ", "))
		}
	}
	// Global exclude patterns
	if len(v.Config.Exclude) > 0 {
		_, _ = fmt.Fprintf(v.Stdout, "Global Exclude: %s\n", strings.Join(v.Config.Exclude, ", "))
	}
	_, _ = fmt.Fprintf(v.Stdout, "Compression: zstd level %d\n", v.Config.CompressionLevel)
	_, _ = fmt.Fprintf(v.Stdout, "Chunk Size: %s\n", humanize.Bytes(uint64(v.Config.ChunkSize)))
	_, _ = fmt.Fprintf(v.Stdout, "Blob Size Limit: %s\n", humanize.Bytes(uint64(v.Config.BlobSizeLimit)))
	_, _ = fmt.Fprintln(v.Stdout)

	// Encryption Configuration
	_, _ = fmt.Fprintln(v.Stdout, "=== Encryption Configuration ===")
	_, _ = fmt.Fprintf(v.Stdout, "Recipients:\n")
	for _, recipient := range v.Config.AgeRecipients {
		_, _ = fmt.Fprintf(v.Stdout, " - %s\n", recipient)
	}
	_, _ = fmt.Fprintln(v.Stdout)

	// Daemon Settings — only shown when at least one interval is configured.
	if v.Config.BackupInterval > 0 || v.Config.MinTimeBetweenRun > 0 {
		_, _ = fmt.Fprintln(v.Stdout, "=== Daemon Settings ===")
		if v.Config.BackupInterval > 0 {
			_, _ = fmt.Fprintf(v.Stdout, "Backup Interval: %s\n", v.Config.BackupInterval)
		}
		if v.Config.MinTimeBetweenRun > 0 {
			_, _ = fmt.Fprintf(v.Stdout, "Minimum Time: %s\n", v.Config.MinTimeBetweenRun)
		}
		_, _ = fmt.Fprintln(v.Stdout)
	}

	// Local Database
	_, _ = fmt.Fprintln(v.Stdout, "=== Local Database ===")
	_, _ = fmt.Fprintf(v.Stdout, "Index Path: %s\n", v.Config.IndexPath)
	// Only report size and row counts when the index file actually exists.
	if info, err := v.Fs.Stat(v.Config.IndexPath); err == nil {
		_, _ = fmt.Fprintf(v.Stdout, "Index Size: %s\n", humanize.Bytes(uint64(info.Size())))
		// Best-effort row counts; a failed query just omits that line.
		counts := []struct {
			label string
			query string
		}{
			{"Snapshots", `SELECT COUNT(*) FROM snapshots WHERE completed_at IS NOT NULL`},
			{"Blobs", `SELECT COUNT(*) FROM blobs`},
			{"Files", `SELECT COUNT(*) FROM files`},
		}
		for _, c := range counts {
			var n int
			if err := v.DB.Conn().QueryRowContext(v.ctx, c.query).Scan(&n); err == nil {
				_, _ = fmt.Fprintf(v.Stdout, "%s: %d\n", c.label, n)
			}
		}
	} else {
		_, _ = fmt.Fprintf(v.Stdout, "Index Size: (not created)\n")
	}
	return nil
}
// SnapshotMetadataInfo contains information about a single snapshot's metadata
// as stored on the remote under metadata/<snapshot-id>/.
type SnapshotMetadataInfo struct {
	// SnapshotID is the identifier parsed from the remote object key.
	SnapshotID string `json:"snapshot_id"`
	// ManifestSize is the stored size in bytes of the manifest object.
	ManifestSize int64 `json:"manifest_size"`
	// DatabaseSize is the stored size in bytes of the db object.
	DatabaseSize int64 `json:"database_size"`
	// TotalSize is ManifestSize + DatabaseSize.
	TotalSize int64 `json:"total_size"`
	// BlobCount is the number of blobs recorded in the snapshot's manifest.
	BlobCount int `json:"blob_count"`
	// BlobsSize is the sum of compressed blob sizes listed in the manifest.
	BlobsSize int64 `json:"blobs_size"`
}
// RemoteInfoResult contains all remote storage information gathered by
// RemoteInfo; it is the structure emitted when JSON output is requested.
type RemoteInfoResult struct {
	// Storage info reported by the storage backend.
	StorageType     string `json:"storage_type"`
	StorageLocation string `json:"storage_location"`
	// Per-snapshot metadata sizes, sorted by snapshot ID.
	Snapshots          []SnapshotMetadataInfo `json:"snapshots"`
	TotalMetadataSize  int64                  `json:"total_metadata_size"`
	TotalMetadataCount int                    `json:"total_metadata_count"`
	// All blobs found under blobs/ on the remote.
	TotalBlobCount int   `json:"total_blob_count"`
	TotalBlobSize  int64 `json:"total_blob_size"`
	// Blobs referenced by at least one snapshot manifest (deduplicated by hash).
	ReferencedBlobCount int   `json:"referenced_blob_count"`
	ReferencedBlobSize  int64 `json:"referenced_blob_size"`
	// Blobs present on the remote but referenced by no manifest.
	OrphanedBlobCount int   `json:"orphaned_blob_count"`
	OrphanedBlobSize  int64 `json:"orphaned_blob_size"`
}
// RemoteInfo displays information about remote storage: per-snapshot metadata
// sizes, and total / referenced / orphaned blob statistics. When jsonOutput is
// true the aggregated RemoteInfoResult is written to v.Stdout as indented JSON;
// otherwise a human-readable report (with progress lines) is written instead.
// Listing errors abort the scan; per-manifest download or decode failures are
// logged and skipped so one bad snapshot does not hide the rest.
func (v *Vaultik) RemoteInfo(jsonOutput bool) error {
	result := &RemoteInfoResult{}

	// Storage backend identification.
	storageInfo := v.Storage.Info()
	result.StorageType = storageInfo.Type
	result.StorageLocation = storageInfo.Location
	if !jsonOutput {
		_, _ = fmt.Fprintln(v.Stdout, "=== Remote Storage ===")
		_, _ = fmt.Fprintf(v.Stdout, "Type: %s\n", storageInfo.Type)
		_, _ = fmt.Fprintf(v.Stdout, "Location: %s\n", storageInfo.Location)
		_, _ = fmt.Fprintln(v.Stdout)
	}

	// Collect metadata objects, keyed as metadata/<snapshot-id>/<filename>.
	if !jsonOutput {
		_, _ = fmt.Fprintln(v.Stdout, "Scanning snapshot metadata...")
	}
	snapshotMetadata := make(map[string]*SnapshotMetadataInfo)
	metadataCh := v.Storage.ListStream(v.ctx, "metadata/")
	for obj := range metadataCh {
		if obj.Err != nil {
			return fmt.Errorf("listing metadata: %w", obj.Err)
		}
		parts := strings.Split(obj.Key, "/")
		if len(parts) < 3 {
			continue // not metadata/<id>/<file>; ignore
		}
		snapshotID := parts[1]
		info, exists := snapshotMetadata[snapshotID]
		if !exists {
			info = &SnapshotMetadataInfo{SnapshotID: snapshotID}
			snapshotMetadata[snapshotID] = info
		}
		filename := parts[2]
		if strings.HasPrefix(filename, "manifest") {
			info.ManifestSize = obj.Size
		} else if strings.HasPrefix(filename, "db") {
			info.DatabaseSize = obj.Size
		}
		info.TotalSize = info.ManifestSize + info.DatabaseSize
	}

	// Sort snapshot IDs for consistent output.
	snapshotIDs := make([]string, 0, len(snapshotMetadata))
	for id := range snapshotMetadata {
		snapshotIDs = append(snapshotIDs, id)
	}
	sort.Strings(snapshotIDs)

	// Download and parse all manifests to learn which blobs are referenced.
	if !jsonOutput {
		_, _ = fmt.Fprintf(v.Stdout, "Downloading %d manifest(s)...\n", len(snapshotIDs))
	}
	referencedBlobs := make(map[string]int64) // hash -> compressed size
	for _, snapshotID := range snapshotIDs {
		manifestKey := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)
		reader, err := v.Storage.Get(v.ctx, manifestKey)
		if err != nil {
			// Best-effort: an unreadable manifest must not abort the whole scan.
			log.Warn("Failed to get manifest", "snapshot", snapshotID, "error", err)
			continue
		}
		manifest, err := snapshot.DecodeManifest(reader)
		_ = reader.Close()
		if err != nil {
			log.Warn("Failed to decode manifest", "snapshot", snapshotID, "error", err)
			continue
		}
		// Record blob info from the manifest into the snapshot's entry.
		info := snapshotMetadata[snapshotID]
		info.BlobCount = manifest.BlobCount
		var blobsSize int64
		for _, blob := range manifest.Blobs {
			referencedBlobs[blob.Hash] = blob.CompressedSize
			blobsSize += blob.CompressedSize
		}
		info.BlobsSize = blobsSize
	}

	// Build the result snapshot list and metadata totals.
	var totalMetadataSize int64
	for _, id := range snapshotIDs {
		info := snapshotMetadata[id]
		result.Snapshots = append(result.Snapshots, *info)
		totalMetadataSize += info.TotalSize
	}
	result.TotalMetadataSize = totalMetadataSize
	result.TotalMetadataCount = len(snapshotIDs)

	// Referenced blob stats (deduplicated across snapshots by hash).
	for _, size := range referencedBlobs {
		result.ReferencedBlobCount++
		result.ReferencedBlobSize += size
	}

	// Scan every blob on the remote; keys look like blobs/xx/yy/hash.
	if !jsonOutput {
		_, _ = fmt.Fprintln(v.Stdout, "Scanning blobs...")
	}
	allBlobs := make(map[string]int64) // hash -> size from storage
	blobCh := v.Storage.ListStream(v.ctx, "blobs/")
	for obj := range blobCh {
		if obj.Err != nil {
			return fmt.Errorf("listing blobs: %w", obj.Err)
		}
		parts := strings.Split(obj.Key, "/")
		if len(parts) < 4 {
			continue // not blobs/xx/yy/hash; ignore
		}
		hash := parts[3]
		allBlobs[hash] = obj.Size
		result.TotalBlobCount++
		result.TotalBlobSize += obj.Size
	}

	// Orphans: on the remote but referenced by no manifest.
	for hash, size := range allBlobs {
		if _, referenced := referencedBlobs[hash]; !referenced {
			result.OrphanedBlobCount++
			result.OrphanedBlobSize += size
		}
	}

	// JSON output short-circuits the human-readable report.
	if jsonOutput {
		enc := json.NewEncoder(v.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(result)
	}

	// Human-readable output. The six-column row format and its divider are
	// built once and reused for the header, footer, and separator lines.
	const rowFormat = "%-45s %12s %12s %12s %10s %12s\n"
	divider := fmt.Sprintf(rowFormat,
		strings.Repeat("-", 45), strings.Repeat("-", 12), strings.Repeat("-", 12),
		strings.Repeat("-", 12), strings.Repeat("-", 10), strings.Repeat("-", 12))

	_, _ = fmt.Fprintf(v.Stdout, "\n=== Snapshot Metadata ===\n")
	if len(result.Snapshots) == 0 {
		_, _ = fmt.Fprintf(v.Stdout, "No snapshots found\n")
	} else {
		_, _ = fmt.Fprintf(v.Stdout, rowFormat, "SNAPSHOT", "MANIFEST", "DATABASE", "TOTAL", "BLOBS", "BLOB SIZE")
		_, _ = fmt.Fprint(v.Stdout, divider)
		for _, info := range result.Snapshots {
			_, _ = fmt.Fprintf(v.Stdout, rowFormat,
				truncateString(info.SnapshotID, 45),
				humanize.Bytes(uint64(info.ManifestSize)),
				humanize.Bytes(uint64(info.DatabaseSize)),
				humanize.Bytes(uint64(info.TotalSize)),
				humanize.Comma(int64(info.BlobCount)),
				humanize.Bytes(uint64(info.BlobsSize)),
			)
		}
		_, _ = fmt.Fprint(v.Stdout, divider)
		_, _ = fmt.Fprintf(v.Stdout, "%-45s %12s %12s %12s\n", fmt.Sprintf("Total (%d snapshots)", result.TotalMetadataCount), "", "", humanize.Bytes(uint64(result.TotalMetadataSize)))
	}

	_, _ = fmt.Fprintf(v.Stdout, "\n=== Blob Storage ===\n")
	_, _ = fmt.Fprintf(v.Stdout, "Total blobs on remote: %s (%s)\n",
		humanize.Comma(int64(result.TotalBlobCount)),
		humanize.Bytes(uint64(result.TotalBlobSize)))
	_, _ = fmt.Fprintf(v.Stdout, "Referenced by snapshots: %s (%s)\n",
		humanize.Comma(int64(result.ReferencedBlobCount)),
		humanize.Bytes(uint64(result.ReferencedBlobSize)))
	_, _ = fmt.Fprintf(v.Stdout, "Orphaned (unreferenced): %s (%s)\n",
		humanize.Comma(int64(result.OrphanedBlobCount)),
		humanize.Bytes(uint64(result.OrphanedBlobSize)))
	if result.OrphanedBlobCount > 0 {
		_, _ = fmt.Fprintf(v.Stdout, "\nRun 'vaultik prune --remote' to remove orphaned blobs.\n")
	}
	return nil
}
// truncateString shortens s to at most maxLen bytes, replacing the tail with
// "..." when there is room for the ellipsis. Strings already within the limit
// are returned unchanged; when maxLen <= 3 the string is hard-cut instead.
// NOTE(review): the limit is measured in bytes, not runes — fine for the
// ASCII snapshot IDs this file feeds it, but multi-byte input could be split
// mid-rune.
func truncateString(s string, maxLen int) string {
	switch {
	case len(s) <= maxLen:
		return s
	case maxLen <= 3:
		// No room for an ellipsis; hard truncate.
		return s[:maxLen]
	default:
		return s[:maxLen-3] + "..."
	}
}