- Create comprehensive integration tests with mock S3 client
- Add in-memory filesystem and SQLite database support for testing
- Test full backup workflow including chunking, packing, and uploading
- Add test to verify encrypted blob content
- Fix scanner to use afero filesystem for temp file cleanup
- Demonstrate successful backup and verification with mock dependencies
package vaultik

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"text/tabwriter"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/database"
	"git.eeqj.de/sneak/vaultik/internal/log"
	"git.eeqj.de/sneak/vaultik/internal/snapshot"
	"github.com/dustin/go-humanize"
)

// SnapshotCreateOptions contains options for the snapshot create command.
type SnapshotCreateOptions struct {
	Daemon bool // run continuously, watching for changes (not yet implemented)
	Cron   bool // suppress interactive progress output
	Prune  bool // delete old snapshots after the snapshot completes (not yet implemented)
}

// CreateSnapshot executes the snapshot creation operation.
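//
// A minimal invocation sketch (hypothetical wiring; a *Vaultik is normally
// constructed by the application's dependency injection):
//
//	if err := v.CreateSnapshot(&SnapshotCreateOptions{Cron: true}); err != nil {
//		log.Error("snapshot failed", "error", err)
//	}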
func (v *Vaultik) CreateSnapshot(opts *SnapshotCreateOptions) error {
	snapshotStartTime := time.Now()

	log.Info("Starting snapshot creation",
		"version", v.Globals.Version,
		"commit", v.Globals.Commit,
		"index_path", v.Config.IndexPath,
	)

	// Clean up incomplete snapshots FIRST, before any scanning
	// This is critical for data safety - see CleanupIncompleteSnapshots for details
	hostname := v.Config.Hostname
	if hostname == "" {
		hostname, _ = os.Hostname()
	}

	// CRITICAL: This MUST succeed. If we fail to clean up incomplete snapshots,
	// the deduplication logic will think files from the incomplete snapshot were
	// already backed up and skip them, resulting in data loss.
	if err := v.SnapshotManager.CleanupIncompleteSnapshots(v.ctx, hostname); err != nil {
		return fmt.Errorf("cleanup incomplete snapshots: %w", err)
	}

	if opts.Daemon {
		log.Info("Running in daemon mode")
		// TODO: Implement daemon mode with inotify
		return fmt.Errorf("daemon mode not yet implemented")
	}

	// Resolve source directories to absolute paths
	resolvedDirs := make([]string, 0, len(v.Config.SourceDirs))
	for _, dir := range v.Config.SourceDirs {
		absPath, err := filepath.Abs(dir)
		if err != nil {
			return fmt.Errorf("failed to resolve absolute path for %s: %w", dir, err)
		}

		// Resolve symlinks
		resolvedPath, err := filepath.EvalSymlinks(absPath)
		if err != nil {
			// If the path doesn't exist yet, use the absolute path
			if os.IsNotExist(err) {
				resolvedPath = absPath
			} else {
				return fmt.Errorf("failed to resolve symlinks for %s: %w", absPath, err)
			}
		}

		resolvedDirs = append(resolvedDirs, resolvedPath)
	}

	// Create scanner with progress enabled (unless in cron mode)
	scanner := v.ScannerFactory(snapshot.ScannerParams{
		EnableProgress: !opts.Cron,
		Fs:             v.Fs, // injected (afero) filesystem, so tests can run against an in-memory FS
	})

	// Statistics tracking
	totalFiles := 0
	totalBytes := int64(0)
	totalChunks := 0
	totalBlobs := 0
	totalBytesSkipped := int64(0)
	totalFilesSkipped := 0
	totalFilesDeleted := 0
	totalBytesDeleted := int64(0)
	totalBytesUploaded := int64(0)
	totalBlobsUploaded := 0
	uploadDuration := time.Duration(0)

	// Create a new snapshot at the beginning
	snapshotID, err := v.SnapshotManager.CreateSnapshot(v.ctx, hostname, v.Globals.Version, v.Globals.Commit)
	if err != nil {
		return fmt.Errorf("creating snapshot: %w", err)
	}
	log.Info("Beginning snapshot", "snapshot_id", snapshotID)
	_, _ = fmt.Fprintf(v.Stdout, "Beginning snapshot: %s\n", snapshotID)

	for i, dir := range resolvedDirs {
		// Check if context is cancelled
		select {
		case <-v.ctx.Done():
			log.Info("Snapshot creation cancelled")
			return v.ctx.Err()
		default:
		}

		log.Info("Scanning directory", "path", dir)
		_, _ = fmt.Fprintf(v.Stdout, "Beginning directory scan (%d/%d): %s\n", i+1, len(resolvedDirs), dir)
		result, err := scanner.Scan(v.ctx, dir, snapshotID)
		if err != nil {
			return fmt.Errorf("failed to scan %s: %w", dir, err)
		}

		totalFiles += result.FilesScanned
		totalBytes += result.BytesScanned
		totalChunks += result.ChunksCreated
		totalBlobs += result.BlobsCreated
		totalFilesSkipped += result.FilesSkipped
		totalBytesSkipped += result.BytesSkipped
		totalFilesDeleted += result.FilesDeleted
		totalBytesDeleted += result.BytesDeleted

		log.Info("Directory scan complete",
			"path", dir,
			"files", result.FilesScanned,
			"files_skipped", result.FilesSkipped,
			"bytes", result.BytesScanned,
			"bytes_skipped", result.BytesSkipped,
			"chunks", result.ChunksCreated,
			"blobs", result.BlobsCreated,
			"duration", result.EndTime.Sub(result.StartTime))

		// The scanner prints its own per-directory summary, so none is
		// printed here.
	}

	// Get upload statistics from scanner progress if available
	if s := scanner.GetProgress(); s != nil {
		stats := s.GetStats()
		totalBytesUploaded = stats.BytesUploaded.Load()
		totalBlobsUploaded = int(stats.BlobsUploaded.Load())
		uploadDuration = time.Duration(stats.UploadDurationMs.Load()) * time.Millisecond
	}

	// Update snapshot statistics with extended fields
	extStats := snapshot.ExtendedBackupStats{
		BackupStats: snapshot.BackupStats{
			FilesScanned:  totalFiles,
			BytesScanned:  totalBytes,
			ChunksCreated: totalChunks,
			BlobsCreated:  totalBlobs,
			BytesUploaded: totalBytesUploaded,
		},
		BlobUncompressedSize: 0, // Will be set from database query below
		CompressionLevel:     v.Config.CompressionLevel,
		UploadDurationMs:     uploadDuration.Milliseconds(),
	}

	if err := v.SnapshotManager.UpdateSnapshotStatsExtended(v.ctx, snapshotID, extStats); err != nil {
		return fmt.Errorf("updating snapshot stats: %w", err)
	}

	// Mark snapshot as complete
	if err := v.SnapshotManager.CompleteSnapshot(v.ctx, snapshotID); err != nil {
		return fmt.Errorf("completing snapshot: %w", err)
	}

	// Export snapshot metadata without closing the database; the export
	// function handles its own database connection.
	if err := v.SnapshotManager.ExportSnapshotMetadata(v.ctx, v.Config.IndexPath, snapshotID); err != nil {
		return fmt.Errorf("exporting snapshot metadata: %w", err)
	}

	// Calculate final statistics
	snapshotDuration := time.Since(snapshotStartTime)
	totalFilesChanged := totalFiles - totalFilesSkipped
	totalBytesChanged := totalBytes
	totalBytesAll := totalBytes + totalBytesSkipped

	// Calculate upload speed
	var avgUploadSpeed string
	if totalBytesUploaded > 0 && uploadDuration > 0 {
		bytesPerSec := float64(totalBytesUploaded) / uploadDuration.Seconds()
		bitsPerSec := bytesPerSec * 8
		if bitsPerSec >= 1e9 {
			avgUploadSpeed = fmt.Sprintf("%.1f Gbit/s", bitsPerSec/1e9)
		} else if bitsPerSec >= 1e6 {
			avgUploadSpeed = fmt.Sprintf("%.0f Mbit/s", bitsPerSec/1e6)
		} else if bitsPerSec >= 1e3 {
			avgUploadSpeed = fmt.Sprintf("%.0f Kbit/s", bitsPerSec/1e3)
		} else {
			avgUploadSpeed = fmt.Sprintf("%.0f bit/s", bitsPerSec)
		}
	} else {
		avgUploadSpeed = "N/A"
	}
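
	// Worked example of the conversion above (illustrative numbers, not from
	// a real run): 25 MB uploaded in 2s is 12.5 MB/s = 1e8 bit/s, which the
	// decimal (SI) thresholds above format as "100 Mbit/s".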

	// Get total blob sizes from database
	totalBlobSizeCompressed := int64(0)
	totalBlobSizeUncompressed := int64(0)
	if blobHashes, err := v.Repositories.Snapshots.GetBlobHashes(v.ctx, snapshotID); err == nil {
		for _, hash := range blobHashes {
			if blob, err := v.Repositories.Blobs.GetByHash(v.ctx, hash); err == nil && blob != nil {
				totalBlobSizeCompressed += blob.CompressedSize
				totalBlobSizeUncompressed += blob.UncompressedSize
			}
		}
	}

	// Calculate compression ratio (compressed size / uncompressed size,
	// so smaller is better; e.g. 0.25 means a 4:1 reduction)
	var compressionRatio float64
	if totalBlobSizeUncompressed > 0 {
		compressionRatio = float64(totalBlobSizeCompressed) / float64(totalBlobSizeUncompressed)
	} else {
		compressionRatio = 1.0
	}

	// Print comprehensive summary
	_, _ = fmt.Fprintf(v.Stdout, "=== Snapshot Complete ===\n")
	_, _ = fmt.Fprintf(v.Stdout, "ID: %s\n", snapshotID)
	_, _ = fmt.Fprintf(v.Stdout, "Files: %s examined, %s to process, %s unchanged",
		formatNumber(totalFiles),
		formatNumber(totalFilesChanged),
		formatNumber(totalFilesSkipped))
	if totalFilesDeleted > 0 {
		_, _ = fmt.Fprintf(v.Stdout, ", %s deleted", formatNumber(totalFilesDeleted))
	}
	_, _ = fmt.Fprintln(v.Stdout)
	_, _ = fmt.Fprintf(v.Stdout, "Data: %s total (%s to process)",
		humanize.Bytes(uint64(totalBytesAll)),
		humanize.Bytes(uint64(totalBytesChanged)))
	if totalBytesDeleted > 0 {
		_, _ = fmt.Fprintf(v.Stdout, ", %s deleted", humanize.Bytes(uint64(totalBytesDeleted)))
	}
	_, _ = fmt.Fprintln(v.Stdout)
	if totalBlobsUploaded > 0 {
		_, _ = fmt.Fprintf(v.Stdout, "Storage: %s compressed from %s (%.2fx)\n",
			humanize.Bytes(uint64(totalBlobSizeCompressed)),
			humanize.Bytes(uint64(totalBlobSizeUncompressed)),
			compressionRatio)
		_, _ = fmt.Fprintf(v.Stdout, "Upload: %d blobs, %s in %s (%s)\n",
			totalBlobsUploaded,
			humanize.Bytes(uint64(totalBytesUploaded)),
			formatDuration(uploadDuration),
			avgUploadSpeed)
	}
	_, _ = fmt.Fprintf(v.Stdout, "Duration: %s\n", formatDuration(snapshotDuration))

	if opts.Prune {
		log.Info("Pruning enabled - will delete old snapshots after snapshot")
		// TODO: Implement pruning
	}

	return nil
}

// ListSnapshots lists all snapshots found in remote storage, removing any
// local records that no longer exist remotely. Output is a table by
// default, or JSON when jsonOutput is true.
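//
// Remote object layout assumed by the key parsing below (the manifest path
// matches the one used in downloadManifest):
//
//	metadata/<hostname>-<YYYYMMDD>-<HHMMSS>Z/manifest.json.zst
//	blobs/<hash[0:2]>/<hash[2:4]>/<hash>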
func (v *Vaultik) ListSnapshots(jsonOutput bool) error {
	// Get all remote snapshots
	remoteSnapshots := make(map[string]bool)
	objectCh := v.S3Client.ListObjectsStream(v.ctx, "metadata/", false)

	for object := range objectCh {
		if object.Err != nil {
			return fmt.Errorf("listing remote snapshots: %w", object.Err)
		}

		// Extract snapshot ID from paths like metadata/hostname-20240115-143052Z/
		parts := strings.Split(object.Key, "/")
		if len(parts) >= 2 && parts[0] == "metadata" && parts[1] != "" {
			remoteSnapshots[parts[1]] = true
		}
	}

	// Get all local snapshots
	localSnapshots, err := v.Repositories.Snapshots.ListRecent(v.ctx, 10000)
	if err != nil {
		return fmt.Errorf("listing local snapshots: %w", err)
	}

	// Build a map of local snapshots for quick lookup
	localSnapshotMap := make(map[string]*database.Snapshot)
	for _, s := range localSnapshots {
		localSnapshotMap[s.ID] = s
	}

	// Remove local snapshots that don't exist remotely. The loop variable is
	// named snap to avoid shadowing the imported snapshot package.
	for _, snap := range localSnapshots {
		if !remoteSnapshots[snap.ID] {
			log.Info("Removing local snapshot not found in remote", "snapshot_id", snap.ID)

			// Delete related records first to avoid foreign key constraints
			if err := v.Repositories.Snapshots.DeleteSnapshotFiles(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete snapshot files", "snapshot_id", snap.ID, "error", err)
			}
			if err := v.Repositories.Snapshots.DeleteSnapshotBlobs(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete snapshot blobs", "snapshot_id", snap.ID, "error", err)
			}
			if err := v.Repositories.Snapshots.DeleteSnapshotUploads(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete snapshot uploads", "snapshot_id", snap.ID, "error", err)
			}

			// Now delete the snapshot itself
			if err := v.Repositories.Snapshots.Delete(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete local snapshot", "snapshot_id", snap.ID, "error", err)
			} else {
				log.Info("Deleted local snapshot not found in remote", "snapshot_id", snap.ID)
				delete(localSnapshotMap, snap.ID)
			}
		}
	}

	// Build final snapshot list
	snapshots := make([]SnapshotInfo, 0, len(remoteSnapshots))

	for snapshotID := range remoteSnapshots {
		// Check if we have this snapshot locally
		if localSnap, exists := localSnapshotMap[snapshotID]; exists && localSnap.CompletedAt != nil {
			// Get total compressed size of all blobs referenced by this snapshot
			totalSize, err := v.Repositories.Snapshots.GetSnapshotTotalCompressedSize(v.ctx, snapshotID)
			if err != nil {
				log.Warn("Failed to get total compressed size", "id", snapshotID, "error", err)
				// Fall back to stored blob size
				totalSize = localSnap.BlobSize
			}

			snapshots = append(snapshots, SnapshotInfo{
				ID:             localSnap.ID,
				Timestamp:      localSnap.StartedAt,
				CompressedSize: totalSize,
			})
		} else {
			// Remote snapshot not in local DB - derive the timestamp from the
			// ID and fetch the manifest to get the size
			timestamp, err := parseSnapshotTimestamp(snapshotID)
			if err != nil {
				log.Warn("Failed to parse snapshot timestamp", "id", snapshotID, "error", err)
				continue
			}

			totalSize, err := v.getManifestSize(snapshotID)
			if err != nil {
				return fmt.Errorf("failed to get manifest size for %s: %w", snapshotID, err)
			}

			snapshots = append(snapshots, SnapshotInfo{
				ID:             snapshotID,
				Timestamp:      timestamp,
				CompressedSize: totalSize,
			})
		}
	}

	// Sort by timestamp (newest first)
	sort.Slice(snapshots, func(i, j int) bool {
		return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
	})

	if jsonOutput {
		// JSON output
		encoder := json.NewEncoder(os.Stdout)
		encoder.SetIndent("", " ")
		return encoder.Encode(snapshots)
	}

	// Table output
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
	if _, err := fmt.Fprintln(w, "SNAPSHOT ID\tTIMESTAMP\tCOMPRESSED SIZE"); err != nil {
		return err
	}
	if _, err := fmt.Fprintln(w, "───────────\t─────────\t───────────────"); err != nil {
		return err
	}

	for _, snap := range snapshots {
		if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n",
			snap.ID,
			snap.Timestamp.Format("2006-01-02 15:04:05"),
			formatBytes(snap.CompressedSize)); err != nil {
			return err
		}
	}

	return w.Flush()
}

// PurgeSnapshots removes old snapshots based on the given criteria: either
// keep only the latest snapshot, or delete snapshots older than a duration.
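//
// Usage sketch (hypothetical values; olderThan is parsed by this package's
// parseDuration helper):
//
//	// Delete everything except the most recent snapshot, without prompting:
//	err := v.PurgeSnapshots(true, "", true)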
func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool) error {
	// Sync with remote first
	if err := v.syncWithRemote(); err != nil {
		return fmt.Errorf("syncing with remote: %w", err)
	}

	// Get snapshots from local database
	dbSnapshots, err := v.Repositories.Snapshots.ListRecent(v.ctx, 10000)
	if err != nil {
		return fmt.Errorf("listing snapshots: %w", err)
	}

	// Convert to SnapshotInfo format, only including completed snapshots
	snapshots := make([]SnapshotInfo, 0, len(dbSnapshots))
	for _, s := range dbSnapshots {
		if s.CompletedAt != nil {
			snapshots = append(snapshots, SnapshotInfo{
				ID:             s.ID,
				Timestamp:      s.StartedAt,
				CompressedSize: s.BlobSize,
			})
		}
	}

	// Sort by timestamp (newest first)
	sort.Slice(snapshots, func(i, j int) bool {
		return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
	})

	var toDelete []SnapshotInfo

	if keepLatest {
		// Keep only the most recent snapshot
		if len(snapshots) > 1 {
			toDelete = snapshots[1:]
		}
	} else if olderThan != "" {
		// Parse duration
		duration, err := parseDuration(olderThan)
		if err != nil {
			return fmt.Errorf("invalid duration: %w", err)
		}

		cutoff := time.Now().UTC().Add(-duration)
		for _, snap := range snapshots {
			if snap.Timestamp.Before(cutoff) {
				toDelete = append(toDelete, snap)
			}
		}
	}

	if len(toDelete) == 0 {
		fmt.Println("No snapshots to delete")
		return nil
	}

	// Show what will be deleted
	fmt.Printf("The following snapshots will be deleted:\n\n")
	for _, snap := range toDelete {
		fmt.Printf(" %s (%s, %s)\n",
			snap.ID,
			snap.Timestamp.Format("2006-01-02 15:04:05"),
			formatBytes(snap.CompressedSize))
	}

	// Confirm unless --force is used
	if !force {
		fmt.Printf("\nDelete %d snapshot(s)? [y/N] ", len(toDelete))
		var confirm string
		if _, err := fmt.Scanln(&confirm); err != nil {
			// Treat EOF or error as "no"
			fmt.Println("Cancelled")
			return nil
		}
		if strings.ToLower(confirm) != "y" {
			fmt.Println("Cancelled")
			return nil
		}
	} else {
		fmt.Printf("\nDeleting %d snapshot(s) (--force specified)\n", len(toDelete))
	}

	// Delete snapshots
	for _, snap := range toDelete {
		log.Info("Deleting snapshot", "id", snap.ID)
		if err := v.deleteSnapshot(snap.ID); err != nil {
			return fmt.Errorf("deleting snapshot %s: %w", snap.ID, err)
		}
	}

	fmt.Printf("Deleted %d snapshot(s)\n", len(toDelete))

	// Deleting snapshots only unreferences blobs; pruning removes them
	fmt.Println("\nNote: Run 'vaultik prune' to clean up unreferenced blobs.")

	return nil
}

// VerifySnapshot checks snapshot integrity by confirming that every blob
// listed in the snapshot's manifest exists in remote storage. Deep
// verification (re-hashing blob contents) is not yet implemented.
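//
// Snapshot IDs are expected to look like "hostname-20240115-143052Z"
// (hostname, then UTC date and time). The snapshot time printed below is
// parsed from that suffix on a best-effort basis.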
func (v *Vaultik) VerifySnapshot(snapshotID string, deep bool) error {
	// Parse snapshot ID to extract timestamp
	parts := strings.Split(snapshotID, "-")
	var snapshotTime time.Time
	if len(parts) >= 3 {
		// Format: hostname-YYYYMMDD-HHMMSSZ
		dateStr := parts[len(parts)-2]
		timeStr := parts[len(parts)-1]
		if len(dateStr) == 8 && len(timeStr) == 7 && strings.HasSuffix(timeStr, "Z") {
			timeStr = timeStr[:6] // Remove Z
			timestamp, err := time.Parse("20060102150405", dateStr+timeStr)
			if err == nil {
				snapshotTime = timestamp
			}
		}
	}

	fmt.Printf("Verifying snapshot %s\n", snapshotID)
	if !snapshotTime.IsZero() {
		fmt.Printf("Snapshot time: %s\n", snapshotTime.Format("2006-01-02 15:04:05 MST"))
	}
	fmt.Println()

	// Download and parse manifest
	manifest, err := v.downloadManifest(snapshotID)
	if err != nil {
		return fmt.Errorf("downloading manifest: %w", err)
	}

	fmt.Printf("Snapshot information:\n")
	fmt.Printf(" Blob count: %d\n", manifest.BlobCount)
	fmt.Printf(" Total size: %s\n", humanize.Bytes(uint64(manifest.TotalCompressedSize)))
	if manifest.Timestamp != "" {
		if t, err := time.Parse(time.RFC3339, manifest.Timestamp); err == nil {
			fmt.Printf(" Created: %s\n", t.Format("2006-01-02 15:04:05 MST"))
		}
	}
	fmt.Println()

	// Deep verification (downloading each blob and re-hashing it) is not yet
	// implemented. The flag is checked up front so it is honored even when
	// the manifest lists no blobs, instead of returning from inside the loop.
	if deep {
		// TODO: Implement deep verification
		fmt.Printf("Deep verification not yet implemented\n")
		return nil
	}
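
	// A possible shape for deep verification (sketch only; hashing and any
	// decryption details are assumptions, not taken from this codebase):
	// stream each blob with v.S3Client.GetObject, recompute its digest, and
	// compare it against the manifest entry's Hash, counting mismatches the
	// same way missing blobs are counted below.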

	// Check each blob exists
	fmt.Printf("Checking blob existence...\n")
	missing := 0
	verified := 0
	missingSize := int64(0)

	for _, blob := range manifest.Blobs {
		blobPath := fmt.Sprintf("blobs/%s/%s/%s", blob.Hash[:2], blob.Hash[2:4], blob.Hash)

		// Just check existence
		_, err := v.S3Client.StatObject(v.ctx, blobPath)
		if err != nil {
			fmt.Printf(" Missing: %s (%s)\n", blob.Hash, humanize.Bytes(uint64(blob.CompressedSize)))
			missing++
			missingSize += blob.CompressedSize
		} else {
			verified++
		}
	}
fmt.Printf("\nVerification complete:\n")
|
|
fmt.Printf(" Verified: %d blobs (%s)\n", verified,
|
|
humanize.Bytes(uint64(manifest.TotalCompressedSize-missingSize)))
|
|
if missing > 0 {
|
|
fmt.Printf(" Missing: %d blobs (%s)\n", missing, humanize.Bytes(uint64(missingSize)))
|
|
} else {
|
|
fmt.Printf(" Missing: 0 blobs\n")
|
|
}
|
|
fmt.Printf(" Status: ")
|
|
if missing > 0 {
|
|
fmt.Printf("FAILED - %d blobs are missing\n", missing)
|
|
return fmt.Errorf("%d blobs are missing", missing)
|
|
} else {
|
|
fmt.Printf("OK - All blobs verified\n")
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|

// Helper methods that were previously on SnapshotApp

// getManifestSize returns the total compressed size recorded in a
// snapshot's manifest, reusing downloadManifest rather than duplicating
// the fetch-and-decode logic.
func (v *Vaultik) getManifestSize(snapshotID string) (int64, error) {
	manifest, err := v.downloadManifest(snapshotID)
	if err != nil {
		return 0, err
	}
	return manifest.TotalCompressedSize, nil
}

// downloadManifest fetches and decodes a snapshot's manifest from S3.
func (v *Vaultik) downloadManifest(snapshotID string) (*snapshot.Manifest, error) {
	manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)

	reader, err := v.S3Client.GetObject(v.ctx, manifestPath)
	if err != nil {
		return nil, err
	}
	defer func() { _ = reader.Close() }()

	manifest, err := snapshot.DecodeManifest(reader)
	if err != nil {
		return nil, fmt.Errorf("decoding manifest: %w", err)
	}

	return manifest, nil
}

// deleteSnapshot removes a snapshot's metadata objects from S3 and then
// deletes its records from the local database.
func (v *Vaultik) deleteSnapshot(snapshotID string) error {
	// First, delete from S3: list all objects under metadata/{snapshotID}/
	prefix := fmt.Sprintf("metadata/%s/", snapshotID)
	objectCh := v.S3Client.ListObjectsStream(v.ctx, prefix, true)

	var objectsToDelete []string
	for object := range objectCh {
		if object.Err != nil {
			return fmt.Errorf("listing objects: %w", object.Err)
		}
		objectsToDelete = append(objectsToDelete, object.Key)
	}

	// Delete all objects
	for _, key := range objectsToDelete {
		if err := v.S3Client.RemoveObject(v.ctx, key); err != nil {
			return fmt.Errorf("removing %s: %w", key, err)
		}
	}

	// Then, delete from the local database. Delete related records first to
	// avoid foreign key constraints.
	if err := v.Repositories.Snapshots.DeleteSnapshotFiles(v.ctx, snapshotID); err != nil {
		log.Error("Failed to delete snapshot files", "snapshot_id", snapshotID, "error", err)
	}
	if err := v.Repositories.Snapshots.DeleteSnapshotBlobs(v.ctx, snapshotID); err != nil {
		log.Error("Failed to delete snapshot blobs", "snapshot_id", snapshotID, "error", err)
	}
	if err := v.Repositories.Snapshots.DeleteSnapshotUploads(v.ctx, snapshotID); err != nil {
		log.Error("Failed to delete snapshot uploads", "snapshot_id", snapshotID, "error", err)
	}

	// Now delete the snapshot itself
	if err := v.Repositories.Snapshots.Delete(v.ctx, snapshotID); err != nil {
		return fmt.Errorf("deleting snapshot from database: %w", err)
	}

	return nil
}

// syncWithRemote removes local snapshot records that no longer have a
// corresponding snapshot in remote storage.
func (v *Vaultik) syncWithRemote() error {
	log.Info("Syncing with remote snapshots")

	// Get all remote snapshot IDs
	remoteSnapshots := make(map[string]bool)
	objectCh := v.S3Client.ListObjectsStream(v.ctx, "metadata/", false)

	for object := range objectCh {
		if object.Err != nil {
			return fmt.Errorf("listing remote snapshots: %w", object.Err)
		}

		// Extract snapshot ID from paths like metadata/hostname-20240115-143052Z/
		parts := strings.Split(object.Key, "/")
		if len(parts) >= 2 && parts[0] == "metadata" && parts[1] != "" {
			remoteSnapshots[parts[1]] = true
		}
	}

	log.Debug("Found remote snapshots", "count", len(remoteSnapshots))

	// Get all local snapshots (use a high limit to get all)
	localSnapshots, err := v.Repositories.Snapshots.ListRecent(v.ctx, 10000)
	if err != nil {
		return fmt.Errorf("listing local snapshots: %w", err)
	}

	// Remove local snapshots that don't exist remotely. Related records are
	// deleted first to avoid foreign key constraints, matching the cleanup
	// done in ListSnapshots and deleteSnapshot.
	removedCount := 0
	for _, snap := range localSnapshots {
		if !remoteSnapshots[snap.ID] {
			log.Info("Removing local snapshot not found in remote", "snapshot_id", snap.ID)
			if err := v.Repositories.Snapshots.DeleteSnapshotFiles(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete snapshot files", "snapshot_id", snap.ID, "error", err)
			}
			if err := v.Repositories.Snapshots.DeleteSnapshotBlobs(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete snapshot blobs", "snapshot_id", snap.ID, "error", err)
			}
			if err := v.Repositories.Snapshots.DeleteSnapshotUploads(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete snapshot uploads", "snapshot_id", snap.ID, "error", err)
			}
			if err := v.Repositories.Snapshots.Delete(v.ctx, snap.ID); err != nil {
				log.Error("Failed to delete local snapshot", "snapshot_id", snap.ID, "error", err)
			} else {
				removedCount++
			}
		}
	}

	if removedCount > 0 {
		log.Info("Removed local snapshots not found in remote", "count", removedCount)
	}

	return nil
}