Refactor: Move Vaultik struct and methods to internal/vaultik package

- Created new internal/vaultik package with unified Vaultik struct
- Moved all command methods (snapshot, info, prune, verify) from CLI to vaultik package
- Implemented single constructor that handles crypto capabilities automatically
- Added CanDecrypt() method to check if decryption is available
- Updated all CLI commands to use the new vaultik.Vaultik struct
- Removed old fragmented App structs and WithCrypto wrapper
- Fixed context management - Vaultik now owns its context lifecycle
- Cleaned up package imports and dependencies

This creates a cleaner separation between CLI/Cobra code and business logic,
with all vaultik operations now centralized in the internal/vaultik package.
This commit is contained in:
2025-07-26 14:47:26 +02:00
parent 5c70405a85
commit e29a995120
22 changed files with 1494 additions and 1320 deletions

View File

@@ -69,8 +69,10 @@ type ScannerConfig struct {
type ScanResult struct {
FilesScanned int
FilesSkipped int
FilesDeleted int
BytesScanned int64
BytesSkipped int64
BytesDeleted int64
ChunksCreated int
BlobsCreated int
StartTime time.Time
@@ -138,6 +140,11 @@ func (s *Scanner) Scan(ctx context.Context, path string, snapshotID string) (*Sc
defer s.progress.Stop()
}
// Phase 0: Check for deleted files from previous snapshots
if err := s.detectDeletedFiles(ctx, path, result); err != nil {
return nil, fmt.Errorf("detecting deleted files: %w", err)
}
// Phase 1: Scan directory and collect files to process
log.Info("Phase 1/3: Scanning directory structure")
filesToProcess, err := s.scanPhase(ctx, path, result)
@@ -163,28 +170,29 @@ func (s *Scanner) Scan(ctx context.Context, path string, snapshotID string) (*Sc
"files_skipped", result.FilesSkipped,
"bytes_skipped", humanize.Bytes(uint64(result.BytesSkipped)))
// Print detailed scan summary
fmt.Printf("\n=== Scan Summary ===\n")
fmt.Printf("Total files examined: %d\n", result.FilesScanned)
fmt.Printf("Files with content changes: %d\n", len(filesToProcess))
fmt.Printf("Files with unchanged content: %d\n", result.FilesSkipped)
fmt.Printf("Total size of changed files: %s\n", humanize.Bytes(uint64(totalSizeToProcess)))
fmt.Printf("Total size of unchanged files: %s\n", humanize.Bytes(uint64(result.BytesSkipped)))
if len(filesToProcess) > 0 {
fmt.Printf("\nStarting snapshot of %d changed files...\n\n", len(filesToProcess))
} else {
fmt.Printf("\nNo file contents have changed.\n")
fmt.Printf("Creating metadata-only snapshot to capture current state...\n\n")
// Print scan summary
fmt.Printf("Scan complete: %s examined (%s), %s to process (%s)",
formatNumber(result.FilesScanned),
humanize.Bytes(uint64(totalSizeToProcess+result.BytesSkipped)),
formatNumber(len(filesToProcess)),
humanize.Bytes(uint64(totalSizeToProcess)))
if result.FilesDeleted > 0 {
fmt.Printf(", %s deleted (%s)",
formatNumber(result.FilesDeleted),
humanize.Bytes(uint64(result.BytesDeleted)))
}
fmt.Println()
// Phase 2: Process files and create chunks
if len(filesToProcess) > 0 {
fmt.Printf("Processing %s files...\n", formatNumber(len(filesToProcess)))
log.Info("Phase 2/3: Creating snapshot (chunking, compressing, encrypting, and uploading blobs)")
if err := s.processPhase(ctx, filesToProcess, result); err != nil {
return nil, fmt.Errorf("process phase failed: %w", err)
}
} else {
log.Info("Phase 2/3: Skipping (no file contents changed, metadata-only snapshot)")
fmt.Printf("No files need processing. Creating metadata-only snapshot.\n")
log.Info("Phase 2/3: Skipping (no files need processing, metadata-only snapshot)")
}
// Get final stats from packer
@@ -266,10 +274,9 @@ func (s *Scanner) scanPhase(ctx context.Context, path string, result *ScanResult
changedCount := len(filesToProcess)
mu.Unlock()
fmt.Printf("Scan progress: %d files examined, %s total size, %d files changed\n",
filesScanned,
humanize.Bytes(uint64(bytesScanned)),
changedCount)
fmt.Printf("Scan progress: %s files examined, %s changed\n",
formatNumber(int(filesScanned)),
formatNumber(changedCount))
lastStatusTime = time.Now()
}
@@ -320,8 +327,7 @@ func (s *Scanner) processPhase(ctx context.Context, filesToProcess []*FileToProc
eta = elapsed / time.Duration(filesProcessed) * time.Duration(remaining)
}
fmt.Printf("Snapshot progress: %d/%d files processed, %d chunks created, %d blobs uploaded",
filesProcessed, totalFiles, result.ChunksCreated, result.BlobsCreated)
fmt.Printf("Progress: %s/%s files", formatNumber(filesProcessed), formatNumber(totalFiles))
if remaining > 0 && eta > 0 {
fmt.Printf(", ETA: %s", eta.Round(time.Second))
}
@@ -558,8 +564,6 @@ func (s *Scanner) associateExistingChunks(ctx context.Context, path string) erro
// handleBlobReady is called by the packer when a blob is finalized
func (s *Scanner) handleBlobReady(blobWithReader *blob.BlobWithReader) error {
log.Debug("Invoking blob upload handler", "blob_hash", blobWithReader.Hash[:8]+"...")
startTime := time.Now().UTC()
finishedBlob := blobWithReader.FinishedBlob
@@ -854,3 +858,33 @@ func (s *Scanner) processFileStreaming(ctx context.Context, fileToProcess *FileT
// GetProgress returns the scanner's progress reporter so callers can observe
// scan/upload progress. The reporter may be nil when progress reporting was
// not enabled for this Scanner (elsewhere in this file access to s.progress
// is nil-guarded), so callers should check before use.
func (s *Scanner) GetProgress() *ProgressReporter {
	return s.progress
}
// detectDeletedFiles finds files that existed in previous snapshots but no
// longer exist on disk, and accumulates their count and total size into
// result.FilesDeleted / result.BytesDeleted.
//
// It lists every known file under the given path prefix from the database and
// stats each one on disk. Only a definitive os.IsNotExist counts as a
// deletion; other stat failures (e.g. permission errors) are logged and
// skipped rather than being misreported as either "present" or "deleted".
// Returns an error only if the database listing fails or ctx is cancelled.
func (s *Scanner) detectDeletedFiles(ctx context.Context, path string, result *ScanResult) error {
	// Get all files with this path prefix from the database.
	files, err := s.repos.Files.ListByPrefix(ctx, path)
	if err != nil {
		return fmt.Errorf("listing files by prefix: %w", err)
	}

	for _, file := range files {
		// The listing from prior snapshots can be large; honor cancellation
		// between stat calls instead of running the loop to completion.
		if err := ctx.Err(); err != nil {
			return err
		}

		_, err := s.fs.Stat(file.Path)
		switch {
		case err == nil:
			// File still present on disk; nothing to do.
		case os.IsNotExist(err):
			// File has been deleted since the previous snapshot.
			result.FilesDeleted++
			result.BytesDeleted += file.Size
			log.Debug("Detected deleted file", "path", file.Path, "size", file.Size)
		default:
			// Don't treat transient or permission-related stat failures as
			// deletions, but surface them for debugging instead of silently
			// ignoring them as the previous implementation did.
			log.Debug("Stat failed while checking for deleted file", "path", file.Path, "error", err)
		}
	}
	return nil
}
// formatNumber formats an integer with comma thousands separators for
// display, e.g. 1234567 -> "1,234,567".
//
// Bug fixed relative to the previous version: the old fast path tested
// n < 1000, which is true for every negative number, so values like
// -1500000 were returned ungrouped ("-1500000") while positive values of
// the same magnitude were grouped. The implementation is now stdlib-only
// and groups negatives correctly.
func formatNumber(n int) string {
	// Values in (-1000, 1000) need no separators.
	if n > -1000 && n < 1000 {
		return fmt.Sprintf("%d", n)
	}
	if n < 0 {
		// NOTE: -n overflows for math.MinInt; that cannot occur for the
		// file/chunk/byte counts this helper formats.
		return "-" + formatNumber(-n)
	}
	// Recursively format the leading digit groups, then append the final
	// zero-padded group of three.
	return formatNumber(n/1000) + fmt.Sprintf(",%03d", n%1000)
}