- Changed blob table to use ID (UUID) as primary key instead of hash
- Blob records are now created at packing start, enabling immediate chunk associations
- Implemented streaming chunking to process large files without memory exhaustion
- Fixed blob manifest generation to include all referenced blobs
- Updated all foreign key references from blob_hash to blob_id
- Added progress reporting and improved error handling
- Enforced encryption requirement for all blob packing
- Updated tests to use test encryption keys
- Added Cyrillic transliteration to README
package backup

import (
	"context"
	"database/sql"
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/blob"
	"git.eeqj.de/sneak/vaultik/internal/chunker"
	"git.eeqj.de/sneak/vaultik/internal/crypto"
	"git.eeqj.de/sneak/vaultik/internal/database"
	"git.eeqj.de/sneak/vaultik/internal/log"
	"github.com/dustin/go-humanize"
	"github.com/spf13/afero"
)

// FileToProcess holds information about a file that needs processing
type FileToProcess struct {
	Path     string
	FileInfo os.FileInfo
	File     *database.File
}

// Scanner scans directories and populates the database with file and chunk information
type Scanner struct {
	fs               afero.Fs
	chunker          *chunker.Chunker
	packer           *blob.Packer
	repos            *database.Repositories
	s3Client         S3Client
	maxBlobSize      int64
	compressionLevel int
	ageRecipient     string
	snapshotID       string // Current snapshot being processed
	progress         *ProgressReporter

	// Mutex for coordinating blob creation
	packerMu sync.Mutex // Blocks chunk production during blob creation

	// Context for cancellation
	scanCtx context.Context
}

// S3Client interface for blob storage operations
type S3Client interface {
	PutObject(ctx context.Context, key string, data io.Reader) error
}

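// Any blob store that can accept a streamed object satisfies S3Client. As an
// illustrative sketch only (the minio import and adapter below are assumptions,
// not a statement about what vaultik actually wires up elsewhere), an adapter
// might look like:
//
//	type minioS3Client struct {
//		client *minio.Client
//		bucket string
//	}
//
//	func (c *minioS3Client) PutObject(ctx context.Context, key string, data io.Reader) error {
//		// Size -1 streams the object without knowing its length up front.
//		_, err := c.client.PutObject(ctx, c.bucket, key, data, -1, minio.PutObjectOptions{})
//		return err
//	}
//
// Because the scanner only needs PutObject, tests can substitute an in-memory fake.
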
// ScannerConfig contains configuration for the scanner
type ScannerConfig struct {
	FS               afero.Fs
	ChunkSize        int64
	Repositories     *database.Repositories
	S3Client         S3Client
	MaxBlobSize      int64
	CompressionLevel int
	AgeRecipients    []string // Required: encryption is mandatory, NewScanner fails if empty
	EnableProgress   bool     // Enable progress reporting
}

// ScanResult contains the results of a scan operation
type ScanResult struct {
	FilesScanned  int
	FilesSkipped  int
	BytesScanned  int64
	BytesSkipped  int64
	ChunksCreated int
	BlobsCreated  int
	StartTime     time.Time
	EndTime       time.Time
}

// NewScanner creates a new scanner instance. It returns nil if no age
// recipients are configured or if the encryptor or blob packer cannot be
// created, since encryption is required for all blob packing.
func NewScanner(cfg ScannerConfig) *Scanner {
	// Create encryptor (required for blob packing)
	if len(cfg.AgeRecipients) == 0 {
		log.Error("No age recipients configured - encryption is required")
		return nil
	}

	enc, err := crypto.NewEncryptor(cfg.AgeRecipients)
	if err != nil {
		log.Error("Failed to create encryptor", "error", err)
		return nil
	}

	// Create blob packer with encryption
	packerCfg := blob.PackerConfig{
		MaxBlobSize:      cfg.MaxBlobSize,
		CompressionLevel: cfg.CompressionLevel,
		Encryptor:        enc,
		Repositories:     cfg.Repositories,
	}
	packer, err := blob.NewPacker(packerCfg)
	if err != nil {
		log.Error("Failed to create packer", "error", err)
		return nil
	}

	var progress *ProgressReporter
	if cfg.EnableProgress {
		progress = NewProgressReporter()
	}

	return &Scanner{
		fs:               cfg.FS,
		chunker:          chunker.NewChunker(cfg.ChunkSize),
		packer:           packer,
		repos:            cfg.Repositories,
		s3Client:         cfg.S3Client,
		maxBlobSize:      cfg.MaxBlobSize,
		compressionLevel: cfg.CompressionLevel,
		ageRecipient:     strings.Join(cfg.AgeRecipients, ","),
		progress:         progress,
	}
}

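// Illustrative construction and use, assuming the repositories, S3 client, and
// snapshot ID are created elsewhere (the recipient string and size figures
// below are placeholders, not values the project prescribes):
//
//	scanner := NewScanner(ScannerConfig{
//		FS:               afero.NewOsFs(),
//		ChunkSize:        1 << 20,  // placeholder average chunk size
//		Repositories:     repos,    // *database.Repositories, wired elsewhere
//		S3Client:         s3,       // any S3Client implementation
//		MaxBlobSize:      64 << 20, // placeholder blob size limit
//		CompressionLevel: 3,
//		AgeRecipients:    []string{"age1..."}, // required; the scanner refuses to run without encryption
//		EnableProgress:   true,
//	})
//	if scanner == nil {
//		// configuration was rejected (e.g. missing age recipients)
//	}
//	result, err := scanner.Scan(ctx, "/data", snapshotID)
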
// Scan walks the given path and populates the database for snapshotID. It runs
// in two phases: the first walks the directory tree and records which files
// need processing, the second chunks those files and packs the chunks into
// encrypted blobs.
func (s *Scanner) Scan(ctx context.Context, path string, snapshotID string) (*ScanResult, error) {
	s.snapshotID = snapshotID
	s.scanCtx = ctx
	result := &ScanResult{
		StartTime: time.Now(),
	}

	// Set blob handler for concurrent upload
	if s.s3Client != nil {
		log.Debug("Setting blob handler for S3 uploads")
		s.packer.SetBlobHandler(s.handleBlobReady)
	} else {
		log.Debug("No S3 client configured, blobs will not be uploaded")
	}

	// Start progress reporting if enabled
	if s.progress != nil {
		s.progress.Start()
		defer s.progress.Stop()
	}

	// Phase 1: Scan directory and collect files to process
	log.Info("Phase 1: Scanning directory structure")
	filesToProcess, err := s.scanPhase(ctx, path, result)
	if err != nil {
		return nil, fmt.Errorf("scan phase failed: %w", err)
	}

	// Calculate total size to process
	var totalSizeToProcess int64
	for _, file := range filesToProcess {
		totalSizeToProcess += file.FileInfo.Size()
	}

	// Update progress with total size and file count
	if s.progress != nil {
		s.progress.SetTotalSize(totalSizeToProcess)
		s.progress.GetStats().TotalFiles.Store(int64(len(filesToProcess)))
	}

	log.Info("Phase 1 complete",
		"total_files", len(filesToProcess),
		"total_size", humanize.Bytes(uint64(totalSizeToProcess)),
		"files_skipped", result.FilesSkipped,
		"bytes_skipped", humanize.Bytes(uint64(result.BytesSkipped)))

	// Phase 2: Process files and create chunks
	if len(filesToProcess) > 0 {
		log.Info("Phase 2: Processing files and creating chunks")
		if err := s.processPhase(ctx, filesToProcess, result); err != nil {
			return nil, fmt.Errorf("process phase failed: %w", err)
		}
	}

	// Get final stats from packer
	blobs := s.packer.GetFinishedBlobs()
	result.BlobsCreated += len(blobs)

	result.EndTime = time.Now()
	return result, nil
}

// scanPhase performs the initial directory scan to identify files to process
func (s *Scanner) scanPhase(ctx context.Context, path string, result *ScanResult) ([]*FileToProcess, error) {
	var filesToProcess []*FileToProcess
	var mu sync.Mutex

	log.Debug("Starting directory walk", "path", path)
	err := afero.Walk(s.fs, path, func(path string, info os.FileInfo, err error) error {
		log.Debug("Walking file", "path", path)
		if err != nil {
			log.Debug("Error walking path", "path", path, "error", err)
			return err
		}

		// Check context cancellation
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		// Check file and update metadata
		file, needsProcessing, err := s.checkFileAndUpdateMetadata(ctx, path, info, result)
		if err != nil {
			// Don't log context cancellation as an error
			if err == context.Canceled {
				return err
			}
			return fmt.Errorf("failed to check %s: %w", path, err)
		}

		// If file needs processing, add to list
		if needsProcessing && info.Mode().IsRegular() && info.Size() > 0 {
			mu.Lock()
			filesToProcess = append(filesToProcess, &FileToProcess{
				Path:     path,
				FileInfo: info,
				File:     file,
			})
			mu.Unlock()
		}

		return nil
	})

	if err != nil {
		return nil, err
	}

	return filesToProcess, nil
}

// processPhase processes the files that need backing up
func (s *Scanner) processPhase(ctx context.Context, filesToProcess []*FileToProcess, result *ScanResult) error {
	// Process each file
	for _, fileToProcess := range filesToProcess {
		// Update progress
		if s.progress != nil {
			s.progress.GetStats().CurrentFile.Store(fileToProcess.Path)
		}

		// Process file in streaming fashion
		if err := s.processFileStreaming(ctx, fileToProcess, result); err != nil {
			return fmt.Errorf("processing file %s: %w", fileToProcess.Path, err)
		}

		// Update files processed counter
		if s.progress != nil {
			s.progress.GetStats().FilesProcessed.Add(1)
		}
	}

	// Final flush (outside any transaction)
	s.packerMu.Lock()
	if err := s.packer.Flush(); err != nil {
		s.packerMu.Unlock()
		return fmt.Errorf("flushing packer: %w", err)
	}
	s.packerMu.Unlock()

	// If no S3 client, store any remaining blobs
	if s.s3Client == nil {
		blobs := s.packer.GetFinishedBlobs()
		for _, b := range blobs {
			// Blob metadata is already stored incrementally during packing
			// Just add the blob to the snapshot
			err := s.repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
				return s.repos.Snapshots.AddBlob(ctx, tx, s.snapshotID, b.ID, b.Hash)
			})
			if err != nil {
				return fmt.Errorf("storing blob metadata: %w", err)
			}
		}
		result.BlobsCreated += len(blobs)
	}

	return nil
}

// checkFileAndUpdateMetadata checks if a file needs processing and updates metadata
func (s *Scanner) checkFileAndUpdateMetadata(ctx context.Context, path string, info os.FileInfo, result *ScanResult) (*database.File, bool, error) {
	// Check context cancellation
	select {
	case <-ctx.Done():
		return nil, false, ctx.Err()
	default:
	}

	var file *database.File
	var needsProcessing bool

	// Use a short transaction just for the database operations
	err := s.repos.WithTx(ctx, func(txCtx context.Context, tx *sql.Tx) error {
		var err error
		file, needsProcessing, err = s.checkFile(txCtx, tx, path, info, result)
		return err
	})

	return file, needsProcessing, err
}

// checkFile checks if a file needs processing and updates metadata within a transaction
func (s *Scanner) checkFile(ctx context.Context, tx *sql.Tx, path string, info os.FileInfo, result *ScanResult) (*database.File, bool, error) {
	// Get file ownership. This assertion only succeeds when the FileInfo's Sys()
	// value exposes Uid()/Gid() methods; otherwise uid and gid fall back to zero.
	stat, ok := info.Sys().(interface {
		Uid() uint32
		Gid() uint32
	})

	var uid, gid uint32
	if ok {
		uid = stat.Uid()
		gid = stat.Gid()
	}

	// Check if it's a symlink
	var linkTarget string
	if info.Mode()&os.ModeSymlink != 0 {
		// Read the symlink target
		if linker, ok := s.fs.(afero.LinkReader); ok {
			linkTarget, _ = linker.ReadlinkIfPossible(path)
		}
	}

	// Create file record
	file := &database.File{
		Path:       path,
		MTime:      info.ModTime(),
		CTime:      info.ModTime(), // afero doesn't provide ctime
		Size:       info.Size(),
		Mode:       uint32(info.Mode()),
		UID:        uid,
		GID:        gid,
		LinkTarget: linkTarget,
	}

	// Check if file has changed since last backup
	log.Debug("Checking if file exists in database", "path", path)
	existingFile, err := s.repos.Files.GetByPathTx(ctx, tx, path)
	if err != nil {
		return nil, false, fmt.Errorf("checking existing file: %w", err)
	}

	fileChanged := existingFile == nil || s.hasFileChanged(existingFile, file)

	// Always update file metadata
	log.Debug("Updating file metadata", "path", path, "changed", fileChanged)
	if err := s.repos.Files.Create(ctx, tx, file); err != nil {
		return nil, false, err
	}
	log.Debug("File metadata updated", "path", path)

	// Add file to snapshot
	log.Debug("Adding file to snapshot", "path", path, "snapshot", s.snapshotID)
	if err := s.repos.Snapshots.AddFile(ctx, tx, s.snapshotID, path); err != nil {
		return nil, false, fmt.Errorf("adding file to snapshot: %w", err)
	}
	log.Debug("File added to snapshot", "path", path)

	result.FilesScanned++

	// Update progress
	if s.progress != nil {
		stats := s.progress.GetStats()
		stats.FilesScanned.Add(1)
		stats.CurrentFile.Store(path)
	}

	// Track skipped files
	if info.Mode().IsRegular() && info.Size() > 0 && !fileChanged {
		result.FilesSkipped++
		result.BytesSkipped += info.Size()
		if s.progress != nil {
			stats := s.progress.GetStats()
			stats.FilesSkipped.Add(1)
			stats.BytesSkipped.Add(info.Size())
		}
		// File hasn't changed, but we still need to associate existing chunks with this snapshot
		log.Debug("File hasn't changed, associating existing chunks", "path", path)
		if err := s.associateExistingChunks(ctx, tx, path); err != nil {
			return nil, false, fmt.Errorf("associating existing chunks: %w", err)
		}
		log.Debug("Existing chunks associated", "path", path)
	} else {
		// File changed or is not a regular file
		result.BytesScanned += info.Size()
		if s.progress != nil {
			s.progress.GetStats().BytesScanned.Add(info.Size())
		}
	}

	return file, fileChanged, nil
}

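// A FileInfo whose Sys() value satisfies the Uid()/Gid() assertion in checkFile
// could look like the following test double (illustrative only; nothing in this
// package defines such a type):
//
//	type fakeOwner struct{ uid, gid uint32 }
//
//	func (f fakeOwner) Uid() uint32 { return f.uid }
//	func (f fakeOwner) Gid() uint32 { return f.gid }
//
// A FileInfo implementation that returns fakeOwner{1000, 1000} from Sys() would
// yield non-zero ownership in the stored file record.
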
// hasFileChanged determines if a file has changed since last backup
func (s *Scanner) hasFileChanged(existingFile, newFile *database.File) bool {
	// Check if any metadata has changed
	if existingFile.Size != newFile.Size {
		return true
	}
	if existingFile.MTime.Unix() != newFile.MTime.Unix() {
		return true
	}
	if existingFile.Mode != newFile.Mode {
		return true
	}
	if existingFile.UID != newFile.UID {
		return true
	}
	if existingFile.GID != newFile.GID {
		return true
	}
	if existingFile.LinkTarget != newFile.LinkTarget {
		return true
	}
	return false
}

// associateExistingChunks links existing chunks from an unchanged file to the current snapshot
func (s *Scanner) associateExistingChunks(ctx context.Context, tx *sql.Tx, path string) error {
	log.Debug("associateExistingChunks start", "path", path)

	// Get existing file chunks
	log.Debug("Getting existing file chunks", "path", path)
	fileChunks, err := s.repos.FileChunks.GetByFileTx(ctx, tx, path)
	if err != nil {
		return fmt.Errorf("getting existing file chunks: %w", err)
	}
	log.Debug("Got file chunks", "path", path, "count", len(fileChunks))

	// For each chunk, find its blob and associate with current snapshot
	processedBlobs := make(map[string]bool)
	for i, fc := range fileChunks {
		log.Debug("Processing chunk", "path", path, "chunk_index", i, "chunk_hash", fc.ChunkHash)

		// Find which blob contains this chunk
		log.Debug("Finding blob for chunk", "chunk_hash", fc.ChunkHash)
		blobChunk, err := s.repos.BlobChunks.GetByChunkHashTx(ctx, tx, fc.ChunkHash)
		if err != nil {
			return fmt.Errorf("finding blob for chunk %s: %w", fc.ChunkHash, err)
		}
		if blobChunk == nil {
			log.Warn("Chunk exists but not in any blob", "chunk", fc.ChunkHash, "file", path)
			continue
		}
		log.Debug("Found blob for chunk", "chunk_hash", fc.ChunkHash, "blob_id", blobChunk.BlobID)

		// Get blob to find its hash
		blob, err := s.repos.Blobs.GetByID(ctx, blobChunk.BlobID)
		if err != nil {
			return fmt.Errorf("getting blob %s: %w", blobChunk.BlobID, err)
		}
		if blob == nil {
			log.Warn("Blob record not found", "blob_id", blobChunk.BlobID)
			continue
		}

		// Add blob to snapshot if not already processed
		if !processedBlobs[blobChunk.BlobID] {
			log.Debug("Adding blob to snapshot", "blob_id", blobChunk.BlobID, "blob_hash", blob.Hash, "snapshot", s.snapshotID)
			if err := s.repos.Snapshots.AddBlob(ctx, tx, s.snapshotID, blobChunk.BlobID, blob.Hash); err != nil {
				return fmt.Errorf("adding existing blob to snapshot: %w", err)
			}
			log.Debug("Added blob to snapshot", "blob_id", blobChunk.BlobID)
			processedBlobs[blobChunk.BlobID] = true
		}
	}

	log.Debug("associateExistingChunks complete", "path", path, "blobs_processed", len(processedBlobs))
	return nil
}

// handleBlobReady is called by the packer when a blob is finalized
func (s *Scanner) handleBlobReady(blobWithReader *blob.BlobWithReader) error {
	log.Debug("Blob handler called", "blob_hash", blobWithReader.Hash[:8]+"...")

	startTime := time.Now()
	finishedBlob := blobWithReader.FinishedBlob

	// Report upload start
	if s.progress != nil {
		s.progress.ReportUploadStart(finishedBlob.Hash, finishedBlob.Compressed)
	}

	// Upload to S3 first (without holding any locks)
	// Use scan context for cancellation support
	ctx := s.scanCtx
	if ctx == nil {
		ctx = context.Background()
	}
	if err := s.s3Client.PutObject(ctx, "blobs/"+finishedBlob.Hash, blobWithReader.Reader); err != nil {
		return fmt.Errorf("uploading blob %s to S3: %w", finishedBlob.Hash, err)
	}

	uploadDuration := time.Since(startTime)

	// Report upload complete
	if s.progress != nil {
		s.progress.ReportUploadComplete(finishedBlob.Hash, finishedBlob.Compressed, uploadDuration)
	}

	// Update progress
	if s.progress != nil {
		stats := s.progress.GetStats()
		stats.BlobsUploaded.Add(1)
		stats.BytesUploaded.Add(finishedBlob.Compressed)
		stats.BlobsCreated.Add(1)
	}

	// Store metadata in database (after upload is complete)
	dbCtx := s.scanCtx
	if dbCtx == nil {
		dbCtx = context.Background()
	}
	err := s.repos.WithTx(dbCtx, func(ctx context.Context, tx *sql.Tx) error {
		// Update blob upload timestamp
		if err := s.repos.Blobs.UpdateUploaded(ctx, tx, finishedBlob.ID); err != nil {
			return fmt.Errorf("updating blob upload timestamp: %w", err)
		}

		// Add the blob to the snapshot
		if err := s.repos.Snapshots.AddBlob(ctx, tx, s.snapshotID, finishedBlob.ID, finishedBlob.Hash); err != nil {
			return fmt.Errorf("adding blob to snapshot: %w", err)
		}

		// Record upload metrics
		upload := &database.Upload{
			BlobHash:   finishedBlob.Hash,
			UploadedAt: startTime,
			Size:       finishedBlob.Compressed,
			DurationMs: uploadDuration.Milliseconds(),
		}
		if err := s.repos.Uploads.Create(ctx, tx, upload); err != nil {
			return fmt.Errorf("recording upload metrics: %w", err)
		}

		return nil
	})

	// Cleanup temp file if needed
	if blobWithReader.TempFile != nil {
		tempName := blobWithReader.TempFile.Name()
		if err := blobWithReader.TempFile.Close(); err != nil {
			log.Fatal("Failed to close temp file", "file", tempName, "error", err)
		}
		if err := os.Remove(tempName); err != nil {
			log.Fatal("Failed to remove temp file", "file", tempName, "error", err)
		}
	}

	return err
}

// processFileStreaming processes a file by streaming chunks directly to the packer
func (s *Scanner) processFileStreaming(ctx context.Context, fileToProcess *FileToProcess, result *ScanResult) error {
	// Open the file
	file, err := s.fs.Open(fileToProcess.Path)
	if err != nil {
		return fmt.Errorf("opening file: %w", err)
	}
	defer func() { _ = file.Close() }()

	// We'll collect file chunks for database storage
	// but process them for packing as we go
	type chunkInfo struct {
		fileChunk database.FileChunk
		offset    int64
		size      int64
	}
	var chunks []chunkInfo
	chunkIndex := 0

	// Process chunks in streaming fashion
	err = s.chunker.ChunkReaderStreaming(file, func(chunk chunker.Chunk) error {
		// Check for cancellation
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		log.Debug("Processing chunk",
			"file", fileToProcess.Path,
			"chunk", chunkIndex,
			"hash", chunk.Hash,
			"size", chunk.Size)

		// Check if chunk already exists
		chunkExists := false
		err := s.repos.WithTx(ctx, func(txCtx context.Context, tx *sql.Tx) error {
			existing, err := s.repos.Chunks.GetByHash(txCtx, chunk.Hash)
			if err != nil {
				return err
			}
			chunkExists = (existing != nil)

			// Store chunk if new
			if !chunkExists {
				dbChunk := &database.Chunk{
					ChunkHash: chunk.Hash,
					SHA256:    chunk.Hash,
					Size:      chunk.Size,
				}
				if err := s.repos.Chunks.Create(txCtx, tx, dbChunk); err != nil {
					return fmt.Errorf("creating chunk: %w", err)
				}
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("checking/storing chunk: %w", err)
		}

		// Track file chunk association for later storage
		chunks = append(chunks, chunkInfo{
			fileChunk: database.FileChunk{
				Path:      fileToProcess.Path,
				Idx:       chunkIndex,
				ChunkHash: chunk.Hash,
			},
			offset: chunk.Offset,
			size:   chunk.Size,
		})

		// Update stats
		if chunkExists {
			result.FilesSkipped++ // Track as skipped for now
			result.BytesSkipped += chunk.Size
			if s.progress != nil {
				s.progress.GetStats().BytesSkipped.Add(chunk.Size)
			}
		} else {
			result.ChunksCreated++
			result.BytesScanned += chunk.Size
			if s.progress != nil {
				s.progress.GetStats().ChunksCreated.Add(1)
				s.progress.GetStats().BytesProcessed.Add(chunk.Size)
				s.progress.UpdateChunkingActivity()
			}
		}

		// Add chunk to packer immediately (streaming)
		// This happens outside the database transaction
		if !chunkExists {
			s.packerMu.Lock()
			err := s.packer.AddChunk(&blob.ChunkRef{
				Hash: chunk.Hash,
				Data: chunk.Data,
			})
			if err == blob.ErrBlobSizeLimitExceeded {
				// Finalize current blob and retry
				if err := s.packer.FinalizeBlob(); err != nil {
					s.packerMu.Unlock()
					return fmt.Errorf("finalizing blob: %w", err)
				}
				// Retry adding the chunk
				if err := s.packer.AddChunk(&blob.ChunkRef{
					Hash: chunk.Hash,
					Data: chunk.Data,
				}); err != nil {
					s.packerMu.Unlock()
					return fmt.Errorf("adding chunk after finalize: %w", err)
				}
			} else if err != nil {
				s.packerMu.Unlock()
				return fmt.Errorf("adding chunk to packer: %w", err)
			}
			s.packerMu.Unlock()
		}

		// Clear chunk data from memory immediately after use
		chunk.Data = nil

		chunkIndex++
		return nil
	})

	if err != nil {
		return fmt.Errorf("chunking file: %w", err)
	}

	// Store file-chunk associations and chunk-file mappings in database
	err = s.repos.WithTx(ctx, func(txCtx context.Context, tx *sql.Tx) error {
		for _, ci := range chunks {
			// Create file-chunk mapping
			if err := s.repos.FileChunks.Create(txCtx, tx, &ci.fileChunk); err != nil {
				return fmt.Errorf("creating file chunk: %w", err)
			}

			// Create chunk-file mapping
			chunkFile := &database.ChunkFile{
				ChunkHash:  ci.fileChunk.ChunkHash,
				FilePath:   fileToProcess.Path,
				FileOffset: ci.offset,
				Length:     ci.size,
			}
			if err := s.repos.ChunkFiles.Create(txCtx, tx, chunkFile); err != nil {
				return fmt.Errorf("creating chunk file: %w", err)
			}
		}

		// Add file to snapshot
		if err := s.repos.Snapshots.AddFile(txCtx, tx, s.snapshotID, fileToProcess.Path); err != nil {
			return fmt.Errorf("adding file to snapshot: %w", err)
		}

		return nil
	})

	return err
}
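// A minimal exercise of the streaming path against an in-memory filesystem, in
// the spirit of the package tests (illustrative sketch only; the repositories,
// snapshot ID, and test age recipient are placeholders assumed to come from
// test fixtures). With no S3Client configured, finished blobs stay with the
// packer and are attached to the snapshot in processPhase:
//
//	fs := afero.NewMemMapFs()
//	_ = afero.WriteFile(fs, "/src/a.bin", bytes.Repeat([]byte("x"), 4<<20), 0o644)
//
//	scanner := NewScanner(ScannerConfig{
//		FS:            fs,
//		ChunkSize:     1 << 20,  // placeholder
//		Repositories:  testRepos,
//		MaxBlobSize:   16 << 20, // placeholder
//		AgeRecipients: []string{testAgeRecipient},
//	})
//	result, err := scanner.Scan(context.Background(), "/src", testSnapshotID)
//	// result.ChunksCreated and result.BlobsCreated report what was packed.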