Add exclude patterns, snapshot prune, and other improvements
- Implement exclude patterns with anchored pattern support:
  - Patterns starting with / only match from the root of the source dir
  - Unanchored patterns match anywhere in the path
  - Support for glob patterns (*.log, .*, **/*.pack)
  - Directory patterns skip entire subtrees
  - Add gobwas/glob dependency for pattern matching
  - Add 16 comprehensive tests for exclude functionality
- Add snapshot prune command to clean orphaned data:
  - Removes incomplete snapshots from database
  - Cleans orphaned files, chunks, and blobs
  - Runs automatically at backup start for consistency
- Add snapshot remove command for deleting snapshots
- Add VAULTIK_AGE_SECRET_KEY environment variable support
- Fix duplicate fx module provider in restore command
- Change snapshot ID format to hostname_YYYY-MM-DDTHH:MM:SSZ
This commit is contained in:
@@ -157,6 +157,86 @@ func (r *FileChunkRepository) DeleteByFileID(ctx context.Context, tx *sql.Tx, fi
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteByFileIDs deletes all chunks for multiple files in a single statement.
|
||||
func (r *FileChunkRepository) DeleteByFileIDs(ctx context.Context, tx *sql.Tx, fileIDs []string) error {
|
||||
if len(fileIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Batch at 500 to stay within SQLite's variable limit
|
||||
const batchSize = 500
|
||||
|
||||
for i := 0; i < len(fileIDs); i += batchSize {
|
||||
end := i + batchSize
|
||||
if end > len(fileIDs) {
|
||||
end = len(fileIDs)
|
||||
}
|
||||
batch := fileIDs[i:end]
|
||||
|
||||
query := "DELETE FROM file_chunks WHERE file_id IN (?" + repeatPlaceholder(len(batch)-1) + ")"
|
||||
args := make([]interface{}, len(batch))
|
||||
for j, id := range batch {
|
||||
args[j] = id
|
||||
}
|
||||
|
||||
var err error
|
||||
if tx != nil {
|
||||
_, err = tx.ExecContext(ctx, query, args...)
|
||||
} else {
|
||||
_, err = r.db.ExecWithLog(ctx, query, args...)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("batch deleting file_chunks: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateBatch inserts multiple file_chunks in a single statement for efficiency.
|
||||
// Batches are automatically split to stay within SQLite's variable limit.
|
||||
func (r *FileChunkRepository) CreateBatch(ctx context.Context, tx *sql.Tx, fcs []FileChunk) error {
|
||||
if len(fcs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SQLite has a limit on variables (typically 999 or 32766).
|
||||
// Each FileChunk has 3 values, so batch at 300 to be safe.
|
||||
const batchSize = 300
|
||||
|
||||
for i := 0; i < len(fcs); i += batchSize {
|
||||
end := i + batchSize
|
||||
if end > len(fcs) {
|
||||
end = len(fcs)
|
||||
}
|
||||
batch := fcs[i:end]
|
||||
|
||||
// Build the query with multiple value sets
|
||||
query := "INSERT INTO file_chunks (file_id, idx, chunk_hash) VALUES "
|
||||
args := make([]interface{}, 0, len(batch)*3)
|
||||
for j, fc := range batch {
|
||||
if j > 0 {
|
||||
query += ", "
|
||||
}
|
||||
query += "(?, ?, ?)"
|
||||
args = append(args, fc.FileID, fc.Idx, fc.ChunkHash)
|
||||
}
|
||||
query += " ON CONFLICT(file_id, idx) DO NOTHING"
|
||||
|
||||
var err error
|
||||
if tx != nil {
|
||||
_, err = tx.ExecContext(ctx, query, args...)
|
||||
} else {
|
||||
_, err = r.db.ExecWithLog(ctx, query, args...)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("batch inserting file_chunks: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetByFile is an alias for GetByPath for compatibility
|
||||
func (r *FileChunkRepository) GetByFile(ctx context.Context, path string) ([]*FileChunk, error) {
|
||||
LogSQL("GetByFile", "Starting", path)
|
||||
|
||||
Reference in New Issue
Block a user