fix: bound blob cache during restore with LRU eviction to prevent OOM

The restore operation cached every downloaded blob in an unbounded map,
which could exhaust system memory when restoring large backups with many
unique blobs (each up to 10 GB).

Replaced it with an LRU cache bounded to 1 GiB by default, which evicts
the least-recently-used blobs whenever the byte limit is exceeded.
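
The cache type itself is not part of these hunks. For reference, below is a
minimal sketch of one plausible shape for newBlobLRUCache, Get, and Put,
using container/list plus a hash-indexed map and evicting from the tail of
the recency list once the byte budget is exceeded. The package name, the
1 GiB constant, and the choice to always retain the newest entry (so a
single blob larger than the budget can still be restored) are assumptions,
not the repository's actual code.

// Sketch only: the real implementation is not shown in this commit;
// this is one plausible shape for a byte-bounded LRU blob cache.
package vaultik

import "container/list"

// Assumed default: bound the cache to 1 GiB of decrypted blob data.
const defaultMaxBlobCacheBytes int64 = 1 << 30

type blobEntry struct {
	hash string
	data []byte
}

type blobLRUCache struct {
	maxBytes int64
	curBytes int64
	order    *list.List               // front = most recently used
	entries  map[string]*list.Element // blob hash -> element in order
}

func newBlobLRUCache(maxBytes int64) *blobLRUCache {
	return &blobLRUCache{
		maxBytes: maxBytes,
		order:    list.New(),
		entries:  make(map[string]*list.Element),
	}
}

// Get returns the cached blob, if present, and marks it most recently used.
func (c *blobLRUCache) Get(hash string) ([]byte, bool) {
	el, ok := c.entries[hash]
	if !ok {
		return nil, false
	}
	c.order.MoveToFront(el)
	return el.Value.(*blobEntry).data, true
}

// Put stores a blob, then evicts least-recently-used entries until the
// cached bytes fit the budget again. The newest entry is never evicted,
// so an oversized blob stays cached while its file is being restored.
func (c *blobLRUCache) Put(hash string, data []byte) {
	if el, ok := c.entries[hash]; ok {
		c.curBytes += int64(len(data)) - int64(len(el.Value.(*blobEntry).data))
		el.Value.(*blobEntry).data = data
		c.order.MoveToFront(el)
	} else {
		c.entries[hash] = c.order.PushFront(&blobEntry{hash: hash, data: data})
		c.curBytes += int64(len(data))
	}
	for c.curBytes > c.maxBytes && c.order.Len() > 1 {
		oldest := c.order.Back()
		ev := oldest.Value.(*blobEntry)
		c.order.Remove(oldest)
		delete(c.entries, ev.hash)
		c.curBytes -= int64(len(ev.data))
	}
}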
clawbot 2026-02-08 12:04:17 -08:00
parent 46c2ea3079
commit 0eb07b29d6


@@ -109,7 +109,7 @@ func (v *Vaultik) Restore(opts *RestoreOptions) error {
 	// Step 5: Restore files
 	result := &RestoreResult{}
-	blobCache := make(map[string][]byte) // Cache downloaded and decrypted blobs
+	blobCache := newBlobLRUCache(defaultMaxBlobCacheBytes) // LRU cache bounded to ~1 GiB
 	for i, file := range files {
 		if v.ctx.Err() != nil {
@@ -299,7 +299,7 @@ func (v *Vaultik) restoreFile(
 	targetDir string,
 	identity age.Identity,
 	chunkToBlobMap map[string]*database.BlobChunk,
-	blobCache map[string][]byte,
+	blobCache *blobLRUCache,
 	result *RestoreResult,
 ) error {
 	// Calculate target path - use full original path under target directory
@@ -383,7 +383,7 @@ func (v *Vaultik) restoreRegularFile(
 	targetPath string,
 	identity age.Identity,
 	chunkToBlobMap map[string]*database.BlobChunk,
-	blobCache map[string][]byte,
+	blobCache *blobLRUCache,
 	result *RestoreResult,
 ) error {
 	// Get file chunks in order
@@ -417,13 +417,13 @@ func (v *Vaultik) restoreRegularFile(
 		// Download and decrypt blob if not cached
 		blobHashStr := blob.Hash.String()
-		blobData, ok := blobCache[blobHashStr]
+		blobData, ok := blobCache.Get(blobHashStr)
 		if !ok {
 			blobData, err = v.downloadBlob(ctx, blobHashStr, blob.CompressedSize, identity)
 			if err != nil {
 				return fmt.Errorf("downloading blob %s: %w", blobHashStr[:16], err)
 			}
-			blobCache[blobHashStr] = blobData
+			blobCache.Put(blobHashStr, blobData)
 			result.BlobsDownloaded++
 			result.BytesDownloaded += blob.CompressedSize
 		}