Major refactoring: Updated manifest format and renamed backup to snapshot

- Created manifest.go with proper Manifest structure including blob sizes
- Updated manifest generation to include compressed size for each blob
- Added TotalCompressedSize field to manifest for quick access
- Renamed backup package to snapshot for clarity
- Updated snapshot list to show all remote snapshots
- Remote snapshots not in local DB fetch manifest to get size
- Local snapshots not in remote are automatically deleted
- Removed backwards compatibility code (pre-1.0, no users)
- Fixed prune command to use new manifest format
- Updated all imports and references from backup to snapshot
This commit is contained in:
Jeffrey Paul 2025-07-26 03:27:47 +02:00
parent c07d8eec0a
commit a544fa80f2
11 changed files with 254 additions and 168 deletions

View File

@ -2,18 +2,16 @@ package cli
import (
"context"
"encoding/json"
"fmt"
"strings"
"git.eeqj.de/sneak/vaultik/internal/backup"
"git.eeqj.de/sneak/vaultik/internal/config"
"git.eeqj.de/sneak/vaultik/internal/database"
"git.eeqj.de/sneak/vaultik/internal/globals"
"git.eeqj.de/sneak/vaultik/internal/log"
"git.eeqj.de/sneak/vaultik/internal/s3"
"git.eeqj.de/sneak/vaultik/internal/snapshot"
"github.com/dustin/go-humanize"
"github.com/klauspost/compress/zstd"
"github.com/spf13/cobra"
"go.uber.org/fx"
)
@ -66,7 +64,7 @@ specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,
Debug: rootFlags.Debug,
},
Modules: []fx.Option{
backup.Module,
snapshot.Module,
s3.Module,
fx.Provide(fx.Annotate(
func(g *globals.Globals, cfg *config.Config, repos *database.Repositories,
@ -195,8 +193,8 @@ func (app *PruneApp) runPrune(ctx context.Context, opts *PruneOptions) error {
// Step 5: Build set of referenced blobs
referencedBlobs := make(map[string]bool)
for _, blobHash := range manifest.Blobs {
referencedBlobs[blobHash] = true
for _, blob := range manifest.Blobs {
referencedBlobs[blob.Hash] = true
}
// Step 6: List all blobs in S3
@ -277,16 +275,8 @@ func (app *PruneApp) runPrune(ctx context.Context, opts *PruneOptions) error {
return nil
}
// BlobManifest represents the structure of a snapshot's blob manifest
type BlobManifest struct {
SnapshotID string `json:"snapshot_id"`
Timestamp string `json:"timestamp"`
BlobCount int `json:"blob_count"`
Blobs []string `json:"blobs"`
}
// downloadManifest downloads and decompresses a snapshot manifest
func (app *PruneApp) downloadManifest(ctx context.Context, snapshotID string) (*BlobManifest, error) {
func (app *PruneApp) downloadManifest(ctx context.Context, snapshotID string) (*snapshot.Manifest, error) {
manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)
// Download the compressed manifest
@ -296,18 +286,11 @@ func (app *PruneApp) downloadManifest(ctx context.Context, snapshotID string) (*
}
defer func() { _ = reader.Close() }()
// Decompress using zstd
zr, err := zstd.NewReader(reader)
// Decode manifest
manifest, err := snapshot.DecodeManifest(reader)
if err != nil {
return nil, fmt.Errorf("creating zstd reader: %w", err)
}
defer zr.Close()
// Decode JSON manifest
var manifest BlobManifest
if err := json.NewDecoder(zr).Decode(&manifest); err != nil {
return nil, fmt.Errorf("decoding manifest: %w", err)
}
return &manifest, nil
return manifest, nil
}

View File

@ -12,14 +12,13 @@ import (
"text/tabwriter"
"time"
"git.eeqj.de/sneak/vaultik/internal/backup"
"git.eeqj.de/sneak/vaultik/internal/config"
"git.eeqj.de/sneak/vaultik/internal/database"
"git.eeqj.de/sneak/vaultik/internal/globals"
"git.eeqj.de/sneak/vaultik/internal/log"
"git.eeqj.de/sneak/vaultik/internal/s3"
"git.eeqj.de/sneak/vaultik/internal/snapshot"
"github.com/dustin/go-humanize"
"github.com/klauspost/compress/zstd"
"github.com/spf13/cobra"
"go.uber.org/fx"
)
@ -36,8 +35,8 @@ type SnapshotCreateApp struct {
Globals *globals.Globals
Config *config.Config
Repositories *database.Repositories
ScannerFactory backup.ScannerFactory
SnapshotManager *backup.SnapshotManager
ScannerFactory snapshot.ScannerFactory
SnapshotManager *snapshot.SnapshotManager
S3Client *s3.Client
DB *database.DB
Lifecycle fx.Lifecycle
@ -106,11 +105,11 @@ specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,
Cron: opts.Cron,
},
Modules: []fx.Option{
backup.Module,
snapshot.Module,
s3.Module,
fx.Provide(fx.Annotate(
func(g *globals.Globals, cfg *config.Config, repos *database.Repositories,
scannerFactory backup.ScannerFactory, snapshotManager *backup.SnapshotManager,
scannerFactory snapshot.ScannerFactory, snapshotManager *snapshot.SnapshotManager,
s3Client *s3.Client, db *database.DB,
lc fx.Lifecycle, shutdowner fx.Shutdowner) *SnapshotCreateApp {
return &SnapshotCreateApp{
@ -226,7 +225,7 @@ func (app *SnapshotCreateApp) runSnapshot(ctx context.Context, opts *SnapshotCre
}
// Create scanner with progress enabled (unless in cron mode)
scanner := app.ScannerFactory(backup.ScannerParams{
scanner := app.ScannerFactory(snapshot.ScannerParams{
EnableProgress: !opts.Cron,
})
@ -306,8 +305,8 @@ func (app *SnapshotCreateApp) runSnapshot(ctx context.Context, opts *SnapshotCre
}
// Update snapshot statistics with extended fields
extStats := backup.ExtendedBackupStats{
BackupStats: backup.BackupStats{
extStats := snapshot.ExtendedBackupStats{
BackupStats: snapshot.BackupStats{
FilesScanned: totalFiles,
BytesScanned: totalBytes,
ChunksCreated: totalChunks,
@ -487,15 +486,78 @@ func newSnapshotVerifyCommand() *cobra.Command {
// List lists all snapshots
func (app *SnapshotApp) List(ctx context.Context, jsonOutput bool) error {
// First, sync with remote snapshots
if err := app.syncWithRemote(ctx); err != nil {
return fmt.Errorf("syncing with remote: %w", err)
// Get all remote snapshots
remoteSnapshots := make(map[string]bool)
objectCh := app.S3Client.ListObjectsStream(ctx, "metadata/", false)
for object := range objectCh {
if object.Err != nil {
return fmt.Errorf("listing remote snapshots: %w", object.Err)
}
// Now get snapshots from S3
snapshots, err := app.getSnapshots(ctx)
// Extract snapshot ID from paths like metadata/hostname-20240115-143052Z/
parts := strings.Split(object.Key, "/")
if len(parts) >= 2 && parts[0] == "metadata" && parts[1] != "" {
remoteSnapshots[parts[1]] = true
}
}
// Get all local snapshots
localSnapshots, err := app.Repositories.Snapshots.ListRecent(ctx, 10000)
if err != nil {
return err
return fmt.Errorf("listing local snapshots: %w", err)
}
// Build a map of local snapshots for quick lookup
localSnapshotMap := make(map[string]*database.Snapshot)
for _, s := range localSnapshots {
localSnapshotMap[s.ID] = s
}
// Remove local snapshots that don't exist remotely
for _, snapshot := range localSnapshots {
if !remoteSnapshots[snapshot.ID] {
log.Info("Removing local snapshot not found in remote", "snapshot_id", snapshot.ID)
if err := app.Repositories.Snapshots.Delete(ctx, snapshot.ID); err != nil {
log.Error("Failed to delete local snapshot", "snapshot_id", snapshot.ID, "error", err)
}
delete(localSnapshotMap, snapshot.ID)
}
}
// Build final snapshot list
snapshots := make([]SnapshotInfo, 0, len(remoteSnapshots))
for snapshotID := range remoteSnapshots {
// Check if we have this snapshot locally
if localSnap, exists := localSnapshotMap[snapshotID]; exists && localSnap.CompletedAt != nil {
// Use local data
snapshots = append(snapshots, SnapshotInfo{
ID: localSnap.ID,
Timestamp: localSnap.StartedAt,
CompressedSize: localSnap.BlobSize,
})
} else {
// Remote snapshot not in local DB - fetch manifest to get size
timestamp, err := parseSnapshotTimestamp(snapshotID)
if err != nil {
log.Warn("Failed to parse snapshot timestamp", "id", snapshotID, "error", err)
continue
}
// Try to download manifest to get size
totalSize, err := app.getManifestSize(ctx, snapshotID)
if err != nil {
log.Warn("Failed to get manifest size", "id", snapshotID, "error", err)
totalSize = 0
}
snapshots = append(snapshots, SnapshotInfo{
ID: snapshotID,
Timestamp: timestamp,
CompressedSize: totalSize,
})
}
}
// Sort by timestamp (newest first)
@ -512,17 +574,18 @@ func (app *SnapshotApp) List(ctx context.Context, jsonOutput bool) error {
// Table output
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
if _, err := fmt.Fprintln(w, "SNAPSHOT ID\tTIMESTAMP"); err != nil {
if _, err := fmt.Fprintln(w, "SNAPSHOT ID\tTIMESTAMP\tCOMPRESSED SIZE"); err != nil {
return err
}
if _, err := fmt.Fprintln(w, "───────────\t─────────"); err != nil {
if _, err := fmt.Fprintln(w, "───────────\t─────────\t───────────────"); err != nil {
return err
}
for _, snap := range snapshots {
if _, err := fmt.Fprintf(w, "%s\t%s\n",
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n",
snap.ID,
snap.Timestamp.Format("2006-01-02 15:04:05")); err != nil {
snap.Timestamp.Format("2006-01-02 15:04:05"),
formatBytes(snap.CompressedSize)); err != nil {
return err
}
}
@ -532,9 +595,27 @@ func (app *SnapshotApp) List(ctx context.Context, jsonOutput bool) error {
// Purge removes old snapshots based on criteria
func (app *SnapshotApp) Purge(ctx context.Context, keepLatest bool, olderThan string, force bool) error {
snapshots, err := app.getSnapshots(ctx)
// Sync with remote first
if err := app.syncWithRemote(ctx); err != nil {
return fmt.Errorf("syncing with remote: %w", err)
}
// Get snapshots from local database
dbSnapshots, err := app.Repositories.Snapshots.ListRecent(ctx, 10000)
if err != nil {
return err
return fmt.Errorf("listing snapshots: %w", err)
}
// Convert to SnapshotInfo format, only including completed snapshots
snapshots := make([]SnapshotInfo, 0, len(dbSnapshots))
for _, s := range dbSnapshots {
if s.CompletedAt != nil {
snapshots = append(snapshots, SnapshotInfo{
ID: s.ID,
Timestamp: s.StartedAt,
CompressedSize: s.BlobSize,
})
}
}
// Sort by timestamp (newest first)
@ -657,57 +738,25 @@ func (app *SnapshotApp) Verify(ctx context.Context, snapshotID string, deep bool
return nil
}
// getSnapshots retrieves all snapshots from S3
func (app *SnapshotApp) getSnapshots(ctx context.Context) ([]SnapshotInfo, error) {
var snapshots []SnapshotInfo
// getManifestSize downloads a manifest and returns the total compressed size
func (app *SnapshotApp) getManifestSize(ctx context.Context, snapshotID string) (int64, error) {
manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)
// List all objects under metadata/
objectCh := app.S3Client.ListObjectsStream(ctx, "metadata/", true)
// Track unique snapshots
snapshotMap := make(map[string]*SnapshotInfo)
for object := range objectCh {
if object.Err != nil {
return nil, fmt.Errorf("listing objects: %w", object.Err)
}
// Extract snapshot ID from paths like metadata/2024-01-15-143052-hostname/manifest.json.zst
parts := strings.Split(object.Key, "/")
if len(parts) < 3 || parts[0] != "metadata" {
continue
}
snapshotID := parts[1]
if snapshotID == "" {
continue
}
// Initialize snapshot info if not seen
if _, exists := snapshotMap[snapshotID]; !exists {
timestamp, err := parseSnapshotTimestamp(snapshotID)
reader, err := app.S3Client.GetObject(ctx, manifestPath)
if err != nil {
log.Warn("Failed to parse snapshot timestamp", "id", snapshotID, "error", err)
continue
return 0, fmt.Errorf("downloading manifest: %w", err)
}
defer func() { _ = reader.Close() }()
manifest, err := snapshot.DecodeManifest(reader)
if err != nil {
return 0, fmt.Errorf("decoding manifest: %w", err)
}
snapshotMap[snapshotID] = &SnapshotInfo{
ID: snapshotID,
Timestamp: timestamp,
CompressedSize: 0,
}
}
return manifest.TotalCompressedSize, nil
}
// Convert map to slice without downloading manifests
for _, snap := range snapshotMap {
snapshots = append(snapshots, *snap)
}
return snapshots, nil
}
// downloadManifest downloads and parses a snapshot manifest
// downloadManifest downloads and parses a snapshot manifest (for verify command)
func (app *SnapshotApp) downloadManifest(ctx context.Context, snapshotID string) ([]string, error) {
manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)
@ -717,25 +766,17 @@ func (app *SnapshotApp) downloadManifest(ctx context.Context, snapshotID string)
}
defer func() { _ = reader.Close() }()
// Decompress
zr, err := zstd.NewReader(reader)
manifest, err := snapshot.DecodeManifest(reader)
if err != nil {
return nil, fmt.Errorf("creating zstd reader: %w", err)
}
defer zr.Close()
// Decode JSON - manifest is an object with a "blobs" field
var manifest struct {
SnapshotID string `json:"snapshot_id"`
Timestamp string `json:"timestamp"`
BlobCount int `json:"blob_count"`
Blobs []string `json:"blobs"`
}
if err := json.NewDecoder(zr).Decode(&manifest); err != nil {
return nil, fmt.Errorf("decoding manifest: %w", err)
}
return manifest.Blobs, nil
// Extract blob hashes
hashes := make([]string, len(manifest.Blobs))
for i, blob := range manifest.Blobs {
hashes[i] = blob.Hash
}
return hashes, nil
}
// deleteSnapshot removes a snapshot and its metadata

View File

@ -1,4 +1,4 @@
package backup
package snapshot
import (
"context"

View File

@ -1,4 +1,4 @@
package backup_test
package snapshot_test
import (
"context"
@ -6,9 +6,9 @@ import (
"testing"
"time"
"git.eeqj.de/sneak/vaultik/internal/backup"
"git.eeqj.de/sneak/vaultik/internal/database"
"git.eeqj.de/sneak/vaultik/internal/log"
"git.eeqj.de/sneak/vaultik/internal/snapshot"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -39,7 +39,7 @@ func TestFileContentChange(t *testing.T) {
repos := database.NewRepositories(db)
// Create scanner
scanner := backup.NewScanner(backup.ScannerConfig{
scanner := snapshot.NewScanner(snapshot.ScannerConfig{
FS: fs,
ChunkSize: int64(1024 * 16), // 16KB chunks for testing
Repositories: repos,
@ -168,7 +168,7 @@ func TestMultipleFileChanges(t *testing.T) {
repos := database.NewRepositories(db)
// Create scanner
scanner := backup.NewScanner(backup.ScannerConfig{
scanner := snapshot.NewScanner(snapshot.ScannerConfig{
FS: fs,
ChunkSize: int64(1024 * 16), // 16KB chunks for testing
Repositories: repos,

View File

@ -0,0 +1,70 @@
package snapshot
import (
"bytes"
"encoding/json"
"fmt"
"io"
"github.com/klauspost/compress/zstd"
)
// Manifest represents the structure of a snapshot's blob manifest.
// It is stored as zstd-compressed JSON; per the export process, it is
// compressed but not encrypted so prune can read it without private keys.
type Manifest struct {
	// SnapshotID identifies the snapshot this manifest belongs to.
	SnapshotID string `json:"snapshot_id"`
	// Timestamp is the manifest creation time, formatted as RFC3339.
	Timestamp string `json:"timestamp"`
	// BlobCount is the number of entries in Blobs.
	BlobCount int `json:"blob_count"`
	// TotalCompressedSize is the sum of CompressedSize over all Blobs,
	// kept at the top level so callers can read the total without
	// iterating the blob list.
	TotalCompressedSize int64 `json:"total_compressed_size"`
	// Blobs lists every blob referenced by the snapshot.
	Blobs []BlobInfo `json:"blobs"`
}
// BlobInfo represents information about a single blob in the manifest.
type BlobInfo struct {
	// Hash is the blob's identifying hash (used as the reference key
	// when pruning and verifying snapshots).
	Hash string `json:"hash"`
	// CompressedSize is the blob's size in bytes after compression.
	CompressedSize int64 `json:"compressed_size"`
}
// DecodeManifest decodes a manifest from a reader containing compressed JSON.
// The stream is expected to be zstd-compressed; the underlying reader is not
// closed by this function, only the zstd decoder it wraps around it.
func DecodeManifest(r io.Reader) (*Manifest, error) {
	// Wrap the input in a zstd decompressor.
	dec, err := zstd.NewReader(r)
	if err != nil {
		return nil, fmt.Errorf("creating zstd reader: %w", err)
	}
	defer dec.Close()

	// Stream-decode the JSON manifest out of the decompressor.
	m := new(Manifest)
	if err := json.NewDecoder(dec).Decode(m); err != nil {
		return nil, fmt.Errorf("decoding manifest: %w", err)
	}

	return m, nil
}
// EncodeManifest encodes a manifest to compressed JSON. The manifest is
// serialized as indented JSON and then zstd-compressed at the requested
// compressionLevel; the resulting compressed bytes are returned.
func EncodeManifest(manifest *Manifest, compressionLevel int) ([]byte, error) {
	// Serialize to human-readable JSON first.
	payload, err := json.MarshalIndent(manifest, "", " ")
	if err != nil {
		return nil, fmt.Errorf("marshaling manifest: %w", err)
	}

	// Compress the JSON with zstd at the configured level.
	var out bytes.Buffer
	enc, err := zstd.NewWriter(&out, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(compressionLevel)))
	if err != nil {
		return nil, fmt.Errorf("creating zstd writer: %w", err)
	}

	if _, err := enc.Write(payload); err != nil {
		// Best-effort close; the write error is the one worth reporting.
		_ = enc.Close()
		return nil, fmt.Errorf("writing compressed data: %w", err)
	}

	// Close flushes the final zstd frame; its error must be checked.
	if err := enc.Close(); err != nil {
		return nil, fmt.Errorf("closing zstd writer: %w", err)
	}

	return out.Bytes(), nil
}

View File

@ -1,4 +1,4 @@
package backup
package snapshot
import (
"git.eeqj.de/sneak/vaultik/internal/config"

View File

@ -1,4 +1,4 @@
package backup
package snapshot
import (
"context"

View File

@ -1,4 +1,4 @@
package backup
package snapshot
import (
"context"

View File

@ -1,4 +1,4 @@
package backup_test
package snapshot_test
import (
"context"
@ -7,9 +7,9 @@ import (
"testing"
"time"
"git.eeqj.de/sneak/vaultik/internal/backup"
"git.eeqj.de/sneak/vaultik/internal/database"
"git.eeqj.de/sneak/vaultik/internal/log"
"git.eeqj.de/sneak/vaultik/internal/snapshot"
"github.com/spf13/afero"
)
@ -60,7 +60,7 @@ func TestScannerSimpleDirectory(t *testing.T) {
repos := database.NewRepositories(db)
// Create scanner
scanner := backup.NewScanner(backup.ScannerConfig{
scanner := snapshot.NewScanner(snapshot.ScannerConfig{
FS: fs,
ChunkSize: int64(1024 * 16), // 16KB chunks for testing
Repositories: repos,
@ -93,7 +93,7 @@ func TestScannerSimpleDirectory(t *testing.T) {
}
// Scan the directory
var result *backup.ScanResult
var result *snapshot.ScanResult
result, err = scanner.Scan(ctx, "/source", snapshotID)
if err != nil {
t.Fatalf("scan failed: %v", err)
@ -207,7 +207,7 @@ func TestScannerWithSymlinks(t *testing.T) {
repos := database.NewRepositories(db)
// Create scanner
scanner := backup.NewScanner(backup.ScannerConfig{
scanner := snapshot.NewScanner(snapshot.ScannerConfig{
FS: fs,
ChunkSize: 1024 * 16,
Repositories: repos,
@ -240,7 +240,7 @@ func TestScannerWithSymlinks(t *testing.T) {
}
// Scan the directory
var result *backup.ScanResult
var result *snapshot.ScanResult
result, err = scanner.Scan(ctx, "/source", snapshotID)
if err != nil {
t.Fatalf("scan failed: %v", err)
@ -308,7 +308,7 @@ func TestScannerLargeFile(t *testing.T) {
repos := database.NewRepositories(db)
// Create scanner with 64KB average chunk size
scanner := backup.NewScanner(backup.ScannerConfig{
scanner := snapshot.NewScanner(snapshot.ScannerConfig{
FS: fs,
ChunkSize: int64(1024 * 64), // 64KB average chunks
Repositories: repos,
@ -341,7 +341,7 @@ func TestScannerLargeFile(t *testing.T) {
}
// Scan the directory
var result *backup.ScanResult
var result *snapshot.ScanResult
result, err = scanner.Scan(ctx, "/source", snapshotID)
if err != nil {
t.Fatalf("scan failed: %v", err)

View File

@ -1,9 +1,9 @@
package backup
package snapshot
// Snapshot Metadata Export Process
// ================================
//
// The snapshot metadata contains all information needed to restore a backup.
// The snapshot metadata contains all information needed to restore a snapshot.
// Instead of creating a custom format, we use a trimmed copy of the SQLite
// database containing only data relevant to the current snapshot.
//
@ -42,7 +42,6 @@ import (
"bytes"
"context"
"database/sql"
"encoding/json"
"fmt"
"io"
"os"
@ -56,7 +55,6 @@ import (
"git.eeqj.de/sneak/vaultik/internal/log"
"git.eeqj.de/sneak/vaultik/internal/s3"
"github.com/dustin/go-humanize"
"github.com/klauspost/compress/zstd"
"go.uber.org/fx"
)
@ -540,60 +538,54 @@ func (sm *SnapshotManager) generateBlobManifest(ctx context.Context, dbPath stri
// Get all blobs for this snapshot
log.Debug("Querying blobs for snapshot", "snapshot_id", snapshotID)
blobs, err := repos.Snapshots.GetBlobHashes(ctx, snapshotID)
blobHashes, err := repos.Snapshots.GetBlobHashes(ctx, snapshotID)
if err != nil {
return nil, fmt.Errorf("getting snapshot blobs: %w", err)
}
log.Debug("Found blobs", "count", len(blobs))
log.Debug("Found blobs", "count", len(blobHashes))
// Create manifest structure
manifest := struct {
SnapshotID string `json:"snapshot_id"`
Timestamp string `json:"timestamp"`
BlobCount int `json:"blob_count"`
Blobs []string `json:"blobs"`
}{
// Get blob details including sizes
blobs := make([]BlobInfo, 0, len(blobHashes))
totalCompressedSize := int64(0)
for _, hash := range blobHashes {
blob, err := repos.Blobs.GetByHash(ctx, hash)
if err != nil {
log.Warn("Failed to get blob details", "hash", hash, "error", err)
continue
}
if blob != nil {
blobs = append(blobs, BlobInfo{
Hash: hash,
CompressedSize: blob.CompressedSize,
})
totalCompressedSize += blob.CompressedSize
}
}
// Create manifest
manifest := &Manifest{
SnapshotID: snapshotID,
Timestamp: time.Now().UTC().Format(time.RFC3339),
BlobCount: len(blobs),
TotalCompressedSize: totalCompressedSize,
Blobs: blobs,
}
// Marshal to JSON
log.Debug("Marshaling manifest to JSON")
jsonData, err := json.MarshalIndent(manifest, "", " ")
// Encode manifest
log.Debug("Encoding manifest")
compressedData, err := EncodeManifest(manifest, sm.config.CompressionLevel)
if err != nil {
return nil, fmt.Errorf("marshaling manifest: %w", err)
return nil, fmt.Errorf("encoding manifest: %w", err)
}
log.Debug("JSON manifest created", "size", len(jsonData))
// Compress only (no encryption) - manifests must be readable without private keys for pruning
log.Debug("Compressing manifest")
var compressedBuf bytes.Buffer
writer, err := zstd.NewWriter(&compressedBuf, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(sm.config.CompressionLevel)))
if err != nil {
return nil, fmt.Errorf("creating zstd writer: %w", err)
}
if _, err := writer.Write(jsonData); err != nil {
_ = writer.Close()
return nil, fmt.Errorf("writing compressed data: %w", err)
}
if err := writer.Close(); err != nil {
return nil, fmt.Errorf("closing zstd writer: %w", err)
}
log.Debug("Manifest compressed",
"original_size", len(jsonData),
"compressed_size", compressedBuf.Len())
log.Info("Generated blob manifest",
"snapshot_id", snapshotID,
"blob_count", len(blobs),
"json_size", len(jsonData),
"compressed_size", compressedBuf.Len())
"total_compressed_size", totalCompressedSize,
"manifest_size", len(compressedData))
return compressedBuf.Bytes(), nil
return compressedData, nil
}
// compressData compresses data using zstd

View File

@ -1,4 +1,4 @@
package backup
package snapshot
import (
"context"