vaultik/internal/blobgen/writer.go
sneak d3afa65420 Fix foreign key constraints and improve snapshot tracking
- Add unified compression/encryption package in internal/blobgen
- Update DATAMODEL.md to reflect current schema implementation
- Refactor snapshot cleanup into well-named methods for clarity
- Add snapshot_id to uploads table to track new blobs per snapshot
- Fix blob count reporting for incremental backups
- Add DeleteOrphaned method to BlobChunkRepository
- Fix cleanup order to respect foreign key constraints
- Update tests to reflect schema changes
2025-07-26 02:22:25 +02:00

package blobgen

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"io"

	"filippo.io/age"
	"github.com/klauspost/compress/zstd"
)

// Writer compresses, encrypts, and SHA256-hashes data as it is written.
// The hash covers the uncompressed input.
type Writer struct {
	writer           io.Writer      // Final destination
	compressor       *zstd.Encoder  // Compression layer
	encryptor        io.WriteCloser // Encryption layer
	hasher           hash.Hash      // SHA256 hasher
	teeWriter        io.Writer      // Tees data to both compressor and hasher
	compressionLevel int
	bytesWritten     int64
}

// NewWriter creates a new Writer that compresses, encrypts, and hashes data.
func NewWriter(w io.Writer, compressionLevel int, recipients []string) (*Writer, error) {
	// Validate compression level
	if err := validateCompressionLevel(compressionLevel); err != nil {
		return nil, err
	}

	// Create SHA256 hasher
	hasher := sha256.New()

	// Parse recipients
	var ageRecipients []age.Recipient
	for _, recipient := range recipients {
		r, err := age.ParseX25519Recipient(recipient)
		if err != nil {
			return nil, fmt.Errorf("parsing recipient %s: %w", recipient, err)
		}
		ageRecipients = append(ageRecipients, r)
	}

	// Create encryption writer
	encWriter, err := age.Encrypt(w, ageRecipients...)
	if err != nil {
		return nil, fmt.Errorf("creating encryption writer: %w", err)
	}

	// Create compression writer with encryption as destination
	compressor, err := zstd.NewWriter(encWriter,
		zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(compressionLevel)),
		zstd.WithEncoderConcurrency(1), // Use single thread for streaming
	)
	if err != nil {
		_ = encWriter.Close()
		return nil, fmt.Errorf("creating compression writer: %w", err)
	}

	// Create tee writer that writes to both compressor and hasher
	teeWriter := io.MultiWriter(compressor, hasher)

	return &Writer{
		writer:           w,
		compressor:       compressor,
		encryptor:        encWriter,
		hasher:           hasher,
		teeWriter:        teeWriter,
		compressionLevel: compressionLevel,
	}, nil
}

// Write implements io.Writer. Each call feeds data to the SHA256 hasher and
// to the compressor, which in turn writes into the encryption layer.
func (w *Writer) Write(p []byte) (n int, err error) {
	n, err = w.teeWriter.Write(p)
	w.bytesWritten += int64(n)
	return n, err
}

// Close flushes and closes the compression and encryption layers in order.
// It does not close the underlying writer.
func (w *Writer) Close() error {
	// Close compressor first so buffered data is flushed into the encryptor
	if err := w.compressor.Close(); err != nil {
		return fmt.Errorf("closing compressor: %w", err)
	}
	// Then close encryptor to finalize the encrypted stream
	if err := w.encryptor.Close(); err != nil {
		return fmt.Errorf("closing encryptor: %w", err)
	}
	return nil
}

// Sum256 returns the SHA256 hash of all uncompressed data written.
func (w *Writer) Sum256() []byte {
	return w.hasher.Sum(nil)
}

// BytesWritten returns the number of uncompressed bytes written.
func (w *Writer) BytesWritten() int64 {
	return w.bytesWritten
}

func validateCompressionLevel(level int) error {
	// Zstd compression levels: 1-19 (default is 3)
	// SpeedFastest = 1, SpeedDefault = 3, SpeedBetterCompression = 7, SpeedBestCompression = 11
	if level < 1 || level > 19 {
		return fmt.Errorf("invalid compression level %d: must be between 1 and 19", level)
	}
	return nil
}
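
A minimal usage sketch of the API above (hypothetical caller code, not part of writer.go). The import path is assumed from the repository layout, the recipient string "age1..." is a placeholder for a real age X25519 public key, and callers must live inside the vaultik module since the package is internal:

package main

import (
	"bytes"
	"fmt"
	"log"

	"git.eeqj.de/sneak/vaultik/internal/blobgen" // assumed module path
)

func main() {
	var blob bytes.Buffer

	// "age1..." is a placeholder; pass a real age X25519 public key.
	w, err := blobgen.NewWriter(&blob, 3, []string{"age1..."})
	if err != nil {
		log.Fatal(err)
	}

	if _, err := w.Write([]byte("chunk data")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// The hash covers the uncompressed input; blob now holds the
	// compressed, encrypted output ready for upload.
	fmt.Printf("plaintext: %d bytes, sha256: %x, blob: %d bytes\n",
		w.BytesWritten(), w.Sum256(), blob.Len())
}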