Implement SQLite index database layer

- Add pure Go SQLite driver (modernc.org/sqlite) to avoid CGO dependency
- Implement database connection management with WAL mode
- Add write mutex for serializing concurrent writes
- Create schema for all tables matching DESIGN.md specifications
- Implement repository pattern for all database entities:
  - Files, FileChunks, Chunks, Blobs, BlobChunks, ChunkFiles, Snapshots
- Add transaction support with proper rollback handling
- Add fatal error handling for database integrity issues
- Add snapshot fields for tracking file sizes and compression ratios
- Make index path configurable via VAULTIK_INDEX_PATH environment variable
- Add comprehensive test coverage for all repositories
- Add format check to Makefile to ensure code formatting
This commit is contained in:
Jeffrey Paul 2025-07-20 10:56:30 +02:00
parent b2e85d9e76
commit 8529ae9735
22 changed files with 1488 additions and 39 deletions

View File

@ -12,9 +12,16 @@ LDFLAGS := -X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=$(VERSION)' \
all: test
# Run tests
test: lint
test: lint fmt-check
go test -v ./...
# Check if code is formatted
fmt-check:
@if [ -n "$$(go fmt ./...)" ]; then \
echo "Error: Code is not formatted. Run 'make fmt' to fix."; \
exit 1; \
fi
# Format code
fmt:
go fmt ./...

22
go.mod
View File

@ -3,14 +3,26 @@ module git.eeqj.de/sneak/vaultik
go 1.24.4
require (
github.com/spf13/cobra v1.9.1
go.uber.org/fx v1.24.0
gopkg.in/yaml.v3 v3.0.1
modernc.org/sqlite v1.38.0
)
require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-sqlite3 v1.14.28 // indirect
github.com/spf13/cobra v1.9.1 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/spf13/pflag v1.0.6 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/sys v0.33.0 // indirect
modernc.org/libc v1.65.10 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
)

60
go.sum
View File

@ -1,23 +1,75 @@
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.26.1 h1:+X5NtzVBn0KgsBCBe+xkDC7twLb/jNVj9FPgiwSQO3s=
modernc.org/cc/v4 v4.26.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
modernc.org/fileutil v1.3.3 h1:3qaU+7f7xxTUmvU1pJTZiDLAIoJVdUSSauJNHg9yXoA=
modernc.org/fileutil v1.3.3/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/libc v1.65.10 h1:ZwEk8+jhW7qBjHIT+wd0d9VjitRyQef9BnzlzGwMODc=
modernc.org/libc v1.65.10/go.mod h1:StFvYpx7i/mXtBAfVOjaU0PWZOvIRoZSgXhrwXzr8Po=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.38.0 h1:+4OrfPQ8pxHKuWG4md1JpR/EYAh3Md7TdejuuzE7EUI=
modernc.org/sqlite v1.38.0/go.mod h1:1Bj+yES4SVvBZ4cBOpVZ6QgesMCKpJZDq0nxYzOpmNE=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=

View File

@ -24,7 +24,7 @@ func (r *BlobChunkRepository) Create(ctx context.Context, tx *sql.Tx, bc *BlobCh
if tx != nil {
_, err = tx.ExecContext(ctx, query, bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length)
} else {
_, err = r.db.conn.ExecContext(ctx, query, bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length)
_, err = r.db.ExecWithLock(ctx, query, bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length)
}
if err != nil {

View File

@ -0,0 +1,146 @@
package database
import (
"context"
"testing"
)
// TestBlobChunkRepository exercises Create, GetByBlobHash and
// GetByChunkHash for a single blob composed of three chunks.
func TestBlobChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewBlobChunkRepository(db)

	// Three chunks in one blob, at strictly increasing offsets.
	first := &BlobChunk{BlobHash: "blob1", ChunkHash: "chunk1", Offset: 0, Length: 1024}
	if err := repo.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create blob chunk: %v", err)
	}
	second := &BlobChunk{BlobHash: "blob1", ChunkHash: "chunk2", Offset: 1024, Length: 2048}
	if err := repo.Create(ctx, nil, second); err != nil {
		t.Fatalf("failed to create second blob chunk: %v", err)
	}
	third := &BlobChunk{BlobHash: "blob1", ChunkHash: "chunk3", Offset: 3072, Length: 512}
	if err := repo.Create(ctx, nil, third); err != nil {
		t.Fatalf("failed to create third blob chunk: %v", err)
	}

	// All three chunks come back for the blob, ordered by offset.
	got, err := repo.GetByBlobHash(ctx, "blob1")
	if err != nil {
		t.Fatalf("failed to get blob chunks: %v", err)
	}
	if len(got) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(got))
	}
	wantOffsets := []int64{0, 1024, 3072}
	for i, c := range got {
		if c.Offset != wantOffsets[i] {
			t.Errorf("wrong chunk order: expected offset %d, got %d", wantOffsets[i], c.Offset)
		}
	}

	// Lookup by chunk hash resolves the owning blob and intra-blob offset.
	found, err := repo.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get blob chunk by chunk hash: %v", err)
	}
	if found == nil {
		t.Fatal("expected blob chunk, got nil")
	}
	if found.BlobHash != "blob1" {
		t.Errorf("wrong blob hash: expected blob1, got %s", found.BlobHash)
	}
	if found.Offset != 1024 {
		t.Errorf("wrong offset: expected 1024, got %d", found.Offset)
	}

	// Unknown hashes yield a nil result rather than an error.
	found, err = repo.GetByChunkHash(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if found != nil {
		t.Error("expected nil for non-existent chunk")
	}
}
// TestBlobChunkRepositoryMultipleBlobs covers the deduplication case
// where a single chunk appears in more than one blob.
func TestBlobChunkRepositoryMultipleBlobs(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewBlobChunkRepository(db)

	// Two blobs; chunk2 appears in both of them.
	mappings := []BlobChunk{
		{BlobHash: "blob1", ChunkHash: "chunk1", Offset: 0, Length: 1024},
		{BlobHash: "blob1", ChunkHash: "chunk2", Offset: 1024, Length: 1024},
		{BlobHash: "blob2", ChunkHash: "chunk2", Offset: 0, Length: 1024}, // chunk2 is shared
		{BlobHash: "blob2", ChunkHash: "chunk3", Offset: 1024, Length: 1024},
	}
	for i := range mappings {
		if err := repo.Create(ctx, nil, &mappings[i]); err != nil {
			t.Fatalf("failed to create blob chunk: %v", err)
		}
	}

	// blob1 holds exactly two chunks.
	got, err := repo.GetByBlobHash(ctx, "blob1")
	if err != nil {
		t.Fatalf("failed to get blob1 chunks: %v", err)
	}
	if len(got) != 2 {
		t.Errorf("expected 2 chunks for blob1, got %d", len(got))
	}

	// blob2 holds exactly two chunks.
	got, err = repo.GetByBlobHash(ctx, "blob2")
	if err != nil {
		t.Fatalf("failed to get blob2 chunks: %v", err)
	}
	if len(got) != 2 {
		t.Errorf("expected 2 chunks for blob2, got %d", len(got))
	}

	// The shared chunk resolves to the first matching blob (blob1).
	shared, err := repo.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get shared chunk: %v", err)
	}
	if shared == nil {
		t.Fatal("expected shared chunk, got nil")
	}
	if shared.BlobHash != "blob1" {
		t.Errorf("expected blob1 for shared chunk, got %s", shared.BlobHash)
	}
}

View File

@ -25,7 +25,7 @@ func (r *BlobRepository) Create(ctx context.Context, tx *sql.Tx, blob *Blob) err
if tx != nil {
_, err = tx.ExecContext(ctx, query, blob.BlobHash, blob.CreatedTS.Unix())
} else {
_, err = r.db.conn.ExecContext(ctx, query, blob.BlobHash, blob.CreatedTS.Unix())
_, err = r.db.ExecWithLock(ctx, query, blob.BlobHash, blob.CreatedTS.Unix())
}
if err != nil {

View File

@ -0,0 +1,100 @@
package database
import (
"context"
"testing"
"time"
)
// TestBlobRepository covers Create, GetByHash and List (including
// limit/offset pagination) for the blobs table.
func TestBlobRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewBlobRepository(db)

	// Timestamps are compared after truncation to whole seconds.
	created := &Blob{BlobHash: "blobhash123", CreatedTS: time.Now().Truncate(time.Second)}
	if err := repo.Create(ctx, nil, created); err != nil {
		t.Fatalf("failed to create blob: %v", err)
	}

	fetched, err := repo.GetByHash(ctx, created.BlobHash)
	if err != nil {
		t.Fatalf("failed to get blob: %v", err)
	}
	if fetched == nil {
		t.Fatal("expected blob, got nil")
	}
	if fetched.BlobHash != created.BlobHash {
		t.Errorf("blob hash mismatch: got %s, want %s", fetched.BlobHash, created.BlobHash)
	}
	if !fetched.CreatedTS.Equal(created.CreatedTS) {
		t.Errorf("created timestamp mismatch: got %v, want %v", fetched.CreatedTS, created.CreatedTS)
	}

	// A second blob lets us exercise listing and pagination.
	if err := repo.Create(ctx, nil, &Blob{BlobHash: "blobhash456", CreatedTS: time.Now().Truncate(time.Second)}); err != nil {
		t.Fatalf("failed to create second blob: %v", err)
	}

	all, err := repo.List(ctx, 10, 0)
	if err != nil {
		t.Fatalf("failed to list blobs: %v", err)
	}
	if len(all) != 2 {
		t.Errorf("expected 2 blobs, got %d", len(all))
	}

	// limit=1 returns a single row.
	page, err := repo.List(ctx, 1, 0)
	if err != nil {
		t.Fatalf("failed to list blobs with limit: %v", err)
	}
	if len(page) != 1 {
		t.Errorf("expected 1 blob with limit, got %d", len(page))
	}

	// offset=1 skips the first row, leaving one.
	page, err = repo.List(ctx, 1, 1)
	if err != nil {
		t.Fatalf("failed to list blobs with offset: %v", err)
	}
	if len(page) != 1 {
		t.Errorf("expected 1 blob with offset, got %d", len(page))
	}
}
// TestBlobRepositoryDuplicate verifies the unique constraint on the blob
// hash: inserting the same blob a second time must return an error.
func TestBlobRepositoryDuplicate(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewBlobRepository(db)

	dup := &Blob{BlobHash: "duplicate_blob", CreatedTS: time.Now().Truncate(time.Second)}
	if err := repo.Create(ctx, nil, dup); err != nil {
		t.Fatalf("failed to create blob: %v", err)
	}
	if err := repo.Create(ctx, nil, dup); err == nil {
		t.Error("expected error for duplicate blob")
	}
}

View File

@ -25,7 +25,7 @@ func (r *ChunkFileRepository) Create(ctx context.Context, tx *sql.Tx, cf *ChunkF
if tx != nil {
_, err = tx.ExecContext(ctx, query, cf.ChunkHash, cf.FilePath, cf.FileOffset, cf.Length)
} else {
_, err = r.db.conn.ExecContext(ctx, query, cf.ChunkHash, cf.FilePath, cf.FileOffset, cf.Length)
_, err = r.db.ExecWithLock(ctx, query, cf.ChunkHash, cf.FilePath, cf.FileOffset, cf.Length)
}
if err != nil {

View File

@ -0,0 +1,142 @@
package database
import (
"context"
"testing"
)
// TestChunkFileRepository exercises the chunk→file reverse index:
// Create, GetByChunkHash, GetByFilePath, and idempotent re-insertion.
func TestChunkFileRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewChunkFileRepository(db)

	// One chunk referenced from two different files (deduplication).
	inFile1 := &ChunkFile{ChunkHash: "chunk1", FilePath: "/file1.txt", FileOffset: 0, Length: 1024}
	if err := repo.Create(ctx, nil, inFile1); err != nil {
		t.Fatalf("failed to create chunk file: %v", err)
	}
	inFile2 := &ChunkFile{ChunkHash: "chunk1", FilePath: "/file2.txt", FileOffset: 2048, Length: 1024}
	if err := repo.Create(ctx, nil, inFile2); err != nil {
		t.Fatalf("failed to create second chunk file: %v", err)
	}

	// Both referencing files are returned for the shared chunk.
	refs, err := repo.GetByChunkHash(ctx, "chunk1")
	if err != nil {
		t.Fatalf("failed to get chunk files: %v", err)
	}
	if len(refs) != 2 {
		t.Errorf("expected 2 files for chunk, got %d", len(refs))
	}
	var sawFile1, sawFile2 bool
	for _, ref := range refs {
		sawFile1 = sawFile1 || (ref.FilePath == "/file1.txt" && ref.FileOffset == 0)
		sawFile2 = sawFile2 || (ref.FilePath == "/file2.txt" && ref.FileOffset == 2048)
	}
	if !sawFile1 || !sawFile2 {
		t.Error("not all expected files found")
	}

	// Reverse direction: chunks composing a given file path.
	refs, err = repo.GetByFilePath(ctx, "/file1.txt")
	if err != nil {
		t.Fatalf("failed to get chunks by file path: %v", err)
	}
	if len(refs) != 1 {
		t.Errorf("expected 1 chunk for file, got %d", len(refs))
	}
	if refs[0].ChunkHash != "chunk1" {
		t.Errorf("wrong chunk hash: expected chunk1, got %s", refs[0].ChunkHash)
	}

	// Re-inserting an identical mapping must be idempotent, not an error.
	if err := repo.Create(ctx, nil, inFile1); err != nil {
		t.Fatalf("failed to create duplicate chunk file: %v", err)
	}
}
// TestChunkFileRepositoryComplexDeduplication builds three files that
// share chunks pairwise and verifies lookups in both directions.
func TestChunkFileRepositoryComplexDeduplication(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewChunkFileRepository(db)

	// Layout:
	//   file1: chunk1 chunk2 chunk3
	//   file2: chunk2 chunk3 chunk4
	//   file3: chunk1 chunk4
	mappings := []ChunkFile{
		{ChunkHash: "chunk1", FilePath: "/file1.txt", FileOffset: 0, Length: 1024},
		{ChunkHash: "chunk2", FilePath: "/file1.txt", FileOffset: 1024, Length: 1024},
		{ChunkHash: "chunk3", FilePath: "/file1.txt", FileOffset: 2048, Length: 1024},
		{ChunkHash: "chunk2", FilePath: "/file2.txt", FileOffset: 0, Length: 1024},
		{ChunkHash: "chunk3", FilePath: "/file2.txt", FileOffset: 1024, Length: 1024},
		{ChunkHash: "chunk4", FilePath: "/file2.txt", FileOffset: 2048, Length: 1024},
		{ChunkHash: "chunk1", FilePath: "/file3.txt", FileOffset: 0, Length: 1024},
		{ChunkHash: "chunk4", FilePath: "/file3.txt", FileOffset: 1024, Length: 1024},
	}
	for i := range mappings {
		if err := repo.Create(ctx, nil, &mappings[i]); err != nil {
			t.Fatalf("failed to create chunk file: %v", err)
		}
	}

	// chunk1 is shared by file1 and file3.
	refs, err := repo.GetByChunkHash(ctx, "chunk1")
	if err != nil {
		t.Fatalf("failed to get files for chunk1: %v", err)
	}
	if len(refs) != 2 {
		t.Errorf("expected 2 files for chunk1, got %d", len(refs))
	}

	// chunk2 is shared by file1 and file2.
	refs, err = repo.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get files for chunk2: %v", err)
	}
	if len(refs) != 2 {
		t.Errorf("expected 2 files for chunk2, got %d", len(refs))
	}

	// file2 is composed of exactly three chunks.
	parts, err := repo.GetByFilePath(ctx, "/file2.txt")
	if err != nil {
		t.Fatalf("failed to get chunks for file2: %v", err)
	}
	if len(parts) != 3 {
		t.Errorf("expected 3 chunks for file2, got %d", len(parts))
	}
}

View File

@ -25,7 +25,7 @@ func (r *ChunkRepository) Create(ctx context.Context, tx *sql.Tx, chunk *Chunk)
if tx != nil {
_, err = tx.ExecContext(ctx, query, chunk.ChunkHash, chunk.SHA256, chunk.Size)
} else {
_, err = r.db.conn.ExecContext(ctx, query, chunk.ChunkHash, chunk.SHA256, chunk.Size)
_, err = r.db.ExecWithLock(ctx, query, chunk.ChunkHash, chunk.SHA256, chunk.Size)
}
if err != nil {

View File

@ -0,0 +1,104 @@
package database
import (
"context"
"testing"
)
// TestChunkRepository covers Create (including idempotent re-creation),
// GetByHash, batch GetByHashes, and ListUnpacked.
func TestChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewChunkRepository(db)

	want := &Chunk{ChunkHash: "chunkhash123", SHA256: "sha256hash123", Size: 4096}
	if err := repo.Create(ctx, nil, want); err != nil {
		t.Fatalf("failed to create chunk: %v", err)
	}

	got, err := repo.GetByHash(ctx, want.ChunkHash)
	if err != nil {
		t.Fatalf("failed to get chunk: %v", err)
	}
	if got == nil {
		t.Fatal("expected chunk, got nil")
	}
	if got.ChunkHash != want.ChunkHash {
		t.Errorf("chunk hash mismatch: got %s, want %s", got.ChunkHash, want.ChunkHash)
	}
	if got.SHA256 != want.SHA256 {
		t.Errorf("sha256 mismatch: got %s, want %s", got.SHA256, want.SHA256)
	}
	if got.Size != want.Size {
		t.Errorf("size mismatch: got %d, want %d", got.Size, want.Size)
	}

	// Re-creating an existing chunk must be idempotent, not an error.
	if err := repo.Create(ctx, nil, want); err != nil {
		t.Fatalf("failed to create duplicate chunk: %v", err)
	}

	// Batch lookup across two hashes.
	other := &Chunk{ChunkHash: "chunkhash456", SHA256: "sha256hash456", Size: 8192}
	if err := repo.Create(ctx, nil, other); err != nil {
		t.Fatalf("failed to create second chunk: %v", err)
	}
	batch, err := repo.GetByHashes(ctx, []string{want.ChunkHash, other.ChunkHash})
	if err != nil {
		t.Fatalf("failed to get chunks by hashes: %v", err)
	}
	if len(batch) != 2 {
		t.Errorf("expected 2 chunks, got %d", len(batch))
	}

	// Both chunks should be reported by ListUnpacked.
	unpacked, err := repo.ListUnpacked(ctx, 10)
	if err != nil {
		t.Fatalf("failed to list unpacked chunks: %v", err)
	}
	if len(unpacked) != 2 {
		t.Errorf("expected 2 unpacked chunks, got %d", len(unpacked))
	}
}
// TestChunkRepositoryNotFound verifies the miss behavior: lookups for
// unknown or empty inputs return nil without an error.
func TestChunkRepositoryNotFound(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewChunkRepository(db)

	// A missing hash yields nil, not an error.
	got, err := repo.GetByHash(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got != nil {
		t.Error("expected nil for non-existent chunk")
	}

	// An empty hash list yields a nil slice, not an error.
	batch, err := repo.GetByHashes(ctx, []string{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if batch != nil {
		t.Error("expected nil for empty hash list")
	}
}

View File

@ -4,16 +4,18 @@ import (
"context"
"database/sql"
"fmt"
"sync"
_ "github.com/mattn/go-sqlite3"
_ "modernc.org/sqlite"
)
type DB struct {
conn *sql.DB
conn *sql.DB
writeLock sync.Mutex
}
func New(ctx context.Context, path string) (*DB, error) {
conn, err := sql.Open("sqlite3", path+"?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000")
conn, err := sql.Open("sqlite", path+"?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000")
if err != nil {
return nil, fmt.Errorf("opening database: %w", err)
}
@ -51,6 +53,30 @@ func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
return db.conn.BeginTx(ctx, opts)
}
// LockForWrite acquires the process-wide write lock, serializing writers
// so that only one goroutine performs a write to the SQLite database at a
// time. Every call must be paired with UnlockWrite.
func (db *DB) LockForWrite() {
db.writeLock.Lock()
}
// UnlockWrite releases the write lock previously acquired with
// LockForWrite.
func (db *DB) UnlockWrite() {
db.writeLock.Unlock()
}
// ExecWithLock executes a write query with the write lock held, so that
// concurrent callers are serialized instead of racing on SQLite's single
// writer. The lock is released when ExecContext returns.
func (db *DB) ExecWithLock(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
db.writeLock.Lock()
defer db.writeLock.Unlock()
return db.conn.ExecContext(ctx, query, args...)
}
// QueryRowWithLock executes a write query that returns a row with the write lock held.
// NOTE(review): the deferred Unlock fires when this function returns, but
// database/sql may not fetch the row until the caller invokes Scan on the
// returned *sql.Row — so part of the statement's work can execute after
// the lock is released. Confirm this is acceptable for write-then-read
// queries (e.g. INSERT ... RETURNING) before relying on the lock here.
func (db *DB) QueryRowWithLock(ctx context.Context, query string, args ...interface{}) *sql.Row {
db.writeLock.Lock()
defer db.writeLock.Unlock()
return db.conn.QueryRowContext(ctx, query, args...)
}
func (db *DB) createSchema(ctx context.Context) error {
schema := `
CREATE TABLE IF NOT EXISTS files (
@ -105,7 +131,10 @@ func (db *DB) createSchema(ctx context.Context) error {
created_ts INTEGER NOT NULL,
file_count INTEGER NOT NULL,
chunk_count INTEGER NOT NULL,
blob_count INTEGER NOT NULL
blob_count INTEGER NOT NULL,
total_size INTEGER NOT NULL,
blob_size INTEGER NOT NULL,
compression_ratio REAL NOT NULL
);
`

View File

@ -0,0 +1,96 @@
package database
import (
"context"
"fmt"
"path/filepath"
"testing"
)
// TestDatabase verifies that New opens a database at the given path and
// that schema creation produced every expected table.
func TestDatabase(t *testing.T) {
	ctx := context.Background()
	dbPath := filepath.Join(t.TempDir(), "test.db")

	db, err := New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}()

	// Test connection
	if db.Conn() == nil {
		t.Fatal("database connection is nil")
	}

	// New runs schema creation; confirm each table exists in sqlite_master.
	tables := []string{
		"files", "file_chunks", "chunks", "blobs",
		"blob_chunks", "chunk_files", "snapshots",
	}
	for _, table := range tables {
		var name string
		// Use the context-aware variant so the query honors ctx, matching
		// the rest of the package's database calls.
		err := db.conn.QueryRowContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name=?", table).Scan(&name)
		if err != nil {
			t.Errorf("table %s does not exist: %v", table, err)
		}
	}
}
// TestDatabaseInvalidPath ensures New reports an error when the database
// file cannot be created at the requested location.
func TestDatabaseInvalidPath(t *testing.T) {
	ctx := context.Background()
	if _, err := New(ctx, "/invalid/path/that/does/not/exist/test.db"); err == nil {
		t.Fatal("expected error for invalid path")
	}
}
// TestDatabaseConcurrentAccess fires ten goroutines through ExecWithLock
// at once and checks that every insert landed, exercising the write lock.
func TestDatabaseConcurrentAccess(t *testing.T) {
	ctx := context.Background()
	dbPath := filepath.Join(t.TempDir(), "test.db")

	db, err := New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}()

	// Launch the writers; ExecWithLock must serialize them safely.
	const writers = 10
	done := make(chan struct{}, writers)
	for i := 0; i < writers; i++ {
		go func(i int) {
			defer func() { done <- struct{}{} }()
			_, err := db.ExecWithLock(ctx, "INSERT INTO chunks (chunk_hash, sha256, size) VALUES (?, ?, ?)",
				fmt.Sprintf("hash%d", i), fmt.Sprintf("sha%d", i), i*1024)
			if err != nil {
				t.Errorf("concurrent insert failed: %v", err)
			}
		}(i)
	}
	// Block until every goroutine has signalled completion.
	for i := 0; i < writers; i++ {
		<-done
	}

	// Every insert must have landed.
	var count int
	err = db.conn.QueryRowContext(ctx, "SELECT COUNT(*) FROM chunks").Scan(&count)
	if err != nil {
		t.Fatalf("failed to count chunks: %v", err)
	}
	if count != 10 {
		t.Errorf("expected 10 chunks, got %d", count)
	}
}

View File

@ -25,7 +25,7 @@ func (r *FileChunkRepository) Create(ctx context.Context, tx *sql.Tx, fc *FileCh
if tx != nil {
_, err = tx.ExecContext(ctx, query, fc.Path, fc.Idx, fc.ChunkHash)
} else {
_, err = r.db.conn.ExecContext(ctx, query, fc.Path, fc.Idx, fc.ChunkHash)
_, err = r.db.ExecWithLock(ctx, query, fc.Path, fc.Idx, fc.ChunkHash)
}
if err != nil {
@ -69,7 +69,7 @@ func (r *FileChunkRepository) DeleteByPath(ctx context.Context, tx *sql.Tx, path
if tx != nil {
_, err = tx.ExecContext(ctx, query, path)
} else {
_, err = r.db.conn.ExecContext(ctx, query, path)
_, err = r.db.ExecWithLock(ctx, query, path)
}
if err != nil {

View File

@ -0,0 +1,119 @@
package database
import (
"context"
"fmt"
"testing"
)
// TestFileChunkRepository exercises the ordered file→chunk mapping:
// Create, GetByPath ordering, idempotent re-insert, and DeleteByPath.
func TestFileChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewFileChunkRepository(db)

	// Three ordered chunks for one file.
	first := &FileChunk{Path: "/test/file.txt", Idx: 0, ChunkHash: "chunk1"}
	if err := repo.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create file chunk: %v", err)
	}
	if err := repo.Create(ctx, nil, &FileChunk{Path: "/test/file.txt", Idx: 1, ChunkHash: "chunk2"}); err != nil {
		t.Fatalf("failed to create second file chunk: %v", err)
	}
	if err := repo.Create(ctx, nil, &FileChunk{Path: "/test/file.txt", Idx: 2, ChunkHash: "chunk3"}); err != nil {
		t.Fatalf("failed to create third file chunk: %v", err)
	}

	// GetByPath returns every chunk, ordered by index.
	got, err := repo.GetByPath(ctx, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to get file chunks: %v", err)
	}
	if len(got) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(got))
	}
	for i, fc := range got {
		if fc.Idx != i {
			t.Errorf("wrong chunk order: expected idx %d, got %d", i, fc.Idx)
		}
	}

	// Re-inserting an existing mapping must be idempotent.
	if err := repo.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create duplicate file chunk: %v", err)
	}

	// DeleteByPath removes every mapping for the file.
	if err := repo.DeleteByPath(ctx, nil, "/test/file.txt"); err != nil {
		t.Fatalf("failed to delete file chunks: %v", err)
	}
	got, err = repo.GetByPath(ctx, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to get deleted file chunks: %v", err)
	}
	if len(got) != 0 {
		t.Errorf("expected 0 chunks after delete, got %d", len(got))
	}
}
// TestFileChunkRepositoryMultipleFiles stores two chunks for each of
// three files and checks that lookups stay scoped to their own path.
func TestFileChunkRepositoryMultipleFiles(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewFileChunkRepository(db)

	paths := []string{"/file1.txt", "/file2.txt", "/file3.txt"}
	for _, p := range paths {
		for idx := 0; idx < 2; idx++ {
			fc := &FileChunk{
				Path:      p,
				Idx:       idx,
				ChunkHash: fmt.Sprintf("%s_chunk%d", p, idx),
			}
			if err := repo.Create(ctx, nil, fc); err != nil {
				t.Fatalf("failed to create file chunk: %v", err)
			}
		}
	}

	// Each file must report exactly its own two chunks.
	for _, p := range paths {
		got, err := repo.GetByPath(ctx, p)
		if err != nil {
			t.Fatalf("failed to get chunks for %s: %v", p, err)
		}
		if len(got) != 2 {
			t.Errorf("expected 2 chunks for %s, got %d", p, len(got))
		}
	}
}

View File

@ -33,7 +33,7 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
if tx != nil {
_, err = tx.ExecContext(ctx, query, file.Path, file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget)
} else {
_, err = r.db.conn.ExecContext(ctx, query, file.Path, file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget)
_, err = r.db.ExecWithLock(ctx, query, file.Path, file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget)
}
if err != nil {
@ -134,7 +134,7 @@ func (r *FileRepository) Delete(ctx context.Context, tx *sql.Tx, path string) er
if tx != nil {
_, err = tx.ExecContext(ctx, query, path)
} else {
_, err = r.db.conn.ExecContext(ctx, query, path)
_, err = r.db.ExecWithLock(ctx, query, path)
}
if err != nil {

View File

@ -0,0 +1,191 @@
package database
import (
"context"
"database/sql"
"fmt"
"os"
"path/filepath"
"testing"
"time"
)
// setupTestDB opens a fresh SQLite database in a per-test temp directory
// and returns it together with a cleanup func that closes it. Callers
// should `defer cleanup()`.
func setupTestDB(t *testing.T) (*DB, func()) {
	t.Helper() // report failures at the caller's line, not inside the helper
	ctx := context.Background()
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	cleanup := func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}
	return db, cleanup
}
// TestFileRepository walks a file record through its lifecycle:
// create, read back, upsert, list-modified, and delete.
func TestFileRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewFileRepository(db)

	// Timestamps round-trip through unix seconds, so truncate first.
	now := time.Now().Truncate(time.Second)
	file := &File{
		Path:       "/test/file.txt",
		MTime:      now,
		CTime:      now,
		Size:       1024,
		Mode:       0644,
		UID:        1000,
		GID:        1000,
		LinkTarget: "",
	}
	if err := repo.Create(ctx, nil, file); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	got, err := repo.GetByPath(ctx, file.Path)
	if err != nil {
		t.Fatalf("failed to get file: %v", err)
	}
	if got == nil {
		t.Fatal("expected file, got nil")
	}
	if got.Path != file.Path {
		t.Errorf("path mismatch: got %s, want %s", got.Path, file.Path)
	}
	if !got.MTime.Equal(file.MTime) {
		t.Errorf("mtime mismatch: got %v, want %v", got.MTime, file.MTime)
	}
	if got.Size != file.Size {
		t.Errorf("size mismatch: got %d, want %d", got.Size, file.Size)
	}
	if got.Mode != file.Mode {
		t.Errorf("mode mismatch: got %o, want %o", got.Mode, file.Mode)
	}

	// Create acts as an upsert: writing the same path updates in place.
	file.Size = 2048
	file.MTime = time.Now().Truncate(time.Second)
	if err := repo.Create(ctx, nil, file); err != nil {
		t.Fatalf("failed to update file: %v", err)
	}
	got, err = repo.GetByPath(ctx, file.Path)
	if err != nil {
		t.Fatalf("failed to get updated file: %v", err)
	}
	if got.Size != 2048 {
		t.Errorf("size not updated: got %d, want %d", got.Size, 2048)
	}

	// The freshly written file shows up as modified within the last hour.
	modified, err := repo.ListModifiedSince(ctx, time.Now().Add(-1*time.Hour))
	if err != nil {
		t.Fatalf("failed to list files: %v", err)
	}
	if len(modified) != 1 {
		t.Errorf("expected 1 file, got %d", len(modified))
	}

	// Deleting removes the row; a later lookup yields nil without error.
	if err := repo.Delete(ctx, nil, file.Path); err != nil {
		t.Fatalf("failed to delete file: %v", err)
	}
	got, err = repo.GetByPath(ctx, file.Path)
	if err != nil {
		t.Fatalf("error getting deleted file: %v", err)
	}
	if got != nil {
		t.Error("expected nil for deleted file")
	}
}
// TestFileRepositorySymlink verifies that symlink metadata (the symlink
// mode bit and the link target) survives a round-trip through the
// repository and is detected by IsSymlink().
func TestFileRepositorySymlink(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewFileRepository(db)

	// Test symlink. The mode carries os.ModeSymlink so IsSymlink() can
	// recognize the entry after it is read back from the database.
	symlink := &File{
		Path:       "/test/link",
		MTime:      time.Now().Truncate(time.Second),
		CTime:      time.Now().Truncate(time.Second),
		Size:       0,
		Mode:       uint32(0777 | os.ModeSymlink),
		UID:        1000,
		GID:        1000,
		LinkTarget: "/test/target",
	}
	err := repo.Create(ctx, nil, symlink)
	if err != nil {
		t.Fatalf("failed to create symlink: %v", err)
	}

	retrieved, err := repo.GetByPath(ctx, symlink.Path)
	if err != nil {
		t.Fatalf("failed to get symlink: %v", err)
	}
	// Guard before dereferencing: a missing row should fail the test,
	// not panic it with a nil-pointer dereference.
	if retrieved == nil {
		t.Fatal("expected symlink, got nil")
	}
	if !retrieved.IsSymlink() {
		t.Error("expected IsSymlink() to be true")
	}
	if retrieved.LinkTarget != symlink.LinkTarget {
		t.Errorf("link target mismatch: got %s, want %s", retrieved.LinkTarget, symlink.LinkTarget)
	}
}
// TestFileRepositoryTransaction verifies that a file created inside a
// transaction is rolled back when the transaction callback returns an error.
func TestFileRepositoryTransaction(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	const path = "/test/tx_file.txt"
	now := time.Now().Truncate(time.Second)

	// Create a file inside the transaction, then deliberately fail the
	// callback so the write must be rolled back.
	txErr := repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		f := &File{
			Path:  path,
			MTime: now,
			CTime: now,
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}
		if err := repos.Files.Create(ctx, tx, f); err != nil {
			return err
		}
		// Return error to trigger rollback
		return fmt.Errorf("test rollback")
	})
	if txErr == nil || txErr.Error() != "test rollback" {
		t.Fatalf("expected rollback error, got: %v", txErr)
	}

	// Verify file was not created
	got, err := repos.Files.GetByPath(ctx, path)
	if err != nil {
		t.Fatalf("error checking for file: %v", err)
	}
	if got != nil {
		t.Error("file should not exist after rollback")
	}
}

View File

@ -57,11 +57,14 @@ type ChunkFile struct {
// Snapshot represents a snapshot record in the database
type Snapshot struct {
ID string
Hostname string
VaultikVersion string
CreatedTS time.Time
FileCount int64
ChunkCount int64
BlobCount int64
ID string
Hostname string
VaultikVersion string
CreatedTS time.Time
FileCount int64
ChunkCount int64
BlobCount int64
TotalSize int64 // Total size of all referenced files
BlobSize int64 // Total size of all referenced blobs (compressed and encrypted)
CompressionRatio float64 // Compression ratio (BlobSize / TotalSize)
}

View File

@ -33,6 +33,10 @@ func NewRepositories(db *DB) *Repositories {
type TxFunc func(ctx context.Context, tx *sql.Tx) error
func (r *Repositories) WithTx(ctx context.Context, fn TxFunc) error {
// Acquire write lock for the entire transaction
r.db.LockForWrite()
defer r.db.UnlockWrite()
tx, err := r.db.BeginTx(ctx, nil)
if err != nil {
return fmt.Errorf("beginning transaction: %w", err)

View File

@ -0,0 +1,247 @@
package database
import (
"context"
"database/sql"
"fmt"
"testing"
"time"
)
// TestRepositoriesTransaction commits a file, two chunks, the file-to-chunk
// mappings, a blob, and the blob-to-chunk mappings in a single transaction,
// then verifies that every row is visible after the commit.
func TestRepositoriesTransaction(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	const filePath = "/test/tx_file.txt"
	now := time.Now().Truncate(time.Second)

	// Test successful transaction with multiple operations
	txErr := repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		// Create a file
		f := &File{
			Path:  filePath,
			MTime: now,
			CTime: now,
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}
		if err := repos.Files.Create(ctx, tx, f); err != nil {
			return err
		}

		// Create each chunk, then immediately map it to the file so every
		// chunk row exists before its file_chunks row.
		first := &Chunk{ChunkHash: "tx_chunk1", SHA256: "tx_sha1", Size: 512}
		if err := repos.Chunks.Create(ctx, tx, first); err != nil {
			return err
		}
		if err := repos.FileChunks.Create(ctx, tx, &FileChunk{
			Path:      filePath,
			Idx:       0,
			ChunkHash: first.ChunkHash,
		}); err != nil {
			return err
		}

		second := &Chunk{ChunkHash: "tx_chunk2", SHA256: "tx_sha2", Size: 512}
		if err := repos.Chunks.Create(ctx, tx, second); err != nil {
			return err
		}
		if err := repos.FileChunks.Create(ctx, tx, &FileChunk{
			Path:      filePath,
			Idx:       1,
			ChunkHash: second.ChunkHash,
		}); err != nil {
			return err
		}

		// Create a blob and record where each chunk lives inside it.
		b := &Blob{BlobHash: "tx_blob1", CreatedTS: now}
		if err := repos.Blobs.Create(ctx, tx, b); err != nil {
			return err
		}
		if err := repos.BlobChunks.Create(ctx, tx, &BlobChunk{
			BlobHash:  b.BlobHash,
			ChunkHash: first.ChunkHash,
			Offset:    0,
			Length:    512,
		}); err != nil {
			return err
		}
		return repos.BlobChunks.Create(ctx, tx, &BlobChunk{
			BlobHash:  b.BlobHash,
			ChunkHash: second.ChunkHash,
			Offset:    512,
			Length:    512,
		})
	})
	if txErr != nil {
		t.Fatalf("transaction failed: %v", txErr)
	}

	// Verify all data was committed
	file, err := repos.Files.GetByPath(ctx, filePath)
	if err != nil {
		t.Fatalf("failed to get file: %v", err)
	}
	if file == nil {
		t.Error("expected file after transaction")
	}

	chunks, err := repos.FileChunks.GetByPath(ctx, filePath)
	if err != nil {
		t.Fatalf("failed to get file chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 file chunks, got %d", len(chunks))
	}

	blob, err := repos.Blobs.GetByHash(ctx, "tx_blob1")
	if err != nil {
		t.Fatalf("failed to get blob: %v", err)
	}
	if blob == nil {
		t.Error("expected blob after transaction")
	}
}
// TestRepositoriesTransactionRollback confirms that neither the file nor the
// chunk created inside a failing transaction survives the rollback.
func TestRepositoriesTransactionRollback(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	now := time.Now().Truncate(time.Second)

	// Test transaction rollback
	txErr := repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		// Create a file
		if err := repos.Files.Create(ctx, tx, &File{
			Path:  "/test/rollback_file.txt",
			MTime: now,
			CTime: now,
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}); err != nil {
			return err
		}
		// Create a chunk
		if err := repos.Chunks.Create(ctx, tx, &Chunk{
			ChunkHash: "rollback_chunk",
			SHA256:    "rollback_sha",
			Size:      1024,
		}); err != nil {
			return err
		}
		// Return error to trigger rollback
		return fmt.Errorf("intentional rollback")
	})
	if txErr == nil || txErr.Error() != "intentional rollback" {
		t.Fatalf("expected rollback error, got: %v", txErr)
	}

	// Verify nothing was committed
	file, err := repos.Files.GetByPath(ctx, "/test/rollback_file.txt")
	if err != nil {
		t.Fatalf("error checking for file: %v", err)
	}
	if file != nil {
		t.Error("file should not exist after rollback")
	}

	chunk, err := repos.Chunks.GetByHash(ctx, "rollback_chunk")
	if err != nil {
		t.Fatalf("error checking for chunk: %v", err)
	}
	if chunk != nil {
		t.Error("chunk should not exist after rollback")
	}
}
// TestRepositoriesReadTransaction seeds one file outside any transaction,
// reads it back inside WithReadTx, and attempts (and deliberately ignores)
// a write to exercise the read-only usage pattern.
func TestRepositoriesReadTransaction(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	const seedPath = "/test/read_file.txt"
	now := time.Now().Truncate(time.Second)

	// First, create some data
	seed := &File{
		Path:  seedPath,
		MTime: now,
		CTime: now,
		Size:  1024,
		Mode:  0644,
		UID:   1000,
		GID:   1000,
	}
	if err := repos.Files.Create(ctx, nil, seed); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Test read-only transaction
	var got *File
	err := repos.WithReadTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		var readErr error
		got, readErr = repos.Files.GetByPath(ctx, seedPath)
		if readErr != nil {
			return readErr
		}
		// Try to write in read-only transaction (should fail)
		_ = repos.Files.Create(ctx, tx, &File{
			Path:  "/test/should_fail.txt",
			MTime: time.Now(),
			CTime: time.Now(),
			Size:  0,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		})
		// SQLite might not enforce read-only at this level, but we test the pattern
		return nil
	})
	if err != nil {
		t.Fatalf("read transaction failed: %v", err)
	}
	if got == nil {
		t.Error("expected to retrieve file in read transaction")
	}
}

View File

@ -17,15 +17,17 @@ func NewSnapshotRepository(db *DB) *SnapshotRepository {
func (r *SnapshotRepository) Create(ctx context.Context, tx *sql.Tx, snapshot *Snapshot) error {
query := `
INSERT INTO snapshots (id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count)
VALUES (?, ?, ?, ?, ?, ?, ?)
INSERT INTO snapshots (id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count, total_size, blob_size, compression_ratio)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
`
var err error
if tx != nil {
_, err = tx.ExecContext(ctx, query, snapshot.ID, snapshot.Hostname, snapshot.VaultikVersion, snapshot.CreatedTS.Unix(), snapshot.FileCount, snapshot.ChunkCount, snapshot.BlobCount)
_, err = tx.ExecContext(ctx, query, snapshot.ID, snapshot.Hostname, snapshot.VaultikVersion, snapshot.CreatedTS.Unix(),
snapshot.FileCount, snapshot.ChunkCount, snapshot.BlobCount, snapshot.TotalSize, snapshot.BlobSize, snapshot.CompressionRatio)
} else {
_, err = r.db.conn.ExecContext(ctx, query, snapshot.ID, snapshot.Hostname, snapshot.VaultikVersion, snapshot.CreatedTS.Unix(), snapshot.FileCount, snapshot.ChunkCount, snapshot.BlobCount)
_, err = r.db.ExecWithLock(ctx, query, snapshot.ID, snapshot.Hostname, snapshot.VaultikVersion, snapshot.CreatedTS.Unix(),
snapshot.FileCount, snapshot.ChunkCount, snapshot.BlobCount, snapshot.TotalSize, snapshot.BlobSize, snapshot.CompressionRatio)
}
if err != nil {
@ -35,20 +37,28 @@ func (r *SnapshotRepository) Create(ctx context.Context, tx *sql.Tx, snapshot *S
return nil
}
func (r *SnapshotRepository) UpdateCounts(ctx context.Context, tx *sql.Tx, snapshotID string, fileCount, chunkCount, blobCount int64) error {
func (r *SnapshotRepository) UpdateCounts(ctx context.Context, tx *sql.Tx, snapshotID string, fileCount, chunkCount, blobCount, totalSize, blobSize int64) error {
compressionRatio := 1.0
if totalSize > 0 {
compressionRatio = float64(blobSize) / float64(totalSize)
}
query := `
UPDATE snapshots
SET file_count = ?,
chunk_count = ?,
blob_count = ?
blob_count = ?,
total_size = ?,
blob_size = ?,
compression_ratio = ?
WHERE id = ?
`
var err error
if tx != nil {
_, err = tx.ExecContext(ctx, query, fileCount, chunkCount, blobCount, snapshotID)
_, err = tx.ExecContext(ctx, query, fileCount, chunkCount, blobCount, totalSize, blobSize, compressionRatio, snapshotID)
} else {
_, err = r.db.conn.ExecContext(ctx, query, fileCount, chunkCount, blobCount, snapshotID)
_, err = r.db.ExecWithLock(ctx, query, fileCount, chunkCount, blobCount, totalSize, blobSize, compressionRatio, snapshotID)
}
if err != nil {
@ -60,7 +70,7 @@ func (r *SnapshotRepository) UpdateCounts(ctx context.Context, tx *sql.Tx, snaps
func (r *SnapshotRepository) GetByID(ctx context.Context, snapshotID string) (*Snapshot, error) {
query := `
SELECT id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count
SELECT id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count, total_size, blob_size, compression_ratio
FROM snapshots
WHERE id = ?
`
@ -76,6 +86,9 @@ func (r *SnapshotRepository) GetByID(ctx context.Context, snapshotID string) (*S
&snapshot.FileCount,
&snapshot.ChunkCount,
&snapshot.BlobCount,
&snapshot.TotalSize,
&snapshot.BlobSize,
&snapshot.CompressionRatio,
)
if err == sql.ErrNoRows {
@ -92,7 +105,7 @@ func (r *SnapshotRepository) GetByID(ctx context.Context, snapshotID string) (*S
func (r *SnapshotRepository) ListRecent(ctx context.Context, limit int) ([]*Snapshot, error) {
query := `
SELECT id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count
SELECT id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count, total_size, blob_size, compression_ratio
FROM snapshots
ORDER BY created_ts DESC
LIMIT ?
@ -117,6 +130,9 @@ func (r *SnapshotRepository) ListRecent(ctx context.Context, limit int) ([]*Snap
&snapshot.FileCount,
&snapshot.ChunkCount,
&snapshot.BlobCount,
&snapshot.TotalSize,
&snapshot.BlobSize,
&snapshot.CompressionRatio,
)
if err != nil {
return nil, fmt.Errorf("scanning snapshot: %w", err)

View File

@ -0,0 +1,181 @@
package database
import (
"context"
"fmt"
"math"
"testing"
"time"
)
// Sizes and ratios shared by the snapshot repository tests.
const (
	// Mebibyte is the number of bytes in one MiB.
	Mebibyte = 1 << 20

	oneHundredMebibytes = 100 * Mebibyte
	fortyMebibytes      = 40 * Mebibyte
	sixtyMebibytes      = 60 * Mebibyte
	twoHundredMebibytes = 200 * Mebibyte

	// Expected compression ratios (blob size divided by total size).
	compressionRatioPoint4 = 0.4 // 40 MiB / 100 MiB
	compressionRatioPoint3 = 0.3 // 60 MiB / 200 MiB
)
// TestSnapshotRepository covers snapshot create, lookup by ID, count/size
// updates (including the derived compression ratio), and recency-ordered
// listing with a limit.
func TestSnapshotRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	// Test Create
	snapshot := &Snapshot{
		ID:               "2024-01-01T12:00:00Z",
		Hostname:         "test-host",
		VaultikVersion:   "1.0.0",
		CreatedTS:        time.Now().Truncate(time.Second),
		FileCount:        100,
		ChunkCount:       500,
		BlobCount:        10,
		TotalSize:        oneHundredMebibytes,
		BlobSize:         fortyMebibytes,
		CompressionRatio: compressionRatioPoint4, // 40MB / 100MB
	}
	err := repo.Create(ctx, nil, snapshot)
	if err != nil {
		t.Fatalf("failed to create snapshot: %v", err)
	}

	// Test GetByID
	retrieved, err := repo.GetByID(ctx, snapshot.ID)
	if err != nil {
		t.Fatalf("failed to get snapshot: %v", err)
	}
	if retrieved == nil {
		t.Fatal("expected snapshot, got nil")
	}
	if retrieved.ID != snapshot.ID {
		t.Errorf("ID mismatch: got %s, want %s", retrieved.ID, snapshot.ID)
	}
	if retrieved.Hostname != snapshot.Hostname {
		t.Errorf("hostname mismatch: got %s, want %s", retrieved.Hostname, snapshot.Hostname)
	}
	if retrieved.FileCount != snapshot.FileCount {
		t.Errorf("file count mismatch: got %d, want %d", retrieved.FileCount, snapshot.FileCount)
	}

	// Test UpdateCounts. The repository derives the stored compression
	// ratio from blobSize / totalSize, so 60 MiB / 200 MiB should read
	// back as 0.3.
	err = repo.UpdateCounts(ctx, nil, snapshot.ID, 200, 1000, 20, twoHundredMebibytes, sixtyMebibytes)
	if err != nil {
		t.Fatalf("failed to update counts: %v", err)
	}

	retrieved, err = repo.GetByID(ctx, snapshot.ID)
	if err != nil {
		t.Fatalf("failed to get updated snapshot: %v", err)
	}
	// Guard before dereferencing: if the row vanished, fail cleanly
	// instead of panicking on a nil pointer.
	if retrieved == nil {
		t.Fatal("expected snapshot after update, got nil")
	}
	if retrieved.FileCount != 200 {
		t.Errorf("file count not updated: got %d, want %d", retrieved.FileCount, 200)
	}
	if retrieved.ChunkCount != 1000 {
		t.Errorf("chunk count not updated: got %d, want %d", retrieved.ChunkCount, 1000)
	}
	if retrieved.BlobCount != 20 {
		t.Errorf("blob count not updated: got %d, want %d", retrieved.BlobCount, 20)
	}
	if retrieved.TotalSize != twoHundredMebibytes {
		t.Errorf("total size not updated: got %d, want %d", retrieved.TotalSize, twoHundredMebibytes)
	}
	if retrieved.BlobSize != sixtyMebibytes {
		t.Errorf("blob size not updated: got %d, want %d", retrieved.BlobSize, sixtyMebibytes)
	}
	expectedRatio := compressionRatioPoint3 // 0.3
	if math.Abs(retrieved.CompressionRatio-expectedRatio) > 0.001 {
		t.Errorf("compression ratio not updated: got %f, want %f", retrieved.CompressionRatio, expectedRatio)
	}

	// Test ListRecent
	// Add more snapshots with progressively later creation times.
	for i := 2; i <= 5; i++ {
		s := &Snapshot{
			ID:             fmt.Sprintf("2024-01-0%dT12:00:00Z", i),
			Hostname:       "test-host",
			VaultikVersion: "1.0.0",
			CreatedTS:      time.Now().Add(time.Duration(i) * time.Hour).Truncate(time.Second),
			FileCount:      int64(100 * i),
			ChunkCount:     int64(500 * i),
			BlobCount:      int64(10 * i),
		}
		err := repo.Create(ctx, nil, s)
		if err != nil {
			t.Fatalf("failed to create snapshot %d: %v", i, err)
		}
	}

	// Test listing with limit
	recent, err := repo.ListRecent(ctx, 3)
	if err != nil {
		t.Fatalf("failed to list recent snapshots: %v", err)
	}
	if len(recent) != 3 {
		t.Errorf("expected 3 recent snapshots, got %d", len(recent))
	}

	// Verify order (most recent first)
	for i := 0; i < len(recent)-1; i++ {
		if recent[i].CreatedTS.Before(recent[i+1].CreatedTS) {
			t.Error("snapshots not in descending order")
		}
	}
}
// TestSnapshotRepositoryNotFound checks that lookups and updates for an
// unknown snapshot ID succeed without error and without side effects.
func TestSnapshotRepositoryNotFound(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	const missingID = "nonexistent"

	// Test GetByID with non-existent ID
	got, err := repo.GetByID(ctx, missingID)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got != nil {
		t.Error("expected nil for non-existent snapshot")
	}

	// Test UpdateCounts on non-existent snapshot
	if err := repo.UpdateCounts(ctx, nil, missingID, 100, 200, 10, oneHundredMebibytes, fortyMebibytes); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// No error expected, but no rows should be affected
}
// TestSnapshotRepositoryDuplicate ensures that inserting a second snapshot
// with the same ID fails, since ID is the table's primary key.
func TestSnapshotRepositoryDuplicate(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	snap := &Snapshot{
		ID:             "2024-01-01T12:00:00Z",
		Hostname:       "test-host",
		VaultikVersion: "1.0.0",
		CreatedTS:      time.Now().Truncate(time.Second),
		FileCount:      100,
		ChunkCount:     500,
		BlobCount:      10,
	}
	if err := repo.Create(ctx, nil, snap); err != nil {
		t.Fatalf("failed to create snapshot: %v", err)
	}

	// Try to create duplicate - should fail due to primary key constraint
	if err := repo.Create(ctx, nil, snap); err == nil {
		t.Error("expected error for duplicate snapshot")
	}
}