Files
vaultik/internal/database/chunk_files_test.go
clawbot 1c72a37bc8
All checks were successful
check / check (push) Successful in 5s
Remove all ctime usage and storage (#55)
Remove all ctime from the codebase per sneak's decision on [PR #48](#48).

## Rationale

- ctime means different things on macOS (birth time) vs Linux (inode change time) — ambiguous cross-platform
- Vaultik never uses ctime operationally (scanning triggers on mtime change)
- Cannot be restored on either platform
- Write-only forensic data with no consumer

## Changes

- **Schema** (`internal/database/schema.sql`): Removed `ctime` column from `files` table
- **Model** (`internal/database/models.go`): Removed `CTime` field from `File` struct
- **Database layer** (`internal/database/files.go`): Removed ctime from all INSERT/SELECT queries, ON CONFLICT updates, and scan targets in both `scanFile` and `scanFileRows` helpers; updated `CreateBatch` accordingly
- **Scanner** (`internal/snapshot/scanner.go`): Removed `CTime: info.ModTime()` assignment in `checkFileInMemory()`
- **Tests**: Removed all `CTime` field assignments from 8 test files
- **Documentation**: Removed ctime references from `ARCHITECTURE.md` and `docs/DATAMODEL.md`

`docker build .` passes clean (lint, fmt-check, all tests).

closes #54

Co-authored-by: user <user@Mac.lan guest wan>
Reviewed-on: #55
Co-authored-by: clawbot <clawbot@noreply.example.org>
Co-committed-by: clawbot <clawbot@noreply.example.org>
2026-03-20 03:12:46 +01:00

219 lines
5.9 KiB
Go

package database
import (
"context"
"testing"
"time"
"git.eeqj.de/sneak/vaultik/internal/types"
)
// TestChunkFileRepository verifies the basic chunk-file association flow:
// creating associations, looking them up by chunk hash and by file ID, and
// re-inserting an identical association (which must be idempotent).
func TestChunkFileRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	cfRepo := NewChunkFileRepository(db)
	fileRepo := NewFileRepository(db)
	chunkRepo := NewChunkRepository(db)

	// Two files that will share a single chunk (deduplication scenario).
	now := time.Now().Truncate(time.Second)
	file1 := &File{
		Path:       "/file1.txt",
		MTime:      now,
		Size:       1024,
		Mode:       0644,
		UID:        1000,
		GID:        1000,
		LinkTarget: "",
	}
	if err := fileRepo.Create(ctx, nil, file1); err != nil {
		t.Fatalf("failed to create file1: %v", err)
	}
	file2 := &File{
		Path:       "/file2.txt",
		MTime:      now,
		Size:       1024,
		Mode:       0644,
		UID:        1000,
		GID:        1000,
		LinkTarget: "",
	}
	if err := fileRepo.Create(ctx, nil, file2); err != nil {
		t.Fatalf("failed to create file2: %v", err)
	}

	// The chunk row must exist before chunk-file rows can reference it.
	if err := chunkRepo.Create(ctx, nil, &Chunk{
		ChunkHash: types.ChunkHash("chunk1"),
		Size:      1024,
	}); err != nil {
		t.Fatalf("failed to create chunk: %v", err)
	}

	// Associate the chunk with file1 at offset 0.
	first := &ChunkFile{
		ChunkHash:  types.ChunkHash("chunk1"),
		FileID:     file1.ID,
		FileOffset: 0,
		Length:     1024,
	}
	if err := cfRepo.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create chunk file: %v", err)
	}

	// Same chunk appearing in a different file at a different offset.
	second := &ChunkFile{
		ChunkHash:  types.ChunkHash("chunk1"),
		FileID:     file2.ID,
		FileOffset: 2048,
		Length:     1024,
	}
	if err := cfRepo.Create(ctx, nil, second); err != nil {
		t.Fatalf("failed to create second chunk file: %v", err)
	}

	// GetByChunkHash must report both files for the shared chunk.
	byChunk, err := cfRepo.GetByChunkHash(ctx, "chunk1")
	if err != nil {
		t.Fatalf("failed to get chunk files: %v", err)
	}
	if len(byChunk) != 2 {
		t.Errorf("expected 2 files for chunk, got %d", len(byChunk))
	}

	// Both associations (file ID + offset) must be present in the result.
	var sawFile1, sawFile2 bool
	for _, cf := range byChunk {
		switch {
		case cf.FileID == file1.ID && cf.FileOffset == 0:
			sawFile1 = true
		case cf.FileID == file2.ID && cf.FileOffset == 2048:
			sawFile2 = true
		}
	}
	if !sawFile1 || !sawFile2 {
		t.Error("not all expected files found")
	}

	// GetByFileID must report exactly the one chunk stored for file1.
	byFile, err := cfRepo.GetByFileID(ctx, file1.ID)
	if err != nil {
		t.Fatalf("failed to get chunks by file ID: %v", err)
	}
	if len(byFile) != 1 {
		t.Errorf("expected 1 chunk for file, got %d", len(byFile))
	}
	if byFile[0].ChunkHash != types.ChunkHash("chunk1") {
		t.Errorf("wrong chunk hash: expected chunk1, got %s", byFile[0].ChunkHash)
	}

	// Re-inserting an identical association must succeed (idempotent).
	if err := cfRepo.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create duplicate chunk file: %v", err)
	}
}
// TestChunkFileRepositoryComplexDeduplication exercises a many-to-many
// chunk/file layout: three files share four chunks, and lookups by chunk
// hash and by file ID must both return the expected association counts.
func TestChunkFileRepositoryComplexDeduplication(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewChunkFileRepository(db)
	fileRepo := NewFileRepository(db)
	chunksRepo := NewChunkRepository(db)

	// Create test files
	testTime := time.Now().Truncate(time.Second)
	file1 := &File{Path: "/file1.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
	file2 := &File{Path: "/file2.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
	file3 := &File{Path: "/file3.txt", MTime: testTime, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}
	if err := fileRepo.Create(ctx, nil, file1); err != nil {
		t.Fatalf("failed to create file1: %v", err)
	}
	if err := fileRepo.Create(ctx, nil, file2); err != nil {
		t.Fatalf("failed to create file2: %v", err)
	}
	if err := fileRepo.Create(ctx, nil, file3); err != nil {
		t.Fatalf("failed to create file3: %v", err)
	}

	// Create chunks first so the chunk_files rows have valid targets.
	chunks := []types.ChunkHash{"chunk1", "chunk2", "chunk3", "chunk4"}
	for _, chunkHash := range chunks {
		chunk := &Chunk{
			ChunkHash: chunkHash,
			Size:      1024,
		}
		if err := chunksRepo.Create(ctx, nil, chunk); err != nil {
			t.Fatalf("failed to create chunk %s: %v", chunkHash, err)
		}
	}

	// Simulate a scenario where multiple files share chunks
	// File1: chunk1, chunk2, chunk3
	// File2: chunk2, chunk3, chunk4
	// File3: chunk1, chunk4
	chunkFiles := []ChunkFile{
		// File1
		{ChunkHash: types.ChunkHash("chunk1"), FileID: file1.ID, FileOffset: 0, Length: 1024},
		{ChunkHash: types.ChunkHash("chunk2"), FileID: file1.ID, FileOffset: 1024, Length: 1024},
		{ChunkHash: types.ChunkHash("chunk3"), FileID: file1.ID, FileOffset: 2048, Length: 1024},
		// File2
		{ChunkHash: types.ChunkHash("chunk2"), FileID: file2.ID, FileOffset: 0, Length: 1024},
		{ChunkHash: types.ChunkHash("chunk3"), FileID: file2.ID, FileOffset: 1024, Length: 1024},
		{ChunkHash: types.ChunkHash("chunk4"), FileID: file2.ID, FileOffset: 2048, Length: 1024},
		// File3
		{ChunkHash: types.ChunkHash("chunk1"), FileID: file3.ID, FileOffset: 0, Length: 1024},
		{ChunkHash: types.ChunkHash("chunk4"), FileID: file3.ID, FileOffset: 1024, Length: 1024},
	}
	// Index the slice rather than taking the address of the range variable:
	// the original `&cf` aliased a single loop variable (gosec G601; shared
	// across iterations before Go 1.22), which would corrupt the inserts if
	// Create ever retained the pointer.
	for i := range chunkFiles {
		if err := repo.Create(ctx, nil, &chunkFiles[i]); err != nil {
			t.Fatalf("failed to create chunk file: %v", err)
		}
	}

	// Test chunk1 (used by file1 and file3)
	files, err := repo.GetByChunkHash(ctx, "chunk1")
	if err != nil {
		t.Fatalf("failed to get files for chunk1: %v", err)
	}
	if len(files) != 2 {
		t.Errorf("expected 2 files for chunk1, got %d", len(files))
	}

	// Test chunk2 (used by file1 and file2)
	files, err = repo.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get files for chunk2: %v", err)
	}
	if len(files) != 2 {
		t.Errorf("expected 2 files for chunk2, got %d", len(files))
	}

	// Test file2 chunks
	file2Chunks, err := repo.GetByFileID(ctx, file2.ID)
	if err != nil {
		t.Fatalf("failed to get chunks for file2: %v", err)
	}
	if len(file2Chunks) != 3 {
		t.Errorf("expected 3 chunks for file2, got %d", len(file2Chunks))
	}
}