Files
vaultik/internal/database/file_chunks_test.go
clawbot 1c72a37bc8
All checks were successful
check / check (push) Successful in 5s
Remove all ctime usage and storage (#55)
Remove all ctime from the codebase per sneak's decision on [PR #48](#48).

## Rationale

- ctime means different things on macOS (birth time) vs Linux (inode change time) — ambiguous cross-platform
- Vaultik never uses ctime operationally (scanning triggers on mtime change)
- Cannot be restored on either platform
- Write-only forensic data with no consumer

## Changes

- **Schema** (`internal/database/schema.sql`): Removed `ctime` column from `files` table
- **Model** (`internal/database/models.go`): Removed `CTime` field from `File` struct
- **Database layer** (`internal/database/files.go`): Removed ctime from all INSERT/SELECT queries, ON CONFLICT updates, and scan targets in both `scanFile` and `scanFileRows` helpers; updated `CreateBatch` accordingly
- **Scanner** (`internal/snapshot/scanner.go`): Removed `CTime: info.ModTime()` assignment in `checkFileInMemory()`
- **Tests**: Removed all `CTime` field assignments from 8 test files
- **Documentation**: Removed ctime references from `ARCHITECTURE.md` and `docs/DATAMODEL.md`

`docker build .` passes clean (lint, fmt-check, all tests).

closes #54

Co-authored-by: user <user@Mac.lan>
Reviewed-on: #55
Co-authored-by: clawbot <clawbot@noreply.example.org>
Co-committed-by: clawbot <clawbot@noreply.example.org>
2026-03-20 03:12:46 +01:00

192 lines
4.3 KiB
Go

package database
import (
"context"
"fmt"
"testing"
"time"
"git.eeqj.de/sneak/vaultik/internal/types"
)
// TestFileChunkRepository exercises the lifecycle of file-chunk mappings
// for a single file: creation in index order, ordered retrieval via
// GetByFile, idempotent duplicate insertion, and deletion by file ID.
func TestFileChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewFileChunkRepository(db)
	fileRepo := NewFileRepository(db)

	// The parent file row must exist before file_chunks can reference it.
	testTime := time.Now().Truncate(time.Second)
	file := &File{
		Path:       "/test/file.txt",
		MTime:      testTime,
		Size:       3072,
		Mode:       0644,
		UID:        1000,
		GID:        1000,
		LinkTarget: "",
	}
	if err := fileRepo.Create(ctx, nil, file); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Chunk rows must exist before file_chunks rows can reference them.
	chunks := []types.ChunkHash{"chunk1", "chunk2", "chunk3"}
	chunkRepo := NewChunkRepository(db)
	for _, chunkHash := range chunks {
		chunk := &Chunk{
			ChunkHash: chunkHash,
			Size:      1024,
		}
		if err := chunkRepo.Create(ctx, nil, chunk); err != nil {
			t.Fatalf("failed to create chunk %s: %v", chunkHash, err)
		}
	}

	// Create one file_chunk mapping per chunk, in index order.
	for idx, chunkHash := range chunks {
		fc := &FileChunk{
			FileID:    file.ID,
			Idx:       idx,
			ChunkHash: chunkHash,
		}
		if err := repo.Create(ctx, nil, fc); err != nil {
			t.Fatalf("failed to create file chunk %d: %v", idx, err)
		}
	}

	// GetByFile returns all mappings for the path.
	fileChunks, err := repo.GetByFile(ctx, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to get file chunks: %v", err)
	}
	if len(fileChunks) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(fileChunks))
	}

	// Results must come back ordered by chunk index.
	for i, chunk := range fileChunks {
		if chunk.Idx != i {
			t.Errorf("wrong chunk order: expected idx %d, got %d", i, chunk.Idx)
		}
	}

	// A duplicate insert must be idempotent: no error AND no extra row.
	// (Previously only the error was checked, which would not catch a
	// non-idempotent insert that happens to succeed.)
	dup := &FileChunk{
		FileID:    file.ID,
		Idx:       0,
		ChunkHash: types.ChunkHash("chunk1"),
	}
	if err := repo.Create(ctx, nil, dup); err != nil {
		t.Fatalf("failed to create duplicate file chunk: %v", err)
	}
	fileChunks, err = repo.GetByFile(ctx, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to get file chunks after duplicate insert: %v", err)
	}
	if len(fileChunks) != 3 {
		t.Errorf("expected 3 chunks after duplicate insert, got %d", len(fileChunks))
	}

	// DeleteByFileID removes every mapping for the file.
	if err := repo.DeleteByFileID(ctx, nil, file.ID); err != nil {
		t.Fatalf("failed to delete file chunks: %v", err)
	}
	fileChunks, err = repo.GetByFileID(ctx, file.ID)
	if err != nil {
		t.Fatalf("failed to get deleted file chunks: %v", err)
	}
	if len(fileChunks) != 0 {
		t.Errorf("expected 0 chunks after delete, got %d", len(fileChunks))
	}
}
// TestFileChunkRepositoryMultipleFiles verifies that chunk mappings for
// several files stay isolated from each other: after mapping two chunks
// to each of three files, every file reports exactly its own two chunks.
func TestFileChunkRepositoryMultipleFiles(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewFileChunkRepository(db)
	fileRepo := NewFileRepository(db)

	// Insert three file rows that share identical metadata.
	now := time.Now().Truncate(time.Second)
	paths := []string{"/file1.txt", "/file2.txt", "/file3.txt"}
	files := make([]*File, 0, len(paths))
	for _, p := range paths {
		f := &File{
			Path:       types.FilePath(p),
			MTime:      now,
			Size:       2048,
			Mode:       0644,
			UID:        1000,
			GID:        1000,
			LinkTarget: "",
		}
		if err := fileRepo.Create(ctx, nil, f); err != nil {
			t.Fatalf("failed to create file %s: %v", p, err)
		}
		files = append(files, f)
	}

	// Register every chunk up front so file_chunks rows can reference them.
	chunkRepo := NewChunkRepository(db)
	for fileIdx := range files {
		for chunkIdx := 0; chunkIdx < 2; chunkIdx++ {
			hash := types.ChunkHash(fmt.Sprintf("file%d_chunk%d", fileIdx, chunkIdx))
			if err := chunkRepo.Create(ctx, nil, &Chunk{ChunkHash: hash, Size: 1024}); err != nil {
				t.Fatalf("failed to create chunk %s: %v", hash, err)
			}
		}
	}

	// Map each file to its two chunks.
	for fileIdx, f := range files {
		for chunkIdx := 0; chunkIdx < 2; chunkIdx++ {
			mapping := &FileChunk{
				FileID:    f.ID,
				Idx:       chunkIdx,
				ChunkHash: types.ChunkHash(fmt.Sprintf("file%d_chunk%d", fileIdx, chunkIdx)),
			}
			if err := repo.Create(ctx, nil, mapping); err != nil {
				t.Fatalf("failed to create file chunk: %v", err)
			}
		}
	}

	// Each file must report exactly two mappings of its own.
	for fileIdx, f := range files {
		got, err := repo.GetByFileID(ctx, f.ID)
		if err != nil {
			t.Fatalf("failed to get chunks for file %d: %v", fileIdx, err)
		}
		if len(got) != 2 {
			t.Errorf("expected 2 chunks for file %d, got %d", fileIdx, len(got))
		}
	}
}