vaultik/internal/database/repository_comprehensive_test.go
sneak d3afa65420 Fix foreign key constraints and improve snapshot tracking
- Add unified compression/encryption package in internal/blobgen
- Update DATAMODEL.md to reflect current schema implementation
- Refactor snapshot cleanup into well-named methods for clarity
- Add snapshot_id to uploads table to track new blobs per snapshot
- Fix blob count reporting for incremental backups
- Add DeleteOrphaned method to BlobChunkRepository
- Fix cleanup order to respect foreign key constraints
- Update tests to reflect schema changes
2025-07-26 02:22:25 +02:00

872 lines
21 KiB
Go

package database
import (
"context"
"database/sql"
"fmt"
"testing"
"time"
)
// TestFileRepositoryUUIDGeneration tests that files get unique UUIDs
func TestFileRepositoryUUIDGeneration(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewFileRepository(db)

	now := time.Now().Truncate(time.Second)
	// Two distinct files; each Create call should assign a fresh UUID.
	inputs := []*File{
		{Path: "/file1.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000},
		{Path: "/file2.txt", MTime: now, CTime: now, Size: 2048, Mode: 0644, UID: 1000, GID: 1000},
	}

	seen := make(map[string]bool, len(inputs))
	for _, f := range inputs {
		if err := repo.Create(ctx, nil, f); err != nil {
			t.Fatalf("failed to create file: %v", err)
		}
		// The repository must populate the ID on insert.
		if f.ID == "" {
			t.Error("file ID was not generated")
		}
		// And no two files may share one.
		if seen[f.ID] {
			t.Errorf("duplicate UUID generated: %s", f.ID)
		}
		seen[f.ID] = true
	}
}
// TestFileRepositoryGetByID tests retrieving files by UUID
func TestFileRepositoryGetByID(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewFileRepository(db)

	now := time.Now().Truncate(time.Second)
	created := &File{Path: "/test.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000}
	if err := repo.Create(ctx, nil, created); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Round-trip: the stored row comes back with the same identity and path.
	got, err := repo.GetByID(ctx, created.ID)
	if err != nil {
		t.Fatalf("failed to get file by ID: %v", err)
	}
	if got.ID != created.ID {
		t.Errorf("ID mismatch: expected %s, got %s", created.ID, got.ID)
	}
	if got.Path != created.Path {
		t.Errorf("Path mismatch: expected %s, got %s", created.Path, got.Path)
	}

	// An unknown ID yields (nil, nil), not an error.
	missing, err := repo.GetByID(ctx, "non-existent-uuid")
	if err != nil {
		t.Fatalf("GetByID should not return error for non-existent ID: %v", err)
	}
	if missing != nil {
		t.Error("expected nil for non-existent ID")
	}
}
// TestOrphanedFileCleanup tests the cleanup of orphaned files
func TestOrphanedFileCleanup(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	now := time.Now().Truncate(time.Second)
	orphan := &File{Path: "/orphaned.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000}
	kept := &File{Path: "/referenced.txt", MTime: now, CTime: now, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}
	if err := repos.Files.Create(ctx, nil, orphan); err != nil {
		t.Fatalf("failed to create file1: %v", err)
	}
	if err := repos.Files.Create(ctx, nil, kept); err != nil {
		t.Fatalf("failed to create file2: %v", err)
	}

	// Only `kept` is referenced by a snapshot; `orphan` has no references.
	snap := &Snapshot{ID: "test-snapshot", Hostname: "test-host", StartedAt: time.Now()}
	if err := repos.Snapshots.Create(ctx, nil, snap); err != nil {
		t.Fatalf("failed to create snapshot: %v", err)
	}
	if err := repos.Snapshots.AddFileByID(ctx, nil, snap.ID, kept.ID); err != nil {
		t.Fatalf("failed to add file to snapshot: %v", err)
	}

	if err := repos.Files.DeleteOrphaned(ctx); err != nil {
		t.Fatalf("failed to delete orphaned files: %v", err)
	}

	// The unreferenced file must be gone...
	if got, err := repos.Files.GetByID(ctx, orphan.ID); err != nil {
		t.Fatalf("error getting file: %v", err)
	} else if got != nil {
		t.Error("orphaned file should have been deleted")
	}
	// ...while the referenced one survives.
	if got, err := repos.Files.GetByID(ctx, kept.ID); err != nil {
		t.Fatalf("error getting file: %v", err)
	} else if got == nil {
		t.Error("referenced file should not have been deleted")
	}
}
// TestOrphanedChunkCleanup tests the cleanup of orphaned chunks
func TestOrphanedChunkCleanup(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	orphan := &Chunk{ChunkHash: "orphaned-chunk", Size: 1024}
	kept := &Chunk{ChunkHash: "referenced-chunk", Size: 1024}
	if err := repos.Chunks.Create(ctx, nil, orphan); err != nil {
		t.Fatalf("failed to create chunk1: %v", err)
	}
	if err := repos.Chunks.Create(ctx, nil, kept); err != nil {
		t.Fatalf("failed to create chunk2: %v", err)
	}

	// A file whose chunk mapping references only the kept chunk.
	now := time.Now().Truncate(time.Second)
	file := &File{Path: "/test.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000}
	if err := repos.Files.Create(ctx, nil, file); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	mapping := &FileChunk{FileID: file.ID, Idx: 0, ChunkHash: kept.ChunkHash}
	if err := repos.FileChunks.Create(ctx, nil, mapping); err != nil {
		t.Fatalf("failed to create file chunk: %v", err)
	}

	if err := repos.Chunks.DeleteOrphaned(ctx); err != nil {
		t.Fatalf("failed to delete orphaned chunks: %v", err)
	}

	// The unreferenced chunk must be gone...
	if got, err := repos.Chunks.GetByHash(ctx, orphan.ChunkHash); err != nil {
		t.Fatalf("error getting chunk: %v", err)
	} else if got != nil {
		t.Error("orphaned chunk should have been deleted")
	}
	// ...while the referenced one survives.
	if got, err := repos.Chunks.GetByHash(ctx, kept.ChunkHash); err != nil {
		t.Fatalf("error getting chunk: %v", err)
	} else if got == nil {
		t.Error("referenced chunk should not have been deleted")
	}
}
// TestOrphanedBlobCleanup tests the cleanup of orphaned blobs
func TestOrphanedBlobCleanup(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	ts := time.Now().Truncate(time.Second)
	orphan := &Blob{ID: "orphaned-blob-id", Hash: "orphaned-blob", CreatedTS: ts}
	kept := &Blob{ID: "referenced-blob-id", Hash: "referenced-blob", CreatedTS: ts}
	if err := repos.Blobs.Create(ctx, nil, orphan); err != nil {
		t.Fatalf("failed to create blob1: %v", err)
	}
	if err := repos.Blobs.Create(ctx, nil, kept); err != nil {
		t.Fatalf("failed to create blob2: %v", err)
	}

	// Only `kept` is attached to a snapshot.
	snap := &Snapshot{ID: "test-snapshot", Hostname: "test-host", StartedAt: time.Now()}
	if err := repos.Snapshots.Create(ctx, nil, snap); err != nil {
		t.Fatalf("failed to create snapshot: %v", err)
	}
	if err := repos.Snapshots.AddBlob(ctx, nil, snap.ID, kept.ID, kept.Hash); err != nil {
		t.Fatalf("failed to add blob to snapshot: %v", err)
	}

	if err := repos.Blobs.DeleteOrphaned(ctx); err != nil {
		t.Fatalf("failed to delete orphaned blobs: %v", err)
	}

	// The unreferenced blob must be gone...
	if got, err := repos.Blobs.GetByID(ctx, orphan.ID); err != nil {
		t.Fatalf("error getting blob: %v", err)
	} else if got != nil {
		t.Error("orphaned blob should have been deleted")
	}
	// ...while the referenced one survives.
	if got, err := repos.Blobs.GetByID(ctx, kept.ID); err != nil {
		t.Fatalf("error getting blob: %v", err)
	} else if got == nil {
		t.Error("referenced blob should not have been deleted")
	}
}
// TestFileChunkRepositoryWithUUIDs tests file-chunk relationships with UUIDs
func TestFileChunkRepositoryWithUUIDs(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	now := time.Now().Truncate(time.Second)
	file := &File{Path: "/test.txt", MTime: now, CTime: now, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
	if err := repos.Files.Create(ctx, nil, file); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Three chunks, each mapped to the file at consecutive indexes.
	for idx, hash := range []string{"chunk1", "chunk2", "chunk3"} {
		if err := repos.Chunks.Create(ctx, nil, &Chunk{ChunkHash: hash, Size: 1024}); err != nil {
			t.Fatalf("failed to create chunk: %v", err)
		}
		if err := repos.FileChunks.Create(ctx, nil, &FileChunk{FileID: file.ID, Idx: idx, ChunkHash: hash}); err != nil {
			t.Fatalf("failed to create file chunk: %v", err)
		}
	}

	// GetByFileID returns every mapping for the file.
	mappings, err := repos.FileChunks.GetByFileID(ctx, file.ID)
	if err != nil {
		t.Fatalf("failed to get file chunks: %v", err)
	}
	if len(mappings) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(mappings))
	}

	// DeleteByFileID removes them all.
	if err := repos.FileChunks.DeleteByFileID(ctx, nil, file.ID); err != nil {
		t.Fatalf("failed to delete file chunks: %v", err)
	}
	mappings, err = repos.FileChunks.GetByFileID(ctx, file.ID)
	if err != nil {
		t.Fatalf("failed to get file chunks after delete: %v", err)
	}
	if len(mappings) != 0 {
		t.Errorf("expected 0 chunks after delete, got %d", len(mappings))
	}
}
// TestChunkFileRepositoryWithUUIDs tests chunk-file relationships with UUIDs
func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	now := time.Now().Truncate(time.Second)
	first := &File{Path: "/file1.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000}
	second := &File{Path: "/file2.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000}
	if err := repos.Files.Create(ctx, nil, first); err != nil {
		t.Fatalf("failed to create file1: %v", err)
	}
	if err := repos.Files.Create(ctx, nil, second); err != nil {
		t.Fatalf("failed to create file2: %v", err)
	}

	// A single deduplicated chunk that appears in both files.
	shared := &Chunk{ChunkHash: "shared-chunk", Size: 1024}
	if err := repos.Chunks.Create(ctx, nil, shared); err != nil {
		t.Fatalf("failed to create chunk: %v", err)
	}
	if err := repos.ChunkFiles.Create(ctx, nil, &ChunkFile{
		ChunkHash:  shared.ChunkHash,
		FileID:     first.ID,
		FileOffset: 0,
		Length:     1024,
	}); err != nil {
		t.Fatalf("failed to create chunk file 1: %v", err)
	}
	if err := repos.ChunkFiles.Create(ctx, nil, &ChunkFile{
		ChunkHash:  shared.ChunkHash,
		FileID:     second.ID,
		FileOffset: 512,
		Length:     1024,
	}); err != nil {
		t.Fatalf("failed to create chunk file 2: %v", err)
	}

	// Looking up by chunk hash finds both file mappings.
	byChunk, err := repos.ChunkFiles.GetByChunkHash(ctx, shared.ChunkHash)
	if err != nil {
		t.Fatalf("failed to get chunk files: %v", err)
	}
	if len(byChunk) != 2 {
		t.Errorf("expected 2 files for chunk, got %d", len(byChunk))
	}

	// Looking up by file ID finds exactly the one chunk.
	byFile, err := repos.ChunkFiles.GetByFileID(ctx, first.ID)
	if err != nil {
		t.Fatalf("failed to get chunks by file ID: %v", err)
	}
	if len(byFile) != 1 {
		t.Errorf("expected 1 chunk for file, got %d", len(byFile))
	}
}
// TestSnapshotRepositoryExtendedFields tests snapshot with version and git revision
func TestSnapshotRepositoryExtendedFields(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	// Populate every extended bookkeeping field and round-trip the record.
	want := &Snapshot{
		ID:                   "test-20250722-120000Z",
		Hostname:             "test-host",
		VaultikVersion:       "0.0.1",
		VaultikGitRevision:   "abc123def456",
		StartedAt:            time.Now(),
		CompletedAt:          nil,
		FileCount:            100,
		ChunkCount:           200,
		BlobCount:            50,
		TotalSize:            1024 * 1024,
		BlobSize:             512 * 1024,
		BlobUncompressedSize: 1024 * 1024,
		CompressionLevel:     6,
		CompressionRatio:     2.0,
		UploadDurationMs:     5000,
	}
	if err := repo.Create(ctx, nil, want); err != nil {
		t.Fatalf("failed to create snapshot: %v", err)
	}

	got, err := repo.GetByID(ctx, want.ID)
	if err != nil {
		t.Fatalf("failed to get snapshot: %v", err)
	}
	if got.VaultikVersion != want.VaultikVersion {
		t.Errorf("version mismatch: expected %s, got %s", want.VaultikVersion, got.VaultikVersion)
	}
	if got.VaultikGitRevision != want.VaultikGitRevision {
		t.Errorf("git revision mismatch: expected %s, got %s", want.VaultikGitRevision, got.VaultikGitRevision)
	}
	if got.CompressionLevel != want.CompressionLevel {
		t.Errorf("compression level mismatch: expected %d, got %d", want.CompressionLevel, got.CompressionLevel)
	}
	if got.BlobUncompressedSize != want.BlobUncompressedSize {
		t.Errorf("uncompressed size mismatch: expected %d, got %d", want.BlobUncompressedSize, got.BlobUncompressedSize)
	}
	if got.UploadDurationMs != want.UploadDurationMs {
		t.Errorf("upload duration mismatch: expected %d, got %d", want.UploadDurationMs, got.UploadDurationMs)
	}
}
// TestComplexOrphanedDataScenario tests a complex scenario with multiple relationships
func TestComplexOrphanedDataScenario(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	// Two snapshots on the same host.
	snap1 := &Snapshot{ID: "snapshot1", Hostname: "host1", StartedAt: time.Now()}
	snap2 := &Snapshot{ID: "snapshot2", Hostname: "host1", StartedAt: time.Now()}
	if err := repos.Snapshots.Create(ctx, nil, snap1); err != nil {
		t.Fatalf("failed to create snapshot1: %v", err)
	}
	if err := repos.Snapshots.Create(ctx, nil, snap2); err != nil {
		t.Fatalf("failed to create snapshot2: %v", err)
	}

	// Three files.
	now := time.Now().Truncate(time.Second)
	files := make([]*File, 3)
	for i := range files {
		files[i] = &File{
			Path:  fmt.Sprintf("/file%d.txt", i),
			MTime: now,
			CTime: now,
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}
		if err := repos.Files.Create(ctx, nil, files[i]); err != nil {
			t.Fatalf("failed to create file%d: %v", i, err)
		}
	}

	// Membership: snapshot1 -> {file0, file1}, snapshot2 -> {file1, file2}.
	// So file0 is referenced only by snapshot1, file1 by both, file2 only
	// by snapshot2.
	memberships := []struct {
		snapID string
		file   *File
	}{
		{snap1.ID, files[0]},
		{snap1.ID, files[1]},
		{snap2.ID, files[1]},
		{snap2.ID, files[2]},
	}
	for _, m := range memberships {
		if err := repos.Snapshots.AddFileByID(ctx, nil, m.snapID, m.file.ID); err != nil {
			t.Fatal(err)
		}
	}

	// Drop snapshot1 entirely, then run orphan cleanup.
	if err := repos.Snapshots.DeleteSnapshotFiles(ctx, snap1.ID); err != nil {
		t.Fatal(err)
	}
	if err := repos.Snapshots.Delete(ctx, snap1.ID); err != nil {
		t.Fatal(err)
	}
	if err := repos.Files.DeleteOrphaned(ctx); err != nil {
		t.Fatal(err)
	}

	// file0 must be gone (only in the deleted snapshot); file1 and file2
	// must survive (still referenced by snapshot2).
	for i, wantAlive := range []bool{false, true, true} {
		got, err := repos.Files.GetByID(ctx, files[i].ID)
		if err != nil {
			t.Fatalf("error getting file%d: %v", i, err)
		}
		switch {
		case wantAlive && got == nil:
			t.Errorf("file%d should still exist", i)
		case !wantAlive && got != nil:
			t.Errorf("file%d should have been deleted", i)
		}
	}
}
// TestCascadeDelete tests that cascade deletes work properly
func TestCascadeDelete(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	now := time.Now().Truncate(time.Second)
	file := &File{Path: "/cascade-test.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000}
	if err := repos.Files.Create(ctx, nil, file); err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Three chunks, each mapped to the file.
	for idx := 0; idx < 3; idx++ {
		hash := fmt.Sprintf("cascade-chunk-%d", idx)
		if err := repos.Chunks.Create(ctx, nil, &Chunk{ChunkHash: hash, Size: 1024}); err != nil {
			t.Fatalf("failed to create chunk: %v", err)
		}
		if err := repos.FileChunks.Create(ctx, nil, &FileChunk{FileID: file.ID, Idx: idx, ChunkHash: hash}); err != nil {
			t.Fatalf("failed to create file chunk: %v", err)
		}
	}

	// Sanity check: the mappings are present before the delete.
	mappings, err := repos.FileChunks.GetByFileID(ctx, file.ID)
	if err != nil {
		t.Fatal(err)
	}
	if len(mappings) != 3 {
		t.Errorf("expected 3 file chunks, got %d", len(mappings))
	}

	// Deleting the file should cascade to its file-chunk rows.
	if err := repos.Files.DeleteByID(ctx, nil, file.ID); err != nil {
		t.Fatalf("failed to delete file: %v", err)
	}
	mappings, err = repos.FileChunks.GetByFileID(ctx, file.ID)
	if err != nil {
		t.Fatal(err)
	}
	if len(mappings) != 0 {
		t.Errorf("expected 0 file chunks after cascade delete, got %d", len(mappings))
	}
}
// TestTransactionIsolation tests that transactions properly isolate changes
func TestTransactionIsolation(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	now := time.Now().Truncate(time.Second)
	// Create a file inside a transaction, then force a rollback by
	// returning an error from the callback.
	txErr := repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		f := &File{Path: "/tx-test.txt", MTime: now, CTime: now, Size: 1024, Mode: 0644, UID: 1000, GID: 1000}
		if err := repos.Files.Create(ctx, tx, f); err != nil {
			return err
		}
		// Within the same transaction we should be able to query the row,
		// but GetByPath does not accept a tx parameter yet, so this test
		// only exercises the rollback path.
		return fmt.Errorf("intentional rollback")
	})
	if txErr == nil {
		t.Fatal("expected error from transaction")
	}

	// The rollback must have discarded the insert.
	files, err := repos.Files.ListByPrefix(ctx, "/tx-test")
	if err != nil {
		t.Fatal(err)
	}
	if len(files) != 0 {
		t.Error("file should not exist after rollback")
	}
}
// TestConcurrentOrphanedCleanup tests that concurrent cleanup operations don't interfere
func TestConcurrentOrphanedCleanup(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()
	repos := NewRepositories(db)

	// Set a 5-second busy timeout so concurrent writers wait for the lock
	// instead of failing immediately with SQLITE_BUSY.
	if _, err := db.conn.Exec("PRAGMA busy_timeout = 5000"); err != nil {
		t.Fatalf("failed to set busy timeout: %v", err)
	}

	snapshot := &Snapshot{
		ID:        "concurrent-test",
		Hostname:  "test-host",
		StartedAt: time.Now(),
	}
	err := repos.Snapshots.Create(ctx, nil, snapshot)
	if err != nil {
		t.Fatal(err)
	}

	// Create 20 files; even-numbered ones are referenced by the snapshot,
	// odd-numbered ones are orphans that cleanup should remove.
	for i := 0; i < 20; i++ {
		file := &File{
			Path:  fmt.Sprintf("/concurrent-%d.txt", i),
			MTime: time.Now().Truncate(time.Second),
			CTime: time.Now().Truncate(time.Second),
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}
		if err := repos.Files.Create(ctx, nil, file); err != nil {
			t.Fatal(err)
		}
		if i%2 == 0 {
			if err := repos.Snapshots.AddFileByID(ctx, nil, snapshot.ID, file.ID); err != nil {
				t.Fatal(err)
			}
		}
	}

	// Run multiple cleanup operations concurrently. SQLite serializes
	// writers, but with the busy timeout above every run should succeed.
	done := make(chan error, 3)
	for i := 0; i < 3; i++ {
		go func() {
			done <- repos.Files.DeleteOrphaned(ctx)
		}()
	}
	for i := 0; i < 3; i++ {
		if err := <-done; err != nil {
			t.Errorf("cleanup %d failed: %v", i, err)
		}
	}

	// Exactly the 10 referenced (even-numbered) files should remain.
	files, err := repos.Files.ListByPrefix(ctx, "/concurrent-")
	if err != nil {
		t.Fatal(err)
	}
	if len(files) != 10 {
		t.Errorf("expected 10 files remaining, got %d", len(files))
	}
	for _, file := range files {
		var num int
		if _, err := fmt.Sscanf(file.Path, "/concurrent-%d.txt", &num); err != nil {
			// BUG FIX: previously a parse failure was only logged and left
			// num == 0, which passed the evenness check below and silently
			// masked unexpected paths. Fail loudly and skip the check.
			t.Errorf("failed to parse file number from %s: %v", file.Path, err)
			continue
		}
		if num%2 != 0 {
			t.Errorf("odd-numbered file %s should have been deleted", file.Path)
		}
	}
}