Compare commits

4 Commits

bcbc186286...9c072166fa

| Author | SHA1 | Date |
|---|---|---|
| | 9c072166fa | |
| | 8529ae9735 | |
| | b2e85d9e76 | |
| | 9de439a0a4 | |
DESIGN.md (162 lines changed)

@@ -58,12 +58,14 @@ Surprisingly, no existing tool meets these requirements, so I wrote `vaultik`.
 
 ## S3 Bucket Layout
 
-S3 stores only three things:
+S3 stores only four things:
 
 1) Blobs: encrypted, compressed packs of file chunks.
 2) Metadata: encrypted SQLite databases containing the current state of the
    filesystem at the time of the snapshot.
 3) Metadata hashes: encrypted hashes of the metadata SQLite databases.
+4) Blob manifests: unencrypted compressed JSON files listing all blob hashes
+   referenced in the snapshot, enabling pruning without decryption.
 
 ```
 s3://<bucket>/<prefix>/
@@ -73,6 +75,7 @@ s3://<bucket>/<prefix>/
 │   ├── <snapshot_id>.sqlite.age
 │   ├── <snapshot_id>.sqlite.00.age
 │   ├── <snapshot_id>.sqlite.01.age
+│   ├── <snapshot_id>.manifest.json.zst
 ```
 
 To retrieve a given file, you would:
@@ -99,6 +102,23 @@ memory (<10GB).
 * `<snapshot_id>`: UTC timestamp in ISO 8601 format, e.g. `2023-10-01T12:00:00Z`.  These are lexicographically sortable.
 * `blobs/<aa>/<bb>/...`: where `aa` and `bb` are the first 2 hex bytes of the blob hash.
 
+### Blob Manifest Format
+
+The `<snapshot_id>.manifest.json.zst` file is an unencrypted, compressed JSON file containing:
+
+```json
+{
+  "snapshot_id": "2023-10-01T12:00:00Z",
+  "blob_hashes": [
+    "aa1234567890abcdef...",
+    "bb2345678901bcdef0...",
+    ...
+  ]
+}
+```
+
+This allows pruning operations to determine which blobs are referenced without requiring decryption keys.
+
 ---
 
 ## 3. Local SQLite Index Schema (source host)
@@ -110,6 +130,8 @@ CREATE TABLE files (
   size INTEGER NOT NULL
 );
 
+-- Maps files to their constituent chunks in sequence order
+-- Used for reconstructing files from chunks during restore
 CREATE TABLE file_chunks (
   path TEXT NOT NULL,
   idx INTEGER NOT NULL,
@@ -137,6 +159,8 @@ CREATE TABLE blob_chunks (
   PRIMARY KEY (blob_hash, chunk_hash)
 );
 
+-- Reverse mapping: tracks which files contain a given chunk
+-- Used for deduplication and tracking chunk usage across files
 CREATE TABLE chunk_files (
   chunk_hash TEXT NOT NULL,
   file_path TEXT NOT NULL,
@@ -219,18 +243,20 @@ metadata/<snapshot_id>.sqlite.01.age
 9. Compress, encrypt, split, and upload to S3
 10. Encrypt the hash of the snapshot database to the backup age key
 11. Upload the encrypted hash to S3 as `metadata/<snapshot_id>.hash.age`
-12. Optionally prune remote blobs that are no longer referenced in the
+12. Create blob manifest JSON listing all blob hashes referenced in snapshot
+13. Compress manifest with zstd and upload as `metadata/<snapshot_id>.manifest.json.zst`
+14. Optionally prune remote blobs that are no longer referenced in the
    snapshot, based on local state db
 
 ### 5.2 Manual Prune
 
 1. List all objects under `metadata/`
 2. Determine the latest valid `snapshot_id` by timestamp
-3. Download, decrypt, and reconstruct the latest snapshot SQLite database
-4. Extract set of referenced blob hashes
+3. Download and decompress the latest `<snapshot_id>.manifest.json.zst`
+4. Extract set of referenced blob hashes from manifest (no decryption needed)
 5. List all blob objects under `blobs/`
 6. For each blob:
-   * If the hash is not in the latest snapshot:
+   * If the hash is not in the manifest:
      * Issue `DeleteObject` to remove it
 
 ### 5.3 Verify
@@ -257,11 +283,14 @@ Verify runs on a host that has no state, but access to the bucket.
 ## 6. CLI Commands
 
 ```
-vaultik backup [--config <path>] [--cron] [--daemon]
+vaultik backup [--config <path>] [--cron] [--daemon] [--prune]
 vaultik restore --bucket <bucket> --prefix <prefix> --snapshot <id> --target <dir>
 vaultik prune --bucket <bucket> --prefix <prefix> [--dry-run]
 vaultik verify --bucket <bucket> --prefix <prefix> [--snapshot <id>] [--quick]
 vaultik fetch --bucket <bucket> --prefix <prefix> --snapshot <id> --file <path> --target <path>
+vaultik snapshot list --bucket <bucket> --prefix <prefix> [--limit <n>]
+vaultik snapshot rm --bucket <bucket> --prefix <prefix> --snapshot <id>
+vaultik snapshot latest --bucket <bucket> --prefix <prefix>
 ```
 
 * `VAULTIK_PRIVATE_KEY` is required for `restore`, `prune`, `verify`, and
@@ -354,124 +383,3 @@ func EncryptAndUploadMetadata(path string, cfg *Config, snapshotID string) error
 func RunPrune(bucket, prefix, privateKey string) error
 ```
 
----
-
-## Implementation TODO
-
-### Core Infrastructure
-1. Set up Go module and project structure
-1. Create Makefile with test, fmt, and lint targets
-1. Set up cobra CLI skeleton with all commands
-1. Implement config loading and validation from YAML
-1. Create data structures for FileInfo, ChunkInfo, BlobInfo, etc.
-
-### Local Index Database
-1. Implement SQLite schema creation and migrations
-1. Create Index type with all database operations
-1. Add transaction support and proper locking
-1. Implement file tracking (save, lookup, delete)
-1. Implement chunk tracking and deduplication
-1. Implement blob tracking and chunk-to-blob mapping
-1. Write tests for all index operations
-
-### Chunking and Hashing
-1. Implement Rabin fingerprint chunker
-1. Create streaming chunk processor
-1. Implement SHA256 hashing for chunks
-1. Add configurable chunk size parameters
-1. Write tests for chunking consistency
-
-### Compression and Encryption
-1. Implement zstd compression wrapper
-1. Integrate age encryption library
-1. Create Encryptor type for public key encryption
-1. Create Decryptor type for private key decryption
-1. Implement streaming encrypt/decrypt pipelines
-1. Write tests for compression and encryption
-
-### Blob Packing
-1. Implement BlobWriter with size limits
-1. Add chunk accumulation and flushing
-1. Create blob hash calculation
-1. Implement proper error handling and rollback
-1. Write tests for blob packing scenarios
-
-### S3 Operations
-1. Integrate MinIO client library
-1. Implement S3Client wrapper type
-1. Add multipart upload support for large blobs
-1. Implement retry logic with exponential backoff
-1. Add connection pooling and timeout handling
-1. Write tests using MinIO container
-
-### Backup Command - Basic
-1. Implement directory walking with exclusion patterns
-1. Add file change detection using index
-1. Integrate chunking pipeline for changed files
-1. Implement blob upload coordination
-1. Add progress reporting to stderr
-1. Write integration tests for backup
-
-### Snapshot Metadata
-1. Implement snapshot metadata extraction from index
-1. Create SQLite snapshot database builder
-1. Add metadata compression and encryption
-1. Implement metadata chunking for large snapshots
-1. Add hash calculation and verification
-1. Implement metadata upload to S3
-1. Write tests for metadata operations
-
-### Restore Command
-1. Implement snapshot listing and selection
-1. Add metadata download and reconstruction
-1. Implement hash verification for metadata
-1. Create file restoration logic with chunk retrieval
-1. Add blob caching for efficiency
-1. Implement proper file permissions and mtime restoration
-1. Write integration tests for restore
-
-### Prune Command
-1. Implement latest snapshot detection
-1. Add referenced blob extraction from metadata
-1. Create S3 blob listing and comparison
-1. Implement safe deletion of unreferenced blobs
-1. Add dry-run mode for safety
-1. Write tests for prune scenarios
-
-### Verify Command
-1. Implement metadata integrity checking
-1. Add blob existence verification
-1. Implement quick mode (S3 hash checking)
-1. Implement deep mode (download and verify chunks)
-1. Add detailed error reporting
-1. Write tests for verification
-
-### Fetch Command
-1. Implement single-file metadata query
-1. Add minimal blob downloading for file
-1. Create streaming file reconstruction
-1. Add support for output redirection
-1. Write tests for fetch command
-
-### Daemon Mode
-1. Implement inotify watcher for Linux
-1. Add dirty path tracking in index
-1. Create periodic full scan scheduler
-1. Implement backup interval enforcement
-1. Add proper signal handling and shutdown
-1. Write tests for daemon behavior
-
-### Cron Mode
-1. Implement silent operation mode
-1. Add proper exit codes for cron
-1. Implement lock file to prevent concurrent runs
-1. Add error summary reporting
-1. Write tests for cron mode
-
-### Finalization
-1. Add comprehensive logging throughout
-1. Implement proper error wrapping and context
-1. Add performance metrics collection
-1. Create end-to-end integration tests
-1. Write documentation and examples
-1. Set up CI/CD pipeline
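The point of the new manifest is that a prune pass (whether `vaultik prune` or `backup --prune`) can decide which blobs are still referenced without decrypting any snapshot metadata. As a rough sketch (not code from this diff; the zstd library choice and the local file name are assumptions), the referenced-blob set could be built like this:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd" // assumed zstd library, not pinned by this diff
)

// manifest mirrors the JSON documented in DESIGN.md above.
type manifest struct {
	SnapshotID string   `json:"snapshot_id"`
	BlobHashes []string `json:"blob_hashes"`
}

// loadManifest decompresses and parses a downloaded manifest; note that no
// age key is involved anywhere, since the manifest is stored unencrypted.
func loadManifest(path string) (map[string]bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	dec, err := zstd.NewReader(f)
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	var m manifest
	if err := json.NewDecoder(dec).Decode(&m); err != nil {
		return nil, err
	}

	referenced := make(map[string]bool, len(m.BlobHashes))
	for _, h := range m.BlobHashes {
		referenced[h] = true
	}
	return referenced, nil
}

func main() {
	// Hypothetical local copy of metadata/<snapshot_id>.manifest.json.zst.
	refs, err := loadManifest("2023-10-01T12:00:00Z.manifest.json.zst")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("snapshot references %d blobs\n", len(refs))
}
```

Any object under `blobs/` whose hash is absent from the returned set is unreferenced and a candidate for `DeleteObject`, matching steps 5 and 6 of Manual Prune.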
							
								
								
									
Makefile (9 lines changed)

@@ -12,9 +12,16 @@ LDFLAGS := -X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=$(VERSION)' \
 all: test
 
 # Run tests
-test: lint
+test: lint fmt-check
 	go test -v ./...
 
+# Check if code is formatted
+fmt-check:
+	@if [ -n "$$(go fmt ./...)" ]; then \
+		echo "Error: Code is not formatted. Run 'make fmt' to fix."; \
+		exit 1; \
+	fi
+
 # Format code
 fmt:
 	go fmt ./...
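One caveat with the new `fmt-check` target: `go fmt ./...` rewrites unformatted files in place and prints their names, so the check repairs the working tree as a side effect before failing. A purely read-only check would shell out to `gofmt -l .` instead.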
							
								
								
									
TODO.md (new file, 112 lines)

@@ -0,0 +1,112 @@
+# Implementation TODO
+
+## Local Index Database
+1. Implement SQLite schema creation
+1. Create Index type with all database operations
+1. Add transaction support and proper locking
+1. Implement file tracking (save, lookup, delete)
+1. Implement chunk tracking and deduplication
+1. Implement blob tracking and chunk-to-blob mapping
+1. Write tests for all index operations
+
+## Chunking and Hashing
+1. Implement Rabin fingerprint chunker
+1. Create streaming chunk processor
+1. Implement SHA256 hashing for chunks
+1. Add configurable chunk size parameters
+1. Write tests for chunking consistency
+
+## Compression and Encryption
+1. Implement zstd compression wrapper
+1. Integrate age encryption library
+1. Create Encryptor type for public key encryption
+1. Create Decryptor type for private key decryption
+1. Implement streaming encrypt/decrypt pipelines
+1. Write tests for compression and encryption
+
+## Blob Packing
+1. Implement BlobWriter with size limits
+1. Add chunk accumulation and flushing
+1. Create blob hash calculation
+1. Implement proper error handling and rollback
+1. Write tests for blob packing scenarios
+
+## S3 Operations
+1. Integrate MinIO client library
+1. Implement S3Client wrapper type
+1. Add multipart upload support for large blobs
+1. Implement retry logic with exponential backoff
+1. Add connection pooling and timeout handling
+1. Write tests using MinIO container
+
+## Backup Command - Basic
+1. Implement directory walking with exclusion patterns
+1. Add file change detection using index
+1. Integrate chunking pipeline for changed files
+1. Implement blob upload coordination
+1. Add progress reporting to stderr
+1. Write integration tests for backup
+
+## Snapshot Metadata
+1. Implement snapshot metadata extraction from index
+1. Create SQLite snapshot database builder
+1. Add metadata compression and encryption
+1. Implement metadata chunking for large snapshots
+1. Add hash calculation and verification
+1. Implement metadata upload to S3
+1. Write tests for metadata operations
+
+## Restore Command
+1. Implement snapshot listing and selection
+1. Add metadata download and reconstruction
+1. Implement hash verification for metadata
+1. Create file restoration logic with chunk retrieval
+1. Add blob caching for efficiency
+1. Implement proper file permissions and mtime restoration
+1. Write integration tests for restore
+
+## Prune Command
+1. Implement latest snapshot detection
+1. Add referenced blob extraction from metadata
+1. Create S3 blob listing and comparison
+1. Implement safe deletion of unreferenced blobs
+1. Add dry-run mode for safety
+1. Write tests for prune scenarios
+
+## Verify Command
+1. Implement metadata integrity checking
+1. Add blob existence verification
+1. Implement quick mode (S3 hash checking)
+1. Implement deep mode (download and verify chunks)
+1. Add detailed error reporting
+1. Write tests for verification
+
+## Fetch Command
+1. Implement single-file metadata query
+1. Add minimal blob downloading for file
+1. Create streaming file reconstruction
+1. Add support for output redirection
+1. Write tests for fetch command
+
+## Daemon Mode
+1. Implement inotify watcher for Linux
+1. Add dirty path tracking in index
+1. Create periodic full scan scheduler
+1. Implement backup interval enforcement
+1. Add proper signal handling and shutdown
+1. Write tests for daemon behavior
+
+## Cron Mode
+1. Implement silent operation mode
+1. Add proper exit codes for cron
+1. Implement lock file to prevent concurrent runs
+1. Add error summary reporting
+1. Write tests for cron mode
+
+## Finalization
+1. Add comprehensive logging throughout
+1. Implement proper error wrapping and context
+1. Add performance metrics collection
+1. Create end-to-end integration tests
+1. Write documentation and examples
+1. Set up CI/CD pipeline
							
								
								
									
go.mod (21 lines changed)

@@ -3,13 +3,26 @@ module git.eeqj.de/sneak/vaultik
 go 1.24.4
 
 require (
+	github.com/spf13/cobra v1.9.1
+	go.uber.org/fx v1.24.0
+	gopkg.in/yaml.v3 v3.0.1
+	modernc.org/sqlite v1.38.0
+)
+
+require (
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/spf13/cobra v1.9.1 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/ncruces/go-strftime v0.1.9 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
 	github.com/spf13/pflag v1.0.6 // indirect
 	go.uber.org/dig v1.19.0 // indirect
-	go.uber.org/fx v1.24.0 // indirect
 	go.uber.org/multierr v1.10.0 // indirect
 	go.uber.org/zap v1.26.0 // indirect
-	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
+	golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
+	golang.org/x/sys v0.33.0 // indirect
+	modernc.org/libc v1.65.10 // indirect
+	modernc.org/mathutil v1.7.1 // indirect
+	modernc.org/memory v1.11.0 // indirect
 )
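Promoting `modernc.org/sqlite` to a direct dependency is what pulls in the `modernc.org/*` and `golang.org/x/*` indirect block above: it is a pure-Go (CGo-free) SQLite driver, which keeps `vaultik` trivially cross-compilable. It registers itself under the driver name `sqlite`; a minimal usage sketch (the index path here is illustrative, not from this diff):

```go
package main

import (
	"database/sql"
	"log"

	_ "modernc.org/sqlite" // registers the pure-Go "sqlite" driver
)

func main() {
	db, err := sql.Open("sqlite", "/var/lib/vaultik/index.sqlite") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```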
							
								
								
									
go.sum (58 lines changed)

@@ -1,21 +1,75 @@
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
+github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
 github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
 github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
 go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
 go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
 go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
+go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
 go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
 go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
 go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+modernc.org/cc/v4 v4.26.1 h1:+X5NtzVBn0KgsBCBe+xkDC7twLb/jNVj9FPgiwSQO3s=
+modernc.org/cc/v4 v4.26.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
+modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
+modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
+modernc.org/fileutil v1.3.3 h1:3qaU+7f7xxTUmvU1pJTZiDLAIoJVdUSSauJNHg9yXoA=
+modernc.org/fileutil v1.3.3/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
+modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
+modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
+modernc.org/libc v1.65.10 h1:ZwEk8+jhW7qBjHIT+wd0d9VjitRyQef9BnzlzGwMODc=
+modernc.org/libc v1.65.10/go.mod h1:StFvYpx7i/mXtBAfVOjaU0PWZOvIRoZSgXhrwXzr8Po=
+modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
+modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
+modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
+modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
+modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
+modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
+modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
+modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
+modernc.org/sqlite v1.38.0 h1:+4OrfPQ8pxHKuWG4md1JpR/EYAh3Md7TdejuuzE7EUI=
+modernc.org/sqlite v1.38.0/go.mod h1:1Bj+yES4SVvBZ4cBOpVZ6QgesMCKpJZDq0nxYzOpmNE=
+modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
+modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
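For reference, each module contributes two entries here: an `h1:` hash over the module contents and a second hash over its `go.mod` file. Entries carrying only the `/go.mod` hash (such as `golang.org/x/sys v0.6.0`) are consulted for module-graph resolution but never built.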
							
								
								
									
internal/cli/app.go (new file, 56 lines)

@@ -0,0 +1,56 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+
+	"git.eeqj.de/sneak/vaultik/internal/config"
+	"git.eeqj.de/sneak/vaultik/internal/database"
+	"git.eeqj.de/sneak/vaultik/internal/globals"
+	"go.uber.org/fx"
+)
+
+// AppOptions contains common options for creating the fx application
+type AppOptions struct {
+	ConfigPath string
+	Modules    []fx.Option
+	Invokes    []fx.Option
+}
+
+// NewApp creates a new fx application with common modules
+func NewApp(opts AppOptions) *fx.App {
+	baseModules := []fx.Option{
+		fx.Supply(config.ConfigPath(opts.ConfigPath)),
+		fx.Provide(globals.New),
+		config.Module,
+		database.Module,
+		fx.NopLogger,
+	}
+
+	allOptions := append(baseModules, opts.Modules...)
+	allOptions = append(allOptions, opts.Invokes...)
+
+	return fx.New(allOptions...)
+}
+
+// RunApp starts and stops the fx application within the given context
+func RunApp(ctx context.Context, app *fx.App) error {
+	if err := app.Start(ctx); err != nil {
+		return fmt.Errorf("failed to start app: %w", err)
+	}
+	defer func() {
+		if err := app.Stop(ctx); err != nil {
+			fmt.Printf("error stopping app: %v\n", err)
+		}
+	}()
+
+	// Wait for context cancellation
+	<-ctx.Done()
+	return nil
+}
+
+// RunWithApp is a helper that creates and runs an fx app with the given options
+func RunWithApp(ctx context.Context, opts AppOptions) error {
+	app := NewApp(opts)
+	return RunApp(ctx, app)
+}
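This helper centralizes the fx wiring that `runBackup` previously did inline: subcommands now pass only their `Invokes`, and `fx.NopLogger` keeps fx's own lifecycle logging out of the CLI output. Note that `RunApp` blocks on `<-ctx.Done()` after a successful start, so commands built on it run until their context is cancelled; the reworked `runBackup` below is the first caller.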
@@ -6,6 +6,7 @@ import (
 	"os"
 
 	"git.eeqj.de/sneak/vaultik/internal/config"
+	"git.eeqj.de/sneak/vaultik/internal/database"
 	"git.eeqj.de/sneak/vaultik/internal/globals"
 	"github.com/spf13/cobra"
 	"go.uber.org/fx"
@@ -16,6 +17,7 @@ type BackupOptions struct {
 	ConfigPath string
 	Daemon     bool
 	Cron       bool
+	Prune      bool
 }
 
 // NewBackupCommand creates the backup command
@@ -51,39 +53,31 @@ a path using --config or by setting VAULTIK_CONFIG to a path.`,
 	cmd.Flags().StringVar(&opts.ConfigPath, "config", "", "Path to config file")
 	cmd.Flags().BoolVar(&opts.Daemon, "daemon", false, "Run in daemon mode with inotify monitoring")
 	cmd.Flags().BoolVar(&opts.Cron, "cron", false, "Run in cron mode (silent unless error)")
+	cmd.Flags().BoolVar(&opts.Prune, "prune", false, "Delete all previous snapshots and unreferenced blobs after backup")
 
 	return cmd
 }
 
 func runBackup(ctx context.Context, opts *BackupOptions) error {
-	app := fx.New(
-		fx.Supply(config.ConfigPath(opts.ConfigPath)),
-		fx.Provide(globals.New),
-		config.Module,
-		// Additional modules will be added here
-		fx.Invoke(func(g *globals.Globals, cfg *config.Config) error {
-			// TODO: Implement backup logic
-			fmt.Printf("Running backup with config: %s\n", opts.ConfigPath)
-			fmt.Printf("Version: %s, Commit: %s\n", g.Version, g.Commit)
-			if opts.Daemon {
-				fmt.Println("Running in daemon mode")
-			}
-			if opts.Cron {
-				fmt.Println("Running in cron mode")
-			}
-			return nil
-		}),
-		fx.NopLogger,
-	)
-
-	if err := app.Start(ctx); err != nil {
-		return fmt.Errorf("failed to start backup: %w", err)
-	}
-	defer func() {
-		if err := app.Stop(ctx); err != nil {
-			fmt.Printf("error stopping app: %v\n", err)
-		}
-	}()
-
-	return nil
+	return RunWithApp(ctx, AppOptions{
+		ConfigPath: opts.ConfigPath,
+		Invokes: []fx.Option{
+			fx.Invoke(func(g *globals.Globals, cfg *config.Config, repos *database.Repositories) error {
+				// TODO: Implement backup logic
+				fmt.Printf("Running backup with config: %s\n", opts.ConfigPath)
+				fmt.Printf("Version: %s, Commit: %s\n", g.Version, g.Commit)
+				fmt.Printf("Index path: %s\n", cfg.IndexPath)
+				if opts.Daemon {
+					fmt.Println("Running in daemon mode")
+				}
+				if opts.Cron {
+					fmt.Println("Running in cron mode")
+				}
+				if opts.Prune {
+					fmt.Println("Pruning enabled - will delete old snapshots after backup")
+				}
+				return nil
+			}),
+		},
+	})
 }
@@ -22,6 +22,7 @@ on the source system.`,
 		NewPruneCommand(),
 		NewVerifyCommand(),
 		NewFetchCommand(),
+		SnapshotCmd(),
 	)
 
 	return cmd
							
								
								
									
internal/cli/snapshot.go (new file, 90 lines)

@@ -0,0 +1,90 @@
+package cli
+
+import (
+	"github.com/spf13/cobra"
+)
+
+func SnapshotCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "snapshot",
+		Short: "Manage snapshots",
+		Long:  "Commands for listing, removing, and querying snapshots",
+	}
+
+	cmd.AddCommand(snapshotListCmd())
+	cmd.AddCommand(snapshotRmCmd())
+	cmd.AddCommand(snapshotLatestCmd())
+
+	return cmd
+}
+
+func snapshotListCmd() *cobra.Command {
+	var (
+		bucket string
+		prefix string
+		limit  int
+	)
+
+	cmd := &cobra.Command{
+		Use:   "list",
+		Short: "List snapshots",
+		Long:  "List all snapshots in the bucket, sorted by timestamp",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			panic("unimplemented")
+		},
+	}
+
+	cmd.Flags().StringVar(&bucket, "bucket", "", "S3 bucket name")
+	cmd.Flags().StringVar(&prefix, "prefix", "", "S3 prefix")
+	cmd.Flags().IntVar(&limit, "limit", 10, "Maximum number of snapshots to list")
+	cmd.MarkFlagRequired("bucket")
+
+	return cmd
+}
+
+func snapshotRmCmd() *cobra.Command {
+	var (
+		bucket   string
+		prefix   string
+		snapshot string
+	)
+
+	cmd := &cobra.Command{
+		Use:   "rm",
+		Short: "Remove a snapshot",
+		Long:  "Remove a snapshot and optionally its associated blobs",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			panic("unimplemented")
+		},
+	}
+
+	cmd.Flags().StringVar(&bucket, "bucket", "", "S3 bucket name")
+	cmd.Flags().StringVar(&prefix, "prefix", "", "S3 prefix")
+	cmd.Flags().StringVar(&snapshot, "snapshot", "", "Snapshot ID to remove")
+	cmd.MarkFlagRequired("bucket")
+	cmd.MarkFlagRequired("snapshot")
+
+	return cmd
+}
+
+func snapshotLatestCmd() *cobra.Command {
+	var (
+		bucket string
+		prefix string
+	)
+
+	cmd := &cobra.Command{
+		Use:   "latest",
+		Short: "Get the latest snapshot ID",
+		Long:  "Display the ID of the most recent snapshot",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			panic("unimplemented")
+		},
+	}
+
+	cmd.Flags().StringVar(&bucket, "bucket", "", "S3 bucket name")
+	cmd.Flags().StringVar(&prefix, "prefix", "", "S3 prefix")
+	cmd.MarkFlagRequired("bucket")
+
+	return cmd
+}
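All three `RunE` bodies are still `panic("unimplemented")` stubs, but the intended invocations match the CLI section added to DESIGN.md; the bucket and prefix values below are illustrative:

```
vaultik snapshot list --bucket my-backups --prefix hosts/web1 --limit 5
vaultik snapshot latest --bucket my-backups --prefix hosts/web1
vaultik snapshot rm --bucket my-backups --prefix hosts/web1 --snapshot 2023-10-01T12:00:00Z
```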
@@ -78,6 +78,11 @@ func Load(path string) (*Config, error) {
 		return nil, fmt.Errorf("failed to parse config: %w", err)
 	}
 
+	// Check for environment variable override for IndexPath
+	if envIndexPath := os.Getenv("VAULTIK_INDEX_PATH"); envIndexPath != "" {
+		cfg.IndexPath = envIndexPath
+	}
+
 	// Get hostname if not set
 	if cfg.Hostname == "" {
 		hostname, err := os.Hostname()
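The override makes it easy to point a run at a scratch index without editing the config file, e.g. (paths illustrative):

```
VAULTIK_INDEX_PATH=/tmp/vaultik-test.sqlite vaultik backup --config ./config.yaml
```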
							
								
								
									
internal/database/blob_chunks.go (new file, 88 lines)

@@ -0,0 +1,88 @@
+package database
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+)
+
+type BlobChunkRepository struct {
+	db *DB
+}
+
+func NewBlobChunkRepository(db *DB) *BlobChunkRepository {
+	return &BlobChunkRepository{db: db}
+}
+
+func (r *BlobChunkRepository) Create(ctx context.Context, tx *sql.Tx, bc *BlobChunk) error {
+	query := `
+		INSERT INTO blob_chunks (blob_hash, chunk_hash, offset, length)
+		VALUES (?, ?, ?, ?)
+	`
+
+	var err error
+	if tx != nil {
+		_, err = tx.ExecContext(ctx, query, bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length)
+	} else {
+		_, err = r.db.ExecWithLock(ctx, query, bc.BlobHash, bc.ChunkHash, bc.Offset, bc.Length)
+	}
+
+	if err != nil {
+		return fmt.Errorf("inserting blob_chunk: %w", err)
+	}
+
+	return nil
+}
+
+func (r *BlobChunkRepository) GetByBlobHash(ctx context.Context, blobHash string) ([]*BlobChunk, error) {
+	query := `
+		SELECT blob_hash, chunk_hash, offset, length
+		FROM blob_chunks
+		WHERE blob_hash = ?
+		ORDER BY offset
+	`
+
+	rows, err := r.db.conn.QueryContext(ctx, query, blobHash)
+	if err != nil {
+		return nil, fmt.Errorf("querying blob chunks: %w", err)
+	}
+	defer CloseRows(rows)
+
+	var blobChunks []*BlobChunk
+	for rows.Next() {
+		var bc BlobChunk
+		err := rows.Scan(&bc.BlobHash, &bc.ChunkHash, &bc.Offset, &bc.Length)
+		if err != nil {
+			return nil, fmt.Errorf("scanning blob chunk: %w", err)
+		}
+		blobChunks = append(blobChunks, &bc)
+	}
+
+	return blobChunks, rows.Err()
+}
+
+func (r *BlobChunkRepository) GetByChunkHash(ctx context.Context, chunkHash string) (*BlobChunk, error) {
+	query := `
+		SELECT blob_hash, chunk_hash, offset, length
+		FROM blob_chunks
+		WHERE chunk_hash = ?
+		LIMIT 1
+	`
+
+	var bc BlobChunk
+	err := r.db.conn.QueryRowContext(ctx, query, chunkHash).Scan(
+		&bc.BlobHash,
+		&bc.ChunkHash,
+		&bc.Offset,
+		&bc.Length,
+	)
+
+	if err == sql.ErrNoRows {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("querying blob chunk: %w", err)
+	}
+
+	return &bc, nil
+}
							
								
								
									
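These three queries are enough to reassemble a blob: GetByBlobHash returns chunk records ordered by offset, so a caller can check that the chunks tile the blob with no gaps. A minimal sketch of such a check, assuming it lives in the same package; verifyBlobLayout is illustrative and not part of this changeset:

// verifyBlobLayout is a hypothetical helper (not in this changeset) showing
// how GetByBlobHash's ORDER BY offset supports blob reassembly: each chunk
// must start exactly where the previous one ended.
func verifyBlobLayout(ctx context.Context, repo *BlobChunkRepository, blobHash string) error {
	chunks, err := repo.GetByBlobHash(ctx, blobHash)
	if err != nil {
		return err
	}
	var next int64
	for _, bc := range chunks {
		if bc.Offset != next {
			return fmt.Errorf("gap in blob %s: expected offset %d, got %d", blobHash, next, bc.Offset)
		}
		next = bc.Offset + bc.Length
	}
	return nil
}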

internal/database/blob_chunks_test.go (new file, +146)
@@ -0,0 +1,146 @@
package database

import (
	"context"
	"testing"
)

func TestBlobChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewBlobChunkRepository(db)

	// Test Create
	bc1 := &BlobChunk{
		BlobHash:  "blob1",
		ChunkHash: "chunk1",
		Offset:    0,
		Length:    1024,
	}

	err := repo.Create(ctx, nil, bc1)
	if err != nil {
		t.Fatalf("failed to create blob chunk: %v", err)
	}

	// Add more chunks to the same blob
	bc2 := &BlobChunk{
		BlobHash:  "blob1",
		ChunkHash: "chunk2",
		Offset:    1024,
		Length:    2048,
	}
	err = repo.Create(ctx, nil, bc2)
	if err != nil {
		t.Fatalf("failed to create second blob chunk: %v", err)
	}

	bc3 := &BlobChunk{
		BlobHash:  "blob1",
		ChunkHash: "chunk3",
		Offset:    3072,
		Length:    512,
	}
	err = repo.Create(ctx, nil, bc3)
	if err != nil {
		t.Fatalf("failed to create third blob chunk: %v", err)
	}

	// Test GetByBlobHash
	chunks, err := repo.GetByBlobHash(ctx, "blob1")
	if err != nil {
		t.Fatalf("failed to get blob chunks: %v", err)
	}
	if len(chunks) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(chunks))
	}

	// Verify order by offset
	expectedOffsets := []int64{0, 1024, 3072}
	for i, chunk := range chunks {
		if chunk.Offset != expectedOffsets[i] {
			t.Errorf("wrong chunk order: expected offset %d, got %d", expectedOffsets[i], chunk.Offset)
		}
	}

	// Test GetByChunkHash
	bc, err := repo.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get blob chunk by chunk hash: %v", err)
	}
	if bc == nil {
		t.Fatal("expected blob chunk, got nil")
	}
	if bc.BlobHash != "blob1" {
		t.Errorf("wrong blob hash: expected blob1, got %s", bc.BlobHash)
	}
	if bc.Offset != 1024 {
		t.Errorf("wrong offset: expected 1024, got %d", bc.Offset)
	}

	// Test non-existent chunk
	bc, err = repo.GetByChunkHash(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if bc != nil {
		t.Error("expected nil for non-existent chunk")
	}
}

func TestBlobChunkRepositoryMultipleBlobs(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewBlobChunkRepository(db)

	// Create chunks across multiple blobs
	// Some chunks are shared between blobs (deduplication scenario)
	blobChunks := []BlobChunk{
		{BlobHash: "blob1", ChunkHash: "chunk1", Offset: 0, Length: 1024},
		{BlobHash: "blob1", ChunkHash: "chunk2", Offset: 1024, Length: 1024},
		{BlobHash: "blob2", ChunkHash: "chunk2", Offset: 0, Length: 1024}, // chunk2 is shared
		{BlobHash: "blob2", ChunkHash: "chunk3", Offset: 1024, Length: 1024},
	}

	for _, bc := range blobChunks {
		err := repo.Create(ctx, nil, &bc)
		if err != nil {
			t.Fatalf("failed to create blob chunk: %v", err)
		}
	}

	// Verify blob1 chunks
	chunks, err := repo.GetByBlobHash(ctx, "blob1")
	if err != nil {
		t.Fatalf("failed to get blob1 chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 chunks for blob1, got %d", len(chunks))
	}

	// Verify blob2 chunks
	chunks, err = repo.GetByBlobHash(ctx, "blob2")
	if err != nil {
		t.Fatalf("failed to get blob2 chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 chunks for blob2, got %d", len(chunks))
	}

	// Verify shared chunk
	bc, err := repo.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get shared chunk: %v", err)
	}
	if bc == nil {
		t.Fatal("expected shared chunk, got nil")
	}
	// GetByChunkHash returns the first match; with LIMIT 1 and no ORDER BY
	// this follows rowid insertion order, so blob1 is expected here
	if bc.BlobHash != "blob1" {
		t.Errorf("expected blob1 for shared chunk, got %s", bc.BlobHash)
	}
}

internal/database/blobs.go (new file, +96)
@@ -0,0 +1,96 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
	"time"
)

type BlobRepository struct {
	db *DB
}

func NewBlobRepository(db *DB) *BlobRepository {
	return &BlobRepository{db: db}
}

func (r *BlobRepository) Create(ctx context.Context, tx *sql.Tx, blob *Blob) error {
	query := `
		INSERT INTO blobs (blob_hash, created_ts)
		VALUES (?, ?)
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, blob.BlobHash, blob.CreatedTS.Unix())
	} else {
		_, err = r.db.ExecWithLock(ctx, query, blob.BlobHash, blob.CreatedTS.Unix())
	}

	if err != nil {
		return fmt.Errorf("inserting blob: %w", err)
	}

	return nil
}

func (r *BlobRepository) GetByHash(ctx context.Context, hash string) (*Blob, error) {
	query := `
		SELECT blob_hash, created_ts
		FROM blobs
		WHERE blob_hash = ?
	`

	var blob Blob
	var createdTSUnix int64

	err := r.db.conn.QueryRowContext(ctx, query, hash).Scan(
		&blob.BlobHash,
		&createdTSUnix,
	)

	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying blob: %w", err)
	}

	blob.CreatedTS = time.Unix(createdTSUnix, 0)
	return &blob, nil
}

func (r *BlobRepository) List(ctx context.Context, limit, offset int) ([]*Blob, error) {
	query := `
		SELECT blob_hash, created_ts
		FROM blobs
		ORDER BY blob_hash
		LIMIT ? OFFSET ?
	`

	rows, err := r.db.conn.QueryContext(ctx, query, limit, offset)
	if err != nil {
		return nil, fmt.Errorf("querying blobs: %w", err)
	}
	defer CloseRows(rows)

	var blobs []*Blob
	for rows.Next() {
		var blob Blob
		var createdTSUnix int64

		err := rows.Scan(
			&blob.BlobHash,
			&createdTSUnix,
		)
		if err != nil {
			return nil, fmt.Errorf("scanning blob: %w", err)
		}

		blob.CreatedTS = time.Unix(createdTSUnix, 0)
		blobs = append(blobs, &blob)
	}

	return blobs, rows.Err()
}
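List pages with LIMIT/OFFSET, so a caller that wants every blob must loop until a short page comes back. A sketch of that loop, assuming the same package; allBlobs is illustrative and not part of this changeset:

// allBlobs is a hypothetical helper showing the paging loop implied by
// List's LIMIT/OFFSET signature: fetch fixed-size pages until one returns
// fewer rows than requested.
func allBlobs(ctx context.Context, repo *BlobRepository) ([]*Blob, error) {
	const pageSize = 1000
	var all []*Blob
	for offset := 0; ; offset += pageSize {
		page, err := repo.List(ctx, pageSize, offset)
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if len(page) < pageSize {
			return all, nil
		}
	}
}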

internal/database/blobs_test.go (new file, +100)
@@ -0,0 +1,100 @@
package database

import (
	"context"
	"testing"
	"time"
)

func TestBlobRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewBlobRepository(db)

	// Test Create
	blob := &Blob{
		BlobHash:  "blobhash123",
		CreatedTS: time.Now().Truncate(time.Second),
	}

	err := repo.Create(ctx, nil, blob)
	if err != nil {
		t.Fatalf("failed to create blob: %v", err)
	}

	// Test GetByHash
	retrieved, err := repo.GetByHash(ctx, blob.BlobHash)
	if err != nil {
		t.Fatalf("failed to get blob: %v", err)
	}
	if retrieved == nil {
		t.Fatal("expected blob, got nil")
	}
	if retrieved.BlobHash != blob.BlobHash {
		t.Errorf("blob hash mismatch: got %s, want %s", retrieved.BlobHash, blob.BlobHash)
	}
	if !retrieved.CreatedTS.Equal(blob.CreatedTS) {
		t.Errorf("created timestamp mismatch: got %v, want %v", retrieved.CreatedTS, blob.CreatedTS)
	}

	// Test List
	blob2 := &Blob{
		BlobHash:  "blobhash456",
		CreatedTS: time.Now().Truncate(time.Second),
	}
	err = repo.Create(ctx, nil, blob2)
	if err != nil {
		t.Fatalf("failed to create second blob: %v", err)
	}

	blobs, err := repo.List(ctx, 10, 0)
	if err != nil {
		t.Fatalf("failed to list blobs: %v", err)
	}
	if len(blobs) != 2 {
		t.Errorf("expected 2 blobs, got %d", len(blobs))
	}

	// Test pagination
	blobs, err = repo.List(ctx, 1, 0)
	if err != nil {
		t.Fatalf("failed to list blobs with limit: %v", err)
	}
	if len(blobs) != 1 {
		t.Errorf("expected 1 blob with limit, got %d", len(blobs))
	}

	blobs, err = repo.List(ctx, 1, 1)
	if err != nil {
		t.Fatalf("failed to list blobs with offset: %v", err)
	}
	if len(blobs) != 1 {
		t.Errorf("expected 1 blob with offset, got %d", len(blobs))
	}
}

func TestBlobRepositoryDuplicate(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewBlobRepository(db)

	blob := &Blob{
		BlobHash:  "duplicate_blob",
		CreatedTS: time.Now().Truncate(time.Second),
	}

	err := repo.Create(ctx, nil, blob)
	if err != nil {
		t.Fatalf("failed to create blob: %v", err)
	}

	// Try to create duplicate - should fail due to unique constraint
	err = repo.Create(ctx, nil, blob)
	if err == nil {
		t.Error("expected error for duplicate blob")
	}
}

internal/database/chunk_files.go (new file, +88)
@@ -0,0 +1,88 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
)

type ChunkFileRepository struct {
	db *DB
}

func NewChunkFileRepository(db *DB) *ChunkFileRepository {
	return &ChunkFileRepository{db: db}
}

func (r *ChunkFileRepository) Create(ctx context.Context, tx *sql.Tx, cf *ChunkFile) error {
	query := `
		INSERT INTO chunk_files (chunk_hash, file_path, file_offset, length)
		VALUES (?, ?, ?, ?)
		ON CONFLICT(chunk_hash, file_path) DO NOTHING
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, cf.ChunkHash, cf.FilePath, cf.FileOffset, cf.Length)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, cf.ChunkHash, cf.FilePath, cf.FileOffset, cf.Length)
	}

	if err != nil {
		return fmt.Errorf("inserting chunk_file: %w", err)
	}

	return nil
}

func (r *ChunkFileRepository) GetByChunkHash(ctx context.Context, chunkHash string) ([]*ChunkFile, error) {
	query := `
		SELECT chunk_hash, file_path, file_offset, length
		FROM chunk_files
		WHERE chunk_hash = ?
	`

	rows, err := r.db.conn.QueryContext(ctx, query, chunkHash)
	if err != nil {
		return nil, fmt.Errorf("querying chunk files: %w", err)
	}
	defer CloseRows(rows)

	var chunkFiles []*ChunkFile
	for rows.Next() {
		var cf ChunkFile
		err := rows.Scan(&cf.ChunkHash, &cf.FilePath, &cf.FileOffset, &cf.Length)
		if err != nil {
			return nil, fmt.Errorf("scanning chunk file: %w", err)
		}
		chunkFiles = append(chunkFiles, &cf)
	}

	return chunkFiles, rows.Err()
}

func (r *ChunkFileRepository) GetByFilePath(ctx context.Context, filePath string) ([]*ChunkFile, error) {
	query := `
		SELECT chunk_hash, file_path, file_offset, length
		FROM chunk_files
		WHERE file_path = ?
	`

	rows, err := r.db.conn.QueryContext(ctx, query, filePath)
	if err != nil {
		return nil, fmt.Errorf("querying chunk files: %w", err)
	}
	defer CloseRows(rows)

	var chunkFiles []*ChunkFile
	for rows.Next() {
		var cf ChunkFile
		err := rows.Scan(&cf.ChunkHash, &cf.FilePath, &cf.FileOffset, &cf.Length)
		if err != nil {
			return nil, fmt.Errorf("scanning chunk file: %w", err)
		}
		chunkFiles = append(chunkFiles, &cf)
	}

	return chunkFiles, rows.Err()
}
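GetByFilePath is the restore-side index: it yields the (chunk_hash, file_offset, length) records needed to rebuild a file. A sketch of that consumption, assuming the same package; fetchChunk stands in for the real fetch-decrypt-decompress path and is not part of this changeset:

// restoreFile is a hypothetical sketch: walk a file's chunk records and
// write each chunk's bytes at the recorded offset. w is an io.WriterAt
// (standard "io" package), e.g. an *os.File opened for writing; fetchChunk
// is an assumed callback that returns a chunk's plaintext bytes.
func restoreFile(ctx context.Context, repo *ChunkFileRepository, path string,
	w io.WriterAt, fetchChunk func(ctx context.Context, hash string) ([]byte, error)) error {
	cfs, err := repo.GetByFilePath(ctx, path)
	if err != nil {
		return err
	}
	for _, cf := range cfs {
		data, err := fetchChunk(ctx, cf.ChunkHash)
		if err != nil {
			return err
		}
		if _, err := w.WriteAt(data, cf.FileOffset); err != nil {
			return err
		}
	}
	return nil
}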

internal/database/chunk_files_test.go (new file, +142)
@@ -0,0 +1,142 @@
package database

import (
	"context"
	"testing"
)

func TestChunkFileRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewChunkFileRepository(db)

	// Test Create
	cf1 := &ChunkFile{
		ChunkHash:  "chunk1",
		FilePath:   "/file1.txt",
		FileOffset: 0,
		Length:     1024,
	}

	err := repo.Create(ctx, nil, cf1)
	if err != nil {
		t.Fatalf("failed to create chunk file: %v", err)
	}

	// Add same chunk in different file (deduplication scenario)
	cf2 := &ChunkFile{
		ChunkHash:  "chunk1",
		FilePath:   "/file2.txt",
		FileOffset: 2048,
		Length:     1024,
	}
	err = repo.Create(ctx, nil, cf2)
	if err != nil {
		t.Fatalf("failed to create second chunk file: %v", err)
	}

	// Test GetByChunkHash
	chunkFiles, err := repo.GetByChunkHash(ctx, "chunk1")
	if err != nil {
		t.Fatalf("failed to get chunk files: %v", err)
	}
	if len(chunkFiles) != 2 {
		t.Errorf("expected 2 files for chunk, got %d", len(chunkFiles))
	}

	// Verify both files are returned
	foundFile1 := false
	foundFile2 := false
	for _, cf := range chunkFiles {
		if cf.FilePath == "/file1.txt" && cf.FileOffset == 0 {
			foundFile1 = true
		}
		if cf.FilePath == "/file2.txt" && cf.FileOffset == 2048 {
			foundFile2 = true
		}
	}
	if !foundFile1 || !foundFile2 {
		t.Error("not all expected files found")
	}

	// Test GetByFilePath
	chunkFiles, err = repo.GetByFilePath(ctx, "/file1.txt")
	if err != nil {
		t.Fatalf("failed to get chunks by file path: %v", err)
	}
	if len(chunkFiles) != 1 {
		t.Errorf("expected 1 chunk for file, got %d", len(chunkFiles))
	}
	if chunkFiles[0].ChunkHash != "chunk1" {
		t.Errorf("wrong chunk hash: expected chunk1, got %s", chunkFiles[0].ChunkHash)
	}

	// Test duplicate insert (should be idempotent)
	err = repo.Create(ctx, nil, cf1)
	if err != nil {
		t.Fatalf("failed to create duplicate chunk file: %v", err)
	}
}

func TestChunkFileRepositoryComplexDeduplication(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewChunkFileRepository(db)

	// Simulate a scenario where multiple files share chunks
	// File1: chunk1, chunk2, chunk3
	// File2: chunk2, chunk3, chunk4
	// File3: chunk1, chunk4

	chunkFiles := []ChunkFile{
		// File1
		{ChunkHash: "chunk1", FilePath: "/file1.txt", FileOffset: 0, Length: 1024},
		{ChunkHash: "chunk2", FilePath: "/file1.txt", FileOffset: 1024, Length: 1024},
		{ChunkHash: "chunk3", FilePath: "/file1.txt", FileOffset: 2048, Length: 1024},
		// File2
		{ChunkHash: "chunk2", FilePath: "/file2.txt", FileOffset: 0, Length: 1024},
		{ChunkHash: "chunk3", FilePath: "/file2.txt", FileOffset: 1024, Length: 1024},
		{ChunkHash: "chunk4", FilePath: "/file2.txt", FileOffset: 2048, Length: 1024},
		// File3
		{ChunkHash: "chunk1", FilePath: "/file3.txt", FileOffset: 0, Length: 1024},
		{ChunkHash: "chunk4", FilePath: "/file3.txt", FileOffset: 1024, Length: 1024},
	}

	for _, cf := range chunkFiles {
		err := repo.Create(ctx, nil, &cf)
		if err != nil {
			t.Fatalf("failed to create chunk file: %v", err)
		}
	}

	// Test chunk1 (used by file1 and file3)
	files, err := repo.GetByChunkHash(ctx, "chunk1")
	if err != nil {
		t.Fatalf("failed to get files for chunk1: %v", err)
	}
	if len(files) != 2 {
		t.Errorf("expected 2 files for chunk1, got %d", len(files))
	}

	// Test chunk2 (used by file1 and file2)
	files, err = repo.GetByChunkHash(ctx, "chunk2")
	if err != nil {
		t.Fatalf("failed to get files for chunk2: %v", err)
	}
	if len(files) != 2 {
		t.Errorf("expected 2 files for chunk2, got %d", len(files))
	}

	// Test file2 chunks
	chunks, err := repo.GetByFilePath(ctx, "/file2.txt")
	if err != nil {
		t.Fatalf("failed to get chunks for file2: %v", err)
	}
	if len(chunks) != 3 {
		t.Errorf("expected 3 chunks for file2, got %d", len(chunks))
	}
}

internal/database/chunks.go (new file, +141)
@@ -0,0 +1,141 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
)

type ChunkRepository struct {
	db *DB
}

func NewChunkRepository(db *DB) *ChunkRepository {
	return &ChunkRepository{db: db}
}

func (r *ChunkRepository) Create(ctx context.Context, tx *sql.Tx, chunk *Chunk) error {
	query := `
		INSERT INTO chunks (chunk_hash, sha256, size)
		VALUES (?, ?, ?)
		ON CONFLICT(chunk_hash) DO NOTHING
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, chunk.ChunkHash, chunk.SHA256, chunk.Size)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, chunk.ChunkHash, chunk.SHA256, chunk.Size)
	}

	if err != nil {
		return fmt.Errorf("inserting chunk: %w", err)
	}

	return nil
}

func (r *ChunkRepository) GetByHash(ctx context.Context, hash string) (*Chunk, error) {
	query := `
		SELECT chunk_hash, sha256, size
		FROM chunks
		WHERE chunk_hash = ?
	`

	var chunk Chunk

	err := r.db.conn.QueryRowContext(ctx, query, hash).Scan(
		&chunk.ChunkHash,
		&chunk.SHA256,
		&chunk.Size,
	)

	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying chunk: %w", err)
	}

	return &chunk, nil
}

func (r *ChunkRepository) GetByHashes(ctx context.Context, hashes []string) ([]*Chunk, error) {
	if len(hashes) == 0 {
		return nil, nil
	}

	query := `
		SELECT chunk_hash, sha256, size
		FROM chunks
		WHERE chunk_hash IN (`

	args := make([]interface{}, len(hashes))
	for i, hash := range hashes {
		if i > 0 {
			query += ", "
		}
		query += "?"
		args[i] = hash
	}
	query += ") ORDER BY chunk_hash"

	rows, err := r.db.conn.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("querying chunks: %w", err)
	}
	defer CloseRows(rows)

	var chunks []*Chunk
	for rows.Next() {
		var chunk Chunk

		err := rows.Scan(
			&chunk.ChunkHash,
			&chunk.SHA256,
			&chunk.Size,
		)
		if err != nil {
			return nil, fmt.Errorf("scanning chunk: %w", err)
		}

		chunks = append(chunks, &chunk)
	}

	return chunks, rows.Err()
}

func (r *ChunkRepository) ListUnpacked(ctx context.Context, limit int) ([]*Chunk, error) {
	query := `
		SELECT c.chunk_hash, c.sha256, c.size
		FROM chunks c
		LEFT JOIN blob_chunks bc ON c.chunk_hash = bc.chunk_hash
		WHERE bc.chunk_hash IS NULL
		ORDER BY c.chunk_hash
		LIMIT ?
	`

	rows, err := r.db.conn.QueryContext(ctx, query, limit)
	if err != nil {
		return nil, fmt.Errorf("querying unpacked chunks: %w", err)
	}
	defer CloseRows(rows)

	var chunks []*Chunk
	for rows.Next() {
		var chunk Chunk

		err := rows.Scan(
			&chunk.ChunkHash,
			&chunk.SHA256,
			&chunk.Size,
		)
		if err != nil {
			return nil, fmt.Errorf("scanning chunk: %w", err)
		}

		chunks = append(chunks, &chunk)
	}

	return chunks, rows.Err()
}
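ListUnpacked's LEFT JOIN with an IS NULL filter finds chunks not yet referenced by any blob, which is exactly the packing backlog. A sketch of a drain loop over it, assuming the same package; packChunks is a stand-in for the real blob packer and is not part of this changeset:

// packAll is a hypothetical sketch: repeatedly take a batch of chunks that
// are not yet in any blob and hand them to a packer until none remain.
// packChunks is assumed to record blob_chunks rows on success, so each
// subsequent ListUnpacked call sees a strictly smaller backlog.
func packAll(ctx context.Context, repo *ChunkRepository,
	packChunks func(ctx context.Context, chunks []*Chunk) error) error {
	const batch = 256
	for {
		chunks, err := repo.ListUnpacked(ctx, batch)
		if err != nil {
			return err
		}
		if len(chunks) == 0 {
			return nil
		}
		if err := packChunks(ctx, chunks); err != nil {
			return err
		}
	}
}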

internal/database/chunks_test.go (new file, +104)
@@ -0,0 +1,104 @@
package database

import (
	"context"
	"testing"
)

func TestChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewChunkRepository(db)

	// Test Create
	chunk := &Chunk{
		ChunkHash: "chunkhash123",
		SHA256:    "sha256hash123",
		Size:      4096,
	}

	err := repo.Create(ctx, nil, chunk)
	if err != nil {
		t.Fatalf("failed to create chunk: %v", err)
	}

	// Test GetByHash
	retrieved, err := repo.GetByHash(ctx, chunk.ChunkHash)
	if err != nil {
		t.Fatalf("failed to get chunk: %v", err)
	}
	if retrieved == nil {
		t.Fatal("expected chunk, got nil")
	}
	if retrieved.ChunkHash != chunk.ChunkHash {
		t.Errorf("chunk hash mismatch: got %s, want %s", retrieved.ChunkHash, chunk.ChunkHash)
	}
	if retrieved.SHA256 != chunk.SHA256 {
		t.Errorf("sha256 mismatch: got %s, want %s", retrieved.SHA256, chunk.SHA256)
	}
	if retrieved.Size != chunk.Size {
		t.Errorf("size mismatch: got %d, want %d", retrieved.Size, chunk.Size)
	}

	// Test duplicate insert (should be idempotent)
	err = repo.Create(ctx, nil, chunk)
	if err != nil {
		t.Fatalf("failed to create duplicate chunk: %v", err)
	}

	// Test GetByHashes
	chunk2 := &Chunk{
		ChunkHash: "chunkhash456",
		SHA256:    "sha256hash456",
		Size:      8192,
	}
	err = repo.Create(ctx, nil, chunk2)
	if err != nil {
		t.Fatalf("failed to create second chunk: %v", err)
	}

	chunks, err := repo.GetByHashes(ctx, []string{chunk.ChunkHash, chunk2.ChunkHash})
	if err != nil {
		t.Fatalf("failed to get chunks by hashes: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 chunks, got %d", len(chunks))
	}

	// Test ListUnpacked
	unpacked, err := repo.ListUnpacked(ctx, 10)
	if err != nil {
		t.Fatalf("failed to list unpacked chunks: %v", err)
	}
	if len(unpacked) != 2 {
		t.Errorf("expected 2 unpacked chunks, got %d", len(unpacked))
	}
}

func TestChunkRepositoryNotFound(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewChunkRepository(db)

	// Test GetByHash with non-existent hash
	chunk, err := repo.GetByHash(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if chunk != nil {
		t.Error("expected nil for non-existent chunk")
	}

	// Test GetByHashes with empty list
	chunks, err := repo.GetByHashes(ctx, []string{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if chunks != nil {
		t.Error("expected nil for empty hash list")
	}
}

internal/database/database.go (new file, +143)
@@ -0,0 +1,143 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
	"sync"

	_ "modernc.org/sqlite"
)

type DB struct {
	conn      *sql.DB
	writeLock sync.Mutex
}

func New(ctx context.Context, path string) (*DB, error) {
	conn, err := sql.Open("sqlite", path+"?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000")
	if err != nil {
		return nil, fmt.Errorf("opening database: %w", err)
	}

	if err := conn.PingContext(ctx); err != nil {
		if closeErr := conn.Close(); closeErr != nil {
			Fatal("failed to close database connection: %v", closeErr)
		}
		return nil, fmt.Errorf("pinging database: %w", err)
	}

	db := &DB{conn: conn}
	if err := db.createSchema(ctx); err != nil {
		if closeErr := conn.Close(); closeErr != nil {
			Fatal("failed to close database connection: %v", closeErr)
		}
		return nil, fmt.Errorf("creating schema: %w", err)
	}

	return db, nil
}

func (db *DB) Close() error {
	if err := db.conn.Close(); err != nil {
		Fatal("failed to close database: %v", err)
	}
	return nil
}

func (db *DB) Conn() *sql.DB {
	return db.conn
}

func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
	return db.conn.BeginTx(ctx, opts)
}

// LockForWrite acquires the write lock
func (db *DB) LockForWrite() {
	db.writeLock.Lock()
}

// UnlockWrite releases the write lock
func (db *DB) UnlockWrite() {
	db.writeLock.Unlock()
}

// ExecWithLock executes a write query with the write lock held
func (db *DB) ExecWithLock(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
	db.writeLock.Lock()
	defer db.writeLock.Unlock()
	return db.conn.ExecContext(ctx, query, args...)
}

// QueryRowWithLock executes a write query that returns a row with the write lock held
func (db *DB) QueryRowWithLock(ctx context.Context, query string, args ...interface{}) *sql.Row {
	db.writeLock.Lock()
	defer db.writeLock.Unlock()
	return db.conn.QueryRowContext(ctx, query, args...)
}

func (db *DB) createSchema(ctx context.Context) error {
	schema := `
	CREATE TABLE IF NOT EXISTS files (
		path TEXT PRIMARY KEY,
		mtime INTEGER NOT NULL,
		ctime INTEGER NOT NULL,
		size INTEGER NOT NULL,
		mode INTEGER NOT NULL,
		uid INTEGER NOT NULL,
		gid INTEGER NOT NULL,
		link_target TEXT
	);

	CREATE TABLE IF NOT EXISTS file_chunks (
		path TEXT NOT NULL,
		idx INTEGER NOT NULL,
		chunk_hash TEXT NOT NULL,
		PRIMARY KEY (path, idx)
	);

	CREATE TABLE IF NOT EXISTS chunks (
		chunk_hash TEXT PRIMARY KEY,
		sha256 TEXT NOT NULL,
		size INTEGER NOT NULL
	);

	CREATE TABLE IF NOT EXISTS blobs (
		blob_hash TEXT PRIMARY KEY,
		created_ts INTEGER NOT NULL
	);

	CREATE TABLE IF NOT EXISTS blob_chunks (
		blob_hash TEXT NOT NULL,
		chunk_hash TEXT NOT NULL,
		offset INTEGER NOT NULL,
		length INTEGER NOT NULL,
		PRIMARY KEY (blob_hash, chunk_hash)
	);

	CREATE TABLE IF NOT EXISTS chunk_files (
		chunk_hash TEXT NOT NULL,
		file_path TEXT NOT NULL,
		file_offset INTEGER NOT NULL,
		length INTEGER NOT NULL,
		PRIMARY KEY (chunk_hash, file_path)
	);

	CREATE TABLE IF NOT EXISTS snapshots (
		id TEXT PRIMARY KEY,
		hostname TEXT NOT NULL,
		vaultik_version TEXT NOT NULL,
		created_ts INTEGER NOT NULL,
		file_count INTEGER NOT NULL,
		chunk_count INTEGER NOT NULL,
		blob_count INTEGER NOT NULL,
		total_size INTEGER NOT NULL,
		blob_size INTEGER NOT NULL,
		compression_ratio REAL NOT NULL
	);
	`

	_, err := db.conn.ExecContext(ctx, schema)
	return err
}
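Each repository Create accepts an optional *sql.Tx; when one is passed, the statement runs inside that transaction instead of through ExecWithLock, so multi-table writes can commit atomically. A sketch of that pattern, assuming the same package (insertChunkTx is illustrative, not part of this changeset); note the sketch takes the write lock itself, since the tx path bypasses ExecWithLock:

// insertChunkTx is a hypothetical sketch showing the shared tx parameter:
// both rows commit or neither does. The write lock is held for the whole
// transaction because repository calls with a non-nil tx do not lock.
func insertChunkTx(ctx context.Context, db *DB, chunk *Chunk, cf *ChunkFile) error {
	db.LockForWrite()
	defer db.UnlockWrite()

	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	if err := NewChunkRepository(db).Create(ctx, tx, chunk); err != nil {
		_ = tx.Rollback()
		return err
	}
	if err := NewChunkFileRepository(db).Create(ctx, tx, cf); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}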
										96
									
								
								internal/database/database_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										96
									
								
								internal/database/database_test.go
									
									
									
									
									
										Normal file
									
								
							@ -0,0 +1,96 @@
package database

import (
	"context"
	"fmt"
	"path/filepath"
	"testing"
)

func TestDatabase(t *testing.T) {
	ctx := context.Background()
	dbPath := filepath.Join(t.TempDir(), "test.db")

	db, err := New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}()

	// Test connection
	if db.Conn() == nil {
		t.Fatal("database connection is nil")
	}

	// Test schema creation (already done in New)
	// Verify tables exist
	tables := []string{
		"files", "file_chunks", "chunks", "blobs",
		"blob_chunks", "chunk_files", "snapshots",
	}

	for _, table := range tables {
		var name string
		err := db.conn.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name=?", table).Scan(&name)
		if err != nil {
			t.Errorf("table %s does not exist: %v", table, err)
		}
	}
}

func TestDatabaseInvalidPath(t *testing.T) {
	ctx := context.Background()

	// Test with invalid path
	_, err := New(ctx, "/invalid/path/that/does/not/exist/test.db")
	if err == nil {
		t.Fatal("expected error for invalid path")
	}
}

func TestDatabaseConcurrentAccess(t *testing.T) {
	ctx := context.Background()
	dbPath := filepath.Join(t.TempDir(), "test.db")

	db, err := New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}()

	// Test concurrent writes
	done := make(chan bool, 10)
	for i := 0; i < 10; i++ {
		go func(i int) {
			_, err := db.ExecWithLock(ctx, "INSERT INTO chunks (chunk_hash, sha256, size) VALUES (?, ?, ?)",
				fmt.Sprintf("hash%d", i), fmt.Sprintf("sha%d", i), i*1024)
			if err != nil {
				t.Errorf("concurrent insert failed: %v", err)
			}
			done <- true
		}(i)
	}

	// Wait for all goroutines
	for i := 0; i < 10; i++ {
		<-done
	}

	// Verify all inserts succeeded
	var count int
	err = db.conn.QueryRowContext(ctx, "SELECT COUNT(*) FROM chunks").Scan(&count)
	if err != nil {
		t.Fatalf("failed to count chunks: %v", err)
	}
	if count != 10 {
		t.Errorf("expected 10 chunks, got %d", count)
	}
}
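The concurrency test above hammers `ExecWithLock` from ten goroutines at once. The `DB` type itself is defined in a part of this change not shown here, so as a rough sketch only, a write-serializing wrapper of the following shape would satisfy what the test exercises; the mutex-guarded `*sql.DB` is an assumption, not the actual implementation:

```go
// Sketch only: the real DB lives elsewhere in this change. Assumes a
// mutex-guarded *sql.DB; SQLite permits a single writer at a time, so
// serializing writes avoids SQLITE_BUSY under concurrent inserts.
package database

import (
	"context"
	"database/sql"
	"sync"
)

type DB struct {
	conn    *sql.DB
	writeMu sync.Mutex
}

// ExecWithLock executes a write statement while holding the write mutex.
func (db *DB) ExecWithLock(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
	db.writeMu.Lock()
	defer db.writeMu.Unlock()
	return db.conn.ExecContext(ctx, query, args...)
}
```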
							
								
								
									
internal/database/errors.go (Normal file, 20 lines)
@@ -0,0 +1,20 @@
package database

import (
	"database/sql"
	"fmt"
	"os"
)

// Fatal prints an error message to stderr and exits with status 1
func Fatal(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "FATAL: "+format+"\n", args...)
	os.Exit(1)
}

// CloseRows closes rows and exits on error
func CloseRows(rows *sql.Rows) {
	if err := rows.Close(); err != nil {
		Fatal("failed to close rows: %v", err)
	}
}
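`Fatal` and `CloseRows` exist so that a failed `rows.Close()` aborts the process loudly instead of being silently discarded in a `defer`. A typical call site, as the repositories below use it (the `countChunks` helper here is purely illustrative, assuming the usual `context` and `database/sql` imports):

```go
// Illustrative helper: defer CloseRows immediately after a successful
// query; if the close fails, Fatal terminates the process.
func countChunks(ctx context.Context, conn *sql.DB) (int, error) {
	rows, err := conn.QueryContext(ctx, "SELECT chunk_hash FROM chunks")
	if err != nil {
		return 0, err
	}
	defer CloseRows(rows)

	n := 0
	for rows.Next() {
		n++
	}
	return n, rows.Err()
}
```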
							
								
								
									
internal/database/file_chunks.go (Normal file, 80 lines)
@@ -0,0 +1,80 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
)

type FileChunkRepository struct {
	db *DB
}

func NewFileChunkRepository(db *DB) *FileChunkRepository {
	return &FileChunkRepository{db: db}
}

func (r *FileChunkRepository) Create(ctx context.Context, tx *sql.Tx, fc *FileChunk) error {
	query := `
		INSERT INTO file_chunks (path, idx, chunk_hash)
		VALUES (?, ?, ?)
		ON CONFLICT(path, idx) DO NOTHING
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, fc.Path, fc.Idx, fc.ChunkHash)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, fc.Path, fc.Idx, fc.ChunkHash)
	}

	if err != nil {
		return fmt.Errorf("inserting file_chunk: %w", err)
	}

	return nil
}

func (r *FileChunkRepository) GetByPath(ctx context.Context, path string) ([]*FileChunk, error) {
	query := `
		SELECT path, idx, chunk_hash
		FROM file_chunks
		WHERE path = ?
		ORDER BY idx
	`

	rows, err := r.db.conn.QueryContext(ctx, query, path)
	if err != nil {
		return nil, fmt.Errorf("querying file chunks: %w", err)
	}
	defer CloseRows(rows)

	var fileChunks []*FileChunk
	for rows.Next() {
		var fc FileChunk
		err := rows.Scan(&fc.Path, &fc.Idx, &fc.ChunkHash)
		if err != nil {
			return nil, fmt.Errorf("scanning file chunk: %w", err)
		}
		fileChunks = append(fileChunks, &fc)
	}

	return fileChunks, rows.Err()
}

func (r *FileChunkRepository) DeleteByPath(ctx context.Context, tx *sql.Tx, path string) error {
	query := `DELETE FROM file_chunks WHERE path = ?`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, path)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, path)
	}

	if err != nil {
		return fmt.Errorf("deleting file chunks: %w", err)
	}

	return nil
}
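Because `GetByPath` orders by `idx`, a restore can concatenate the returned chunks in slice order to rebuild the file. An illustrative caller (a hypothetical helper, not part of this change):

```go
// Hypothetical helper: persist the ordered chunk list for a path, then
// read it back. The returned slice is ordered by idx, so concatenating
// the chunks in order reproduces the original file content.
func recordAndList(ctx context.Context, db *DB, path string, hashes []string) ([]*FileChunk, error) {
	repo := NewFileChunkRepository(db)
	for i, h := range hashes {
		fc := &FileChunk{Path: path, Idx: i, ChunkHash: h}
		if err := repo.Create(ctx, nil, fc); err != nil {
			return nil, err
		}
	}
	return repo.GetByPath(ctx, path)
}
```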
							
								
								
									
internal/database/file_chunks_test.go (Normal file, 119 lines)
@@ -0,0 +1,119 @@
package database

import (
	"context"
	"fmt"
	"testing"
)

func TestFileChunkRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewFileChunkRepository(db)

	// Test Create
	fc1 := &FileChunk{
		Path:      "/test/file.txt",
		Idx:       0,
		ChunkHash: "chunk1",
	}

	err := repo.Create(ctx, nil, fc1)
	if err != nil {
		t.Fatalf("failed to create file chunk: %v", err)
	}

	// Add more chunks for the same file
	fc2 := &FileChunk{
		Path:      "/test/file.txt",
		Idx:       1,
		ChunkHash: "chunk2",
	}
	err = repo.Create(ctx, nil, fc2)
	if err != nil {
		t.Fatalf("failed to create second file chunk: %v", err)
	}

	fc3 := &FileChunk{
		Path:      "/test/file.txt",
		Idx:       2,
		ChunkHash: "chunk3",
	}
	err = repo.Create(ctx, nil, fc3)
	if err != nil {
		t.Fatalf("failed to create third file chunk: %v", err)
	}

	// Test GetByPath
	chunks, err := repo.GetByPath(ctx, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to get file chunks: %v", err)
	}
	if len(chunks) != 3 {
		t.Errorf("expected 3 chunks, got %d", len(chunks))
	}

	// Verify order
	for i, chunk := range chunks {
		if chunk.Idx != i {
			t.Errorf("wrong chunk order: expected idx %d, got %d", i, chunk.Idx)
		}
	}

	// Test duplicate insert (should be idempotent)
	err = repo.Create(ctx, nil, fc1)
	if err != nil {
		t.Fatalf("failed to create duplicate file chunk: %v", err)
	}

	// Test DeleteByPath
	err = repo.DeleteByPath(ctx, nil, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to delete file chunks: %v", err)
	}

	chunks, err = repo.GetByPath(ctx, "/test/file.txt")
	if err != nil {
		t.Fatalf("failed to get deleted file chunks: %v", err)
	}
	if len(chunks) != 0 {
		t.Errorf("expected 0 chunks after delete, got %d", len(chunks))
	}
}

func TestFileChunkRepositoryMultipleFiles(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewFileChunkRepository(db)

	// Create chunks for multiple files
	files := []string{"/file1.txt", "/file2.txt", "/file3.txt"}
	for _, path := range files {
		for i := 0; i < 2; i++ {
			fc := &FileChunk{
				Path:      path,
				Idx:       i,
				ChunkHash: fmt.Sprintf("%s_chunk%d", path, i),
			}
			err := repo.Create(ctx, nil, fc)
			if err != nil {
				t.Fatalf("failed to create file chunk: %v", err)
			}
		}
	}

	// Verify each file has correct chunks
	for _, path := range files {
		chunks, err := repo.GetByPath(ctx, path)
		if err != nil {
			t.Fatalf("failed to get chunks for %s: %v", path, err)
		}
		if len(chunks) != 2 {
			t.Errorf("expected 2 chunks for %s, got %d", path, len(chunks))
		}
	}
}
							
								
								
									
internal/database/files.go (Normal file, 145 lines)
@@ -0,0 +1,145 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
	"time"
)

type FileRepository struct {
	db *DB
}

func NewFileRepository(db *DB) *FileRepository {
	return &FileRepository{db: db}
}

func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) error {
	query := `
		INSERT INTO files (path, mtime, ctime, size, mode, uid, gid, link_target)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(path) DO UPDATE SET
			mtime = excluded.mtime,
			ctime = excluded.ctime,
			size = excluded.size,
			mode = excluded.mode,
			uid = excluded.uid,
			gid = excluded.gid,
			link_target = excluded.link_target
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, file.Path, file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, file.Path, file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget)
	}

	if err != nil {
		return fmt.Errorf("inserting file: %w", err)
	}

	return nil
}

func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, error) {
	query := `
		SELECT path, mtime, ctime, size, mode, uid, gid, link_target
		FROM files
		WHERE path = ?
	`

	var file File
	var mtimeUnix, ctimeUnix int64
	var linkTarget sql.NullString

	err := r.db.conn.QueryRowContext(ctx, query, path).Scan(
		&file.Path,
		&mtimeUnix,
		&ctimeUnix,
		&file.Size,
		&file.Mode,
		&file.UID,
		&file.GID,
		&linkTarget,
	)

	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying file: %w", err)
	}

	file.MTime = time.Unix(mtimeUnix, 0)
	file.CTime = time.Unix(ctimeUnix, 0)
	if linkTarget.Valid {
		file.LinkTarget = linkTarget.String
	}

	return &file, nil
}

func (r *FileRepository) ListModifiedSince(ctx context.Context, since time.Time) ([]*File, error) {
	query := `
		SELECT path, mtime, ctime, size, mode, uid, gid, link_target
		FROM files
		WHERE mtime >= ?
		ORDER BY path
	`

	rows, err := r.db.conn.QueryContext(ctx, query, since.Unix())
	if err != nil {
		return nil, fmt.Errorf("querying files: %w", err)
	}
	defer CloseRows(rows)

	var files []*File
	for rows.Next() {
		var file File
		var mtimeUnix, ctimeUnix int64
		var linkTarget sql.NullString

		err := rows.Scan(
			&file.Path,
			&mtimeUnix,
			&ctimeUnix,
			&file.Size,
			&file.Mode,
			&file.UID,
			&file.GID,
			&linkTarget,
		)
		if err != nil {
			return nil, fmt.Errorf("scanning file: %w", err)
		}

		file.MTime = time.Unix(mtimeUnix, 0)
		file.CTime = time.Unix(ctimeUnix, 0)
		if linkTarget.Valid {
			file.LinkTarget = linkTarget.String
		}

		files = append(files, &file)
	}

	return files, rows.Err()
}

func (r *FileRepository) Delete(ctx context.Context, tx *sql.Tx, path string) error {
	query := `DELETE FROM files WHERE path = ?`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, path)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, path)
	}

	if err != nil {
		return fmt.Errorf("deleting file: %w", err)
	}

	return nil
}
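`Create` stores `mtime` and `ctime` via `Unix()`, so timestamps round-trip at one-second granularity; that is why the tests below construct times with `Truncate(time.Second)`. A small illustration of the invariant (fragment, assuming the `time` import):

```go
// Illustrative: a timestamp read back by GetByPath equals the original
// only after truncation to whole seconds, because Create stores Unix().
func roundTripsEqual(orig time.Time) bool {
	stored := time.Unix(orig.Unix(), 0)             // what GetByPath reconstructs
	return stored.Equal(orig.Truncate(time.Second)) // always true
}
```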
							
								
								
									
internal/database/files_test.go (Normal file, 191 lines)
@@ -0,0 +1,191 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"
)

func setupTestDB(t *testing.T) (*DB, func()) {
	ctx := context.Background()
	dbPath := filepath.Join(t.TempDir(), "test.db")

	db, err := New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}

	cleanup := func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close database: %v", err)
		}
	}

	return db, cleanup
}

func TestFileRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewFileRepository(db)

	// Test Create
	file := &File{
		Path:       "/test/file.txt",
		MTime:      time.Now().Truncate(time.Second),
		CTime:      time.Now().Truncate(time.Second),
		Size:       1024,
		Mode:       0644,
		UID:        1000,
		GID:        1000,
		LinkTarget: "",
	}

	err := repo.Create(ctx, nil, file)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Test GetByPath
	retrieved, err := repo.GetByPath(ctx, file.Path)
	if err != nil {
		t.Fatalf("failed to get file: %v", err)
	}
	if retrieved == nil {
		t.Fatal("expected file, got nil")
	}
	if retrieved.Path != file.Path {
		t.Errorf("path mismatch: got %s, want %s", retrieved.Path, file.Path)
	}
	if !retrieved.MTime.Equal(file.MTime) {
		t.Errorf("mtime mismatch: got %v, want %v", retrieved.MTime, file.MTime)
	}
	if retrieved.Size != file.Size {
		t.Errorf("size mismatch: got %d, want %d", retrieved.Size, file.Size)
	}
	if retrieved.Mode != file.Mode {
		t.Errorf("mode mismatch: got %o, want %o", retrieved.Mode, file.Mode)
	}

	// Test Update (upsert)
	file.Size = 2048
	file.MTime = time.Now().Truncate(time.Second)
	err = repo.Create(ctx, nil, file)
	if err != nil {
		t.Fatalf("failed to update file: %v", err)
	}

	retrieved, err = repo.GetByPath(ctx, file.Path)
	if err != nil {
		t.Fatalf("failed to get updated file: %v", err)
	}
	if retrieved.Size != 2048 {
		t.Errorf("size not updated: got %d, want %d", retrieved.Size, 2048)
	}

	// Test ListModifiedSince
	files, err := repo.ListModifiedSince(ctx, time.Now().Add(-1*time.Hour))
	if err != nil {
		t.Fatalf("failed to list files: %v", err)
	}
	if len(files) != 1 {
		t.Errorf("expected 1 file, got %d", len(files))
	}

	// Test Delete
	err = repo.Delete(ctx, nil, file.Path)
	if err != nil {
		t.Fatalf("failed to delete file: %v", err)
	}

	retrieved, err = repo.GetByPath(ctx, file.Path)
	if err != nil {
		t.Fatalf("error getting deleted file: %v", err)
	}
	if retrieved != nil {
		t.Error("expected nil for deleted file")
	}
}

func TestFileRepositorySymlink(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewFileRepository(db)

	// Test symlink
	symlink := &File{
		Path:       "/test/link",
		MTime:      time.Now().Truncate(time.Second),
		CTime:      time.Now().Truncate(time.Second),
		Size:       0,
		Mode:       uint32(0777 | os.ModeSymlink),
		UID:        1000,
		GID:        1000,
		LinkTarget: "/test/target",
	}

	err := repo.Create(ctx, nil, symlink)
	if err != nil {
		t.Fatalf("failed to create symlink: %v", err)
	}

	retrieved, err := repo.GetByPath(ctx, symlink.Path)
	if err != nil {
		t.Fatalf("failed to get symlink: %v", err)
	}
	if !retrieved.IsSymlink() {
		t.Error("expected IsSymlink() to be true")
	}
	if retrieved.LinkTarget != symlink.LinkTarget {
		t.Errorf("link target mismatch: got %s, want %s", retrieved.LinkTarget, symlink.LinkTarget)
	}
}

func TestFileRepositoryTransaction(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Test transaction rollback
	err := repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		file := &File{
			Path:  "/test/tx_file.txt",
			MTime: time.Now().Truncate(time.Second),
			CTime: time.Now().Truncate(time.Second),
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}

		if err := repos.Files.Create(ctx, tx, file); err != nil {
			return err
		}

		// Return error to trigger rollback
		return fmt.Errorf("test rollback")
	})

	if err == nil || err.Error() != "test rollback" {
		t.Fatalf("expected rollback error, got: %v", err)
	}

	// Verify file was not created
	retrieved, err := repos.Files.GetByPath(ctx, "/test/tx_file.txt")
	if err != nil {
		t.Fatalf("error checking for file: %v", err)
	}
	if retrieved != nil {
		t.Error("file should not exist after rollback")
	}
}
							
								
								
									
internal/database/models.go (Normal file, 70 lines)
@@ -0,0 +1,70 @@
package database

import "time"

// File represents a file record in the database
type File struct {
	Path       string
	MTime      time.Time
	CTime      time.Time
	Size       int64
	Mode       uint32
	UID        uint32
	GID        uint32
	LinkTarget string // empty for regular files, target path for symlinks
}

// IsSymlink returns true if this file is a symbolic link
func (f *File) IsSymlink() bool {
	return f.LinkTarget != ""
}

// FileChunk represents the mapping between files and chunks
type FileChunk struct {
	Path      string
	Idx       int
	ChunkHash string
}

// Chunk represents a chunk record in the database
type Chunk struct {
	ChunkHash string
	SHA256    string
	Size      int64
}

// Blob represents a blob record in the database
type Blob struct {
	BlobHash  string
	CreatedTS time.Time
}

// BlobChunk represents the mapping between blobs and chunks
type BlobChunk struct {
	BlobHash  string
	ChunkHash string
	Offset    int64
	Length    int64
}

// ChunkFile represents the reverse mapping of chunks to files
type ChunkFile struct {
	ChunkHash  string
	FilePath   string
	FileOffset int64
	Length     int64
}

// Snapshot represents a snapshot record in the database
type Snapshot struct {
	ID               string
	Hostname         string
	VaultikVersion   string
	CreatedTS        time.Time
	FileCount        int64
	ChunkCount       int64
	BlobCount        int64
	TotalSize        int64   // Total size of all referenced files
	BlobSize         int64   // Total size of all referenced blobs (compressed and encrypted)
	CompressionRatio float64 // Compression ratio (BlobSize / TotalSize)
}
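Note the symlink convention here: `IsSymlink` keys off a non-empty `LinkTarget` rather than decoding `Mode` bits, so an empty target always denotes a regular file. A hypothetical helper showing the convention in use (assumes the `fmt` import):

```go
// Hypothetical helper: format a File record; symlink detection relies on
// LinkTarget being set, not on the mode bits.
func describe(f *File) string {
	if f.IsSymlink() {
		return fmt.Sprintf("%s -> %s (symlink)", f.Path, f.LinkTarget)
	}
	return fmt.Sprintf("%s (%d bytes)", f.Path, f.Size)
}
```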
							
								
								
									
internal/database/module.go (Normal file, 40 lines)
@@ -0,0 +1,40 @@
package database

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"git.eeqj.de/sneak/vaultik/internal/config"
	"go.uber.org/fx"
)

// Module provides database dependencies
var Module = fx.Module("database",
	fx.Provide(
		provideDatabase,
		NewRepositories,
	),
)

func provideDatabase(lc fx.Lifecycle, cfg *config.Config) (*DB, error) {
	// Ensure the index directory exists
	indexDir := filepath.Dir(cfg.IndexPath)
	if err := os.MkdirAll(indexDir, 0700); err != nil {
		return nil, fmt.Errorf("creating index directory: %w", err)
	}

	db, err := New(context.Background(), cfg.IndexPath)
	if err != nil {
		return nil, fmt.Errorf("opening database: %w", err)
	}

	lc.Append(fx.Hook{
		OnStop: func(ctx context.Context) error {
			return db.Close()
		},
	})

	return db, nil
}
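For context, this module is consumed by composing it into an fx application, which injects `*DB` and `*Repositories` and closes the database through the `OnStop` hook on shutdown. A hypothetical wiring sketch; the `config.Module` name is an assumption based on the import above, not something this change defines:

```go
// Hypothetical wiring sketch, not part of this change.
package main

import (
	"git.eeqj.de/sneak/vaultik/internal/config"
	"git.eeqj.de/sneak/vaultik/internal/database"
	"go.uber.org/fx"
)

func main() {
	fx.New(
		config.Module, // assumed to provide *config.Config with IndexPath set
		database.Module,
		fx.Invoke(func(repos *database.Repositories) {
			// repositories are ready here; fx closes the DB on shutdown
		}),
	).Run()
}
```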
							
								
								
									
internal/database/repositories.go (Normal file, 94 lines)
@@ -0,0 +1,94 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
)

type Repositories struct {
	db         *DB
	Files      *FileRepository
	Chunks     *ChunkRepository
	Blobs      *BlobRepository
	FileChunks *FileChunkRepository
	BlobChunks *BlobChunkRepository
	ChunkFiles *ChunkFileRepository
	Snapshots  *SnapshotRepository
}

func NewRepositories(db *DB) *Repositories {
	return &Repositories{
		db:         db,
		Files:      NewFileRepository(db),
		Chunks:     NewChunkRepository(db),
		Blobs:      NewBlobRepository(db),
		FileChunks: NewFileChunkRepository(db),
		BlobChunks: NewBlobChunkRepository(db),
		ChunkFiles: NewChunkFileRepository(db),
		Snapshots:  NewSnapshotRepository(db),
	}
}

type TxFunc func(ctx context.Context, tx *sql.Tx) error

func (r *Repositories) WithTx(ctx context.Context, fn TxFunc) error {
	// Acquire write lock for the entire transaction
	r.db.LockForWrite()
	defer r.db.UnlockWrite()

	tx, err := r.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginning transaction: %w", err)
	}

	defer func() {
		if p := recover(); p != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				Fatal("failed to rollback transaction: %v", rollbackErr)
			}
			panic(p)
		} else if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				Fatal("failed to rollback transaction: %v", rollbackErr)
			}
		}
	}()

	err = fn(ctx, tx)
	if err != nil {
		return err
	}

	return tx.Commit()
}

func (r *Repositories) WithReadTx(ctx context.Context, fn TxFunc) error {
	opts := &sql.TxOptions{
		ReadOnly: true,
	}
	tx, err := r.db.BeginTx(ctx, opts)
	if err != nil {
		return fmt.Errorf("beginning read transaction: %w", err)
	}

	defer func() {
		if p := recover(); p != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				Fatal("failed to rollback transaction: %v", rollbackErr)
			}
			panic(p)
		} else if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				Fatal("failed to rollback transaction: %v", rollbackErr)
			}
		}
	}()

	err = fn(ctx, tx)
	if err != nil {
		return err
	}

	return tx.Commit()
}
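`WithTx` holds the process-wide write lock for the whole transaction and commits only when the callback returns nil; any error or panic rolls everything back, as the tests that follow verify. A minimal caller sketch (a hypothetical helper, assuming the usual `context` and `database/sql` imports):

```go
// Minimal sketch: group a file insert and its chunk mapping into one
// transaction; a non-nil return from the callback rolls both back.
func saveFileWithChunk(ctx context.Context, repos *Repositories, f *File, fc *FileChunk) error {
	return repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		if err := repos.Files.Create(ctx, tx, f); err != nil {
			return err
		}
		return repos.FileChunks.Create(ctx, tx, fc)
	})
}
```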
							
								
								
									
internal/database/repositories_test.go (Normal file, 247 lines)
@@ -0,0 +1,247 @@
package database

import (
	"context"
	"database/sql"
	"fmt"
	"testing"
	"time"
)

func TestRepositoriesTransaction(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Test successful transaction with multiple operations
	err := repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		// Create a file
		file := &File{
			Path:  "/test/tx_file.txt",
			MTime: time.Now().Truncate(time.Second),
			CTime: time.Now().Truncate(time.Second),
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}
		if err := repos.Files.Create(ctx, tx, file); err != nil {
			return err
		}

		// Create chunks
		chunk1 := &Chunk{
			ChunkHash: "tx_chunk1",
			SHA256:    "tx_sha1",
			Size:      512,
		}
		if err := repos.Chunks.Create(ctx, tx, chunk1); err != nil {
			return err
		}

		chunk2 := &Chunk{
			ChunkHash: "tx_chunk2",
			SHA256:    "tx_sha2",
			Size:      512,
		}
		if err := repos.Chunks.Create(ctx, tx, chunk2); err != nil {
			return err
		}

		// Map chunks to file
		fc1 := &FileChunk{
			Path:      file.Path,
			Idx:       0,
			ChunkHash: chunk1.ChunkHash,
		}
		if err := repos.FileChunks.Create(ctx, tx, fc1); err != nil {
			return err
		}

		fc2 := &FileChunk{
			Path:      file.Path,
			Idx:       1,
			ChunkHash: chunk2.ChunkHash,
		}
		if err := repos.FileChunks.Create(ctx, tx, fc2); err != nil {
			return err
		}

		// Create blob
		blob := &Blob{
			BlobHash:  "tx_blob1",
			CreatedTS: time.Now().Truncate(time.Second),
		}
		if err := repos.Blobs.Create(ctx, tx, blob); err != nil {
			return err
		}

		// Map chunks to blob
		bc1 := &BlobChunk{
			BlobHash:  blob.BlobHash,
			ChunkHash: chunk1.ChunkHash,
			Offset:    0,
			Length:    512,
		}
		if err := repos.BlobChunks.Create(ctx, tx, bc1); err != nil {
			return err
		}

		bc2 := &BlobChunk{
			BlobHash:  blob.BlobHash,
			ChunkHash: chunk2.ChunkHash,
			Offset:    512,
			Length:    512,
		}
		if err := repos.BlobChunks.Create(ctx, tx, bc2); err != nil {
			return err
		}

		return nil
	})

	if err != nil {
		t.Fatalf("transaction failed: %v", err)
	}

	// Verify all data was committed
	file, err := repos.Files.GetByPath(ctx, "/test/tx_file.txt")
	if err != nil {
		t.Fatalf("failed to get file: %v", err)
	}
	if file == nil {
		t.Error("expected file after transaction")
	}

	chunks, err := repos.FileChunks.GetByPath(ctx, "/test/tx_file.txt")
	if err != nil {
		t.Fatalf("failed to get file chunks: %v", err)
	}
	if len(chunks) != 2 {
		t.Errorf("expected 2 file chunks, got %d", len(chunks))
	}

	blob, err := repos.Blobs.GetByHash(ctx, "tx_blob1")
	if err != nil {
		t.Fatalf("failed to get blob: %v", err)
	}
	if blob == nil {
		t.Error("expected blob after transaction")
	}
}

func TestRepositoriesTransactionRollback(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// Test transaction rollback
	err := repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		// Create a file
		file := &File{
			Path:  "/test/rollback_file.txt",
			MTime: time.Now().Truncate(time.Second),
			CTime: time.Now().Truncate(time.Second),
			Size:  1024,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		}
		if err := repos.Files.Create(ctx, tx, file); err != nil {
			return err
		}

		// Create a chunk
		chunk := &Chunk{
			ChunkHash: "rollback_chunk",
			SHA256:    "rollback_sha",
			Size:      1024,
		}
		if err := repos.Chunks.Create(ctx, tx, chunk); err != nil {
			return err
		}

		// Return error to trigger rollback
		return fmt.Errorf("intentional rollback")
	})

	if err == nil || err.Error() != "intentional rollback" {
		t.Fatalf("expected rollback error, got: %v", err)
	}

	// Verify nothing was committed
	file, err := repos.Files.GetByPath(ctx, "/test/rollback_file.txt")
	if err != nil {
		t.Fatalf("error checking for file: %v", err)
	}
	if file != nil {
		t.Error("file should not exist after rollback")
	}

	chunk, err := repos.Chunks.GetByHash(ctx, "rollback_chunk")
	if err != nil {
		t.Fatalf("error checking for chunk: %v", err)
	}
	if chunk != nil {
		t.Error("chunk should not exist after rollback")
	}
}

func TestRepositoriesReadTransaction(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repos := NewRepositories(db)

	// First, create some data
	file := &File{
		Path:  "/test/read_file.txt",
		MTime: time.Now().Truncate(time.Second),
		CTime: time.Now().Truncate(time.Second),
		Size:  1024,
		Mode:  0644,
		UID:   1000,
		GID:   1000,
	}
	err := repos.Files.Create(ctx, nil, file)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}

	// Test read-only transaction
	var retrievedFile *File
	err = repos.WithReadTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
		var err error
		retrievedFile, err = repos.Files.GetByPath(ctx, "/test/read_file.txt")
		if err != nil {
			return err
		}

		// Try to write in read-only transaction (should fail)
		_ = repos.Files.Create(ctx, tx, &File{
			Path:  "/test/should_fail.txt",
			MTime: time.Now(),
			CTime: time.Now(),
			Size:  0,
			Mode:  0644,
			UID:   1000,
			GID:   1000,
		})
		// SQLite might not enforce read-only at this level, but we test the pattern

		return nil
	})

	if err != nil {
		t.Fatalf("read transaction failed: %v", err)
	}

	if retrievedFile == nil {
		t.Error("expected to retrieve file in read transaction")
	}
}
							
								
								
									
internal/database/snapshots.go (Normal file, 147 lines)
@@ -0,0 +1,147 @@
```go
package database

import (
	"context"
	"database/sql"
	"fmt"
	"time"
)

// SnapshotRepository provides access to the snapshots table.
type SnapshotRepository struct {
	db *DB
}

func NewSnapshotRepository(db *DB) *SnapshotRepository {
	return &SnapshotRepository{db: db}
}

// Create inserts a new snapshot row. If tx is non-nil the insert runs inside
// that transaction; otherwise the write is serialized via ExecWithLock.
func (r *SnapshotRepository) Create(ctx context.Context, tx *sql.Tx, snapshot *Snapshot) error {
	query := `
		INSERT INTO snapshots (id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count, total_size, blob_size, compression_ratio)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, snapshot.ID, snapshot.Hostname, snapshot.VaultikVersion, snapshot.CreatedTS.Unix(),
			snapshot.FileCount, snapshot.ChunkCount, snapshot.BlobCount, snapshot.TotalSize, snapshot.BlobSize, snapshot.CompressionRatio)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, snapshot.ID, snapshot.Hostname, snapshot.VaultikVersion, snapshot.CreatedTS.Unix(),
			snapshot.FileCount, snapshot.ChunkCount, snapshot.BlobCount, snapshot.TotalSize, snapshot.BlobSize, snapshot.CompressionRatio)
	}

	if err != nil {
		return fmt.Errorf("inserting snapshot: %w", err)
	}

	return nil
}

// UpdateCounts records final statistics for a snapshot, deriving the
// compression ratio as blob_size / total_size.
func (r *SnapshotRepository) UpdateCounts(ctx context.Context, tx *sql.Tx, snapshotID string, fileCount, chunkCount, blobCount, totalSize, blobSize int64) error {
	compressionRatio := 1.0
	if totalSize > 0 {
		compressionRatio = float64(blobSize) / float64(totalSize)
	}

	query := `
		UPDATE snapshots
		SET file_count = ?,
		    chunk_count = ?,
		    blob_count = ?,
		    total_size = ?,
		    blob_size = ?,
		    compression_ratio = ?
		WHERE id = ?
	`

	var err error
	if tx != nil {
		_, err = tx.ExecContext(ctx, query, fileCount, chunkCount, blobCount, totalSize, blobSize, compressionRatio, snapshotID)
	} else {
		_, err = r.db.ExecWithLock(ctx, query, fileCount, chunkCount, blobCount, totalSize, blobSize, compressionRatio, snapshotID)
	}

	if err != nil {
		return fmt.Errorf("updating snapshot: %w", err)
	}

	return nil
}

// GetByID returns the snapshot with the given ID, or (nil, nil) if none exists.
func (r *SnapshotRepository) GetByID(ctx context.Context, snapshotID string) (*Snapshot, error) {
	query := `
		SELECT id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count, total_size, blob_size, compression_ratio
		FROM snapshots
		WHERE id = ?
	`

	var snapshot Snapshot
	var createdTSUnix int64

	err := r.db.conn.QueryRowContext(ctx, query, snapshotID).Scan(
		&snapshot.ID,
		&snapshot.Hostname,
		&snapshot.VaultikVersion,
		&createdTSUnix,
		&snapshot.FileCount,
		&snapshot.ChunkCount,
		&snapshot.BlobCount,
		&snapshot.TotalSize,
		&snapshot.BlobSize,
		&snapshot.CompressionRatio,
	)

	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying snapshot: %w", err)
	}

	snapshot.CreatedTS = time.Unix(createdTSUnix, 0)

	return &snapshot, nil
}

// ListRecent returns up to limit snapshots, most recent first.
func (r *SnapshotRepository) ListRecent(ctx context.Context, limit int) ([]*Snapshot, error) {
	query := `
		SELECT id, hostname, vaultik_version, created_ts, file_count, chunk_count, blob_count, total_size, blob_size, compression_ratio
		FROM snapshots
		ORDER BY created_ts DESC
		LIMIT ?
	`

	rows, err := r.db.conn.QueryContext(ctx, query, limit)
	if err != nil {
		return nil, fmt.Errorf("querying snapshots: %w", err)
	}
	defer CloseRows(rows)

	var snapshots []*Snapshot
	for rows.Next() {
		var snapshot Snapshot
		var createdTSUnix int64

		err := rows.Scan(
			&snapshot.ID,
			&snapshot.Hostname,
			&snapshot.VaultikVersion,
			&createdTSUnix,
			&snapshot.FileCount,
			&snapshot.ChunkCount,
			&snapshot.BlobCount,
			&snapshot.TotalSize,
			&snapshot.BlobSize,
			&snapshot.CompressionRatio,
		)
		if err != nil {
			return nil, fmt.Errorf("scanning snapshot: %w", err)
		}

		snapshot.CreatedTS = time.Unix(createdTSUnix, 0)

		snapshots = append(snapshots, &snapshot)
	}

	return snapshots, rows.Err()
}
```
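Two conventions worth noting: passing a nil `*sql.Tx` makes the repository take its own write lock, and `GetByID` reports a missing row as `(nil, nil)` rather than an error. A usage sketch under those conventions (the `ExampleUsage` wrapper, hostname, and sizes are invented for illustration; `DB` and `Snapshot` come from elsewhere in the package):

```go
package database

import (
	"context"
	"log"
	"time"
)

// ExampleUsage is an illustrative fragment, not part of the repository:
// it shows the intended call sequence around one backup run.
func ExampleUsage(db *DB) {
	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	snap := &Snapshot{
		ID:             "2024-06-01T00:00:00Z", // snapshot IDs are UTC timestamps
		Hostname:       "backup-host",
		VaultikVersion: "1.0.0",
		CreatedTS:      time.Now(),
	}
	if err := repo.Create(ctx, nil, snap); err != nil { // nil tx: repo locks internally
		log.Fatal(err)
	}

	// After the backup completes, record final statistics. UpdateCounts
	// derives compression_ratio = blob_size / total_size on its own.
	if err := repo.UpdateCounts(ctx, nil, snap.ID, 100, 500, 10, 100<<20, 40<<20); err != nil {
		log.Fatal(err)
	}
}
```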
							
								
								
									
`internal/database/snapshots_test.go` (new file, 181 lines, `@@ -0,0 +1,181 @@`):
```go
package database

import (
	"context"
	"fmt"
	"math"
	"testing"
	"time"
)

const (
	Mebibyte               = 1024 * 1024
	oneHundredMebibytes    = 100 * Mebibyte
	fortyMebibytes         = 40 * Mebibyte
	sixtyMebibytes         = 60 * Mebibyte
	twoHundredMebibytes    = 200 * Mebibyte
	compressionRatioPoint4 = 0.4
	compressionRatioPoint3 = 0.3
)

func TestSnapshotRepository(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	// Test Create
	snapshot := &Snapshot{
		ID:               "2024-01-01T12:00:00Z",
		Hostname:         "test-host",
		VaultikVersion:   "1.0.0",
		CreatedTS:        time.Now().Truncate(time.Second),
		FileCount:        100,
		ChunkCount:       500,
		BlobCount:        10,
		TotalSize:        oneHundredMebibytes,
		BlobSize:         fortyMebibytes,
		CompressionRatio: compressionRatioPoint4, // 40 MiB / 100 MiB
	}

	err := repo.Create(ctx, nil, snapshot)
	if err != nil {
		t.Fatalf("failed to create snapshot: %v", err)
	}

	// Test GetByID
	retrieved, err := repo.GetByID(ctx, snapshot.ID)
	if err != nil {
		t.Fatalf("failed to get snapshot: %v", err)
	}
	if retrieved == nil {
		t.Fatal("expected snapshot, got nil")
	}
	if retrieved.ID != snapshot.ID {
		t.Errorf("ID mismatch: got %s, want %s", retrieved.ID, snapshot.ID)
	}
	if retrieved.Hostname != snapshot.Hostname {
		t.Errorf("hostname mismatch: got %s, want %s", retrieved.Hostname, snapshot.Hostname)
	}
	if retrieved.FileCount != snapshot.FileCount {
		t.Errorf("file count mismatch: got %d, want %d", retrieved.FileCount, snapshot.FileCount)
	}

	// Test UpdateCounts
	err = repo.UpdateCounts(ctx, nil, snapshot.ID, 200, 1000, 20, twoHundredMebibytes, sixtyMebibytes)
	if err != nil {
		t.Fatalf("failed to update counts: %v", err)
	}

	retrieved, err = repo.GetByID(ctx, snapshot.ID)
	if err != nil {
		t.Fatalf("failed to get updated snapshot: %v", err)
	}
	if retrieved.FileCount != 200 {
		t.Errorf("file count not updated: got %d, want %d", retrieved.FileCount, 200)
	}
	if retrieved.ChunkCount != 1000 {
		t.Errorf("chunk count not updated: got %d, want %d", retrieved.ChunkCount, 1000)
	}
	if retrieved.BlobCount != 20 {
		t.Errorf("blob count not updated: got %d, want %d", retrieved.BlobCount, 20)
	}
	if retrieved.TotalSize != twoHundredMebibytes {
		t.Errorf("total size not updated: got %d, want %d", retrieved.TotalSize, twoHundredMebibytes)
	}
	if retrieved.BlobSize != sixtyMebibytes {
		t.Errorf("blob size not updated: got %d, want %d", retrieved.BlobSize, sixtyMebibytes)
	}
	expectedRatio := compressionRatioPoint3 // 60 MiB / 200 MiB = 0.3
	if math.Abs(retrieved.CompressionRatio-expectedRatio) > 0.001 {
		t.Errorf("compression ratio not updated: got %f, want %f", retrieved.CompressionRatio, expectedRatio)
	}

	// Test ListRecent
	// Add more snapshots
	for i := 2; i <= 5; i++ {
		s := &Snapshot{
			ID:             fmt.Sprintf("2024-01-0%dT12:00:00Z", i),
			Hostname:       "test-host",
			VaultikVersion: "1.0.0",
			CreatedTS:      time.Now().Add(time.Duration(i) * time.Hour).Truncate(time.Second),
			FileCount:      int64(100 * i),
			ChunkCount:     int64(500 * i),
			BlobCount:      int64(10 * i),
		}
		err := repo.Create(ctx, nil, s)
		if err != nil {
			t.Fatalf("failed to create snapshot %d: %v", i, err)
		}
	}

	// Test listing with limit
	recent, err := repo.ListRecent(ctx, 3)
	if err != nil {
		t.Fatalf("failed to list recent snapshots: %v", err)
	}
	if len(recent) != 3 {
		t.Errorf("expected 3 recent snapshots, got %d", len(recent))
	}

	// Verify order (most recent first)
	for i := 0; i < len(recent)-1; i++ {
		if recent[i].CreatedTS.Before(recent[i+1].CreatedTS) {
			t.Error("snapshots not in descending order")
		}
	}
}

func TestSnapshotRepositoryNotFound(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	// Test GetByID with non-existent ID
	snapshot, err := repo.GetByID(ctx, "nonexistent")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if snapshot != nil {
		t.Error("expected nil for non-existent snapshot")
	}

	// Test UpdateCounts on non-existent snapshot
	err = repo.UpdateCounts(ctx, nil, "nonexistent", 100, 200, 10, oneHundredMebibytes, fortyMebibytes)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// No error expected, but no rows should be affected
}

func TestSnapshotRepositoryDuplicate(t *testing.T) {
	db, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()
	repo := NewSnapshotRepository(db)

	snapshot := &Snapshot{
		ID:             "2024-01-01T12:00:00Z",
		Hostname:       "test-host",
		VaultikVersion: "1.0.0",
		CreatedTS:      time.Now().Truncate(time.Second),
		FileCount:      100,
		ChunkCount:     500,
		BlobCount:      10,
	}

	err := repo.Create(ctx, nil, snapshot)
	if err != nil {
		t.Fatalf("failed to create snapshot: %v", err)
	}

	// Try to create duplicate - should fail due to primary key constraint
	err = repo.Create(ctx, nil, snapshot)
	if err == nil {
		t.Error("expected error for duplicate snapshot")
	}
}
```
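`setupTestDB` is a shared helper defined elsewhere in this package's tests. One plausible shape, assuming a `NewDB(path)` constructor (both the constructor name and its signature are guesses for illustration):

```go
package database

import (
	"path/filepath"
	"testing"
)

// Hypothetical sketch of the helper the tests above rely on; the real one
// lives elsewhere in internal/database and may differ.
func setupTestDB(t *testing.T) (*DB, func()) {
	t.Helper()

	dir := t.TempDir() // removed automatically when the test finishes
	db, err := NewDB(filepath.Join(dir, "test.sqlite")) // NewDB is assumed
	if err != nil {
		t.Fatalf("failed to open test database: %v", err)
	}

	cleanup := func() {
		if err := db.Close(); err != nil {
			t.Errorf("failed to close test database: %v", err)
		}
	}
	return db, cleanup
}
```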
Changes to the `globals` package drop the `go.uber.org/fx` lifecycle hook and set `StartTime` directly in the constructor:

```diff
@@ -1,10 +1,7 @@
 package globals
 
 import (
-	"context"
 	"time"
-
-	"go.uber.org/fx"
 )
 
 // these get populated from main() and copied into the Globals object.
@@ -21,19 +18,13 @@ type Globals struct {
 	StartTime time.Time
 }
 
-func New(lc fx.Lifecycle) (*Globals, error) {
+func New() (*Globals, error) {
 	n := &Globals{
 		Appname:   Appname,
 		Version:   Version,
 		Commit:    Commit,
+		StartTime: time.Now(),
 	}
 
-	lc.Append(fx.Hook{
-		OnStart: func(ctx context.Context) error {
-			n.StartTime = time.Now()
-			return nil
-		},
-	})
-
 	return n, nil
 }
```
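With `StartTime` assigned at construction, `New` no longer needs an `fx.Lifecycle`, so callers and tests can invoke it directly. A minimal sketch (the import path is invented for illustration):

```go
package main

import (
	"fmt"

	"example.com/vaultik/internal/globals" // import path assumed, for illustration
)

func main() {
	g, err := globals.New() // no fx.Lifecycle required after this change
	if err != nil {
		panic(err)
	}
	fmt.Println(g.Appname, g.Version, g.StartTime)
}
```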
`BlobInfo` loses its separate `FinalHash` field; a single content-addressable hash now identifies a blob:

```diff
@@ -27,8 +27,7 @@ type ChunkRef struct {
 
 // BlobInfo represents an encrypted blob containing multiple chunks
 type BlobInfo struct {
-	Hash       string    // Hash of encrypted blob
-	FinalHash  string    // Hash after compression and encryption
+	Hash       string // SHA256 hash of the blob content (content-addressable)
 	CreatedAt  time.Time
 	Size       int64
 	ChunkCount int
```
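Since blobs are content-addressed by SHA256, the object key in the bucket follows from the encrypted bytes alone. An illustrative helper (not from the repository) showing the hash plus the two-level hex sharding used in the bucket layout:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// blobKey is illustrative only: it derives an S3 object key of the form
// blobs/<aa>/<bb>/<hash> from the encrypted blob bytes, where aa and bb
// are the first two bytes of the SHA256 hash in hex.
func blobKey(encryptedBlob []byte) string {
	sum := sha256.Sum256(encryptedBlob)
	h := hex.EncodeToString(sum[:])
	return fmt.Sprintf("blobs/%s/%s/%s", h[0:2], h[2:4], h)
}

func main() {
	fmt.Println(blobKey([]byte("example encrypted blob")))
}
```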
And `TestModelsCompilation` drops the removed field:

```diff
@@ -33,7 +33,6 @@ func TestModelsCompilation(t *testing.T) {
 	// Test BlobInfo
 	bi := &BlobInfo{
 		Hash:       "blob123",
-		FinalHash:  "final123",
 		CreatedAt:  time.Now(),
 		Size:       1024,
 		ChunkCount: 2,
```