Implement CLI skeleton with cobra and fx dependency injection

- Set up cobra CLI with all commands (backup, restore, prune, verify, fetch)
- Integrate uber/fx for dependency injection and lifecycle management
- Add globals package with build-time variables (Version, Commit)
- Implement config loading from YAML with validation
- Create core data models (FileInfo, ChunkInfo, BlobInfo, Snapshot)
- Add Makefile with build, test, lint, and clean targets
- Include minimal test suite for compilation verification
- Update documentation with --quick flag for verify command
- Fix markdown numbering in implementation TODO
parent 0df07790ba
commit 3e8b98dec6

DESIGN.md (228 lines changed)
@@ -243,31 +243,30 @@ Verify runs on a host that has no state, but access to the bucket.

 4. Decrypt the metadata SQLite database chunks using the private key and
    reassemble the snapshot db file
 5. Calculate the SHA256 hash of the decrypted snapshot database
-5. Verify the db file hash matches the decrypted hash
-3. For each blob in the snapshot:
+6. Verify the db file hash matches the decrypted hash
+7. For each blob in the snapshot:
    * Fetch the blob metadata from the snapshot db
    * Ensure the blob exists in S3
-   * Ensure the S3 object hash matches the final (encrypted) blob hash
-     stored in the metadata db
-   * For each chunk in the blob:
-     * Fetch the chunk metadata from the snapshot db
-     * Ensure the S3 object hash matches the chunk hash stored in the
-       metadata db
+   * Check the S3 content hash matches the expected blob hash
+   * If not using --quick mode:
+     * Download and decrypt the blob
+     * Decompress and verify chunk hashes match metadata

 ---

 ## 6. CLI Commands

 ```
-vaultik backup /etc/vaultik.yaml
+vaultik backup /etc/vaultik.yaml [--cron] [--daemon]
 vaultik restore <bucket> <prefix> <snapshot_id> <target_dir>
 vaultik prune <bucket> <prefix>
 vaultik verify <bucket> <prefix> [<snapshot_id>] [--quick]
 vaultik fetch <bucket> <prefix> <snapshot_id> <filepath> <target>
 ```

-* `VAULTIK_PRIVATE_KEY` is required for `restore` and `prune` and
-  `retrieve` commands as.
-* It is passed via environment variable.
+* `VAULTIK_PRIVATE_KEY` is required for `restore`, `prune`, `verify`, and
+  `fetch` commands.
+* It is passed via environment variable containing the age private key.

 ---
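
Taken together with the --quick flag documented above, the verify flow splits into a cheap existence-and-hash pass and an expensive download pass. A minimal sketch of that control flow follows; `Store`, `Blob`, `ChunkRef`, and `FetchDecrypted` are illustrative stand-ins, not code from this commit:

```
package verify

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// Minimal stand-in types; the real ones live in internal/models and
// the S3 layer, which later phases will provide.
type ChunkRef struct {
	ChunkHash      string
	Offset, Length int64
}

type Blob struct {
	Key       string
	FinalHash string
	Chunks    []ChunkRef
}

type Store interface {
	ObjectHash(ctx context.Context, key string) (string, error)
	// FetchDecrypted downloads, decrypts, and decompresses a blob.
	FetchDecrypted(ctx context.Context, b Blob) ([]byte, error)
}

// verifyBlobs mirrors the documented flow: always check existence and
// the stored encrypted hash; only in deep mode re-hash every chunk.
func verifyBlobs(ctx context.Context, s Store, blobs []Blob, quick bool) error {
	for _, b := range blobs {
		remote, err := s.ObjectHash(ctx, b.Key)
		if err != nil {
			return fmt.Errorf("blob %s missing or unreadable: %w", b.Key, err)
		}
		if remote != b.FinalHash {
			return fmt.Errorf("blob %s: S3 hash does not match metadata", b.Key)
		}
		if quick {
			continue
		}
		plain, err := s.FetchDecrypted(ctx, b)
		if err != nil {
			return err
		}
		for _, c := range b.Chunks {
			sum := sha256.Sum256(plain[c.Offset : c.Offset+c.Length])
			if hex.EncodeToString(sum[:]) != c.ChunkHash {
				return fmt.Errorf("blob %s: chunk %s corrupt", b.Key, c.ChunkHash)
			}
		}
	}
	return nil
}
```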

@@ -359,119 +358,120 @@ func RunPrune(bucket, prefix, privateKey string) error

 ## Implementation TODO

-### Phase 1: Core Infrastructure
+### Core Infrastructure
 1. Set up Go module and project structure
-2. Create Makefile with test, fmt, and lint targets
-3. Set up cobra CLI skeleton with all commands
-4. Implement config loading and validation from YAML
-5. Create data structures for FileInfo, ChunkInfo, BlobInfo, etc.
+1. Create Makefile with test, fmt, and lint targets
+1. Set up cobra CLI skeleton with all commands
+1. Implement config loading and validation from YAML
+1. Create data structures for FileInfo, ChunkInfo, BlobInfo, etc.

-### Phase 2: Local Index Database
-6. Implement SQLite schema creation and migrations
-7. Create Index type with all database operations
-8. Add transaction support and proper locking
-9. Implement file tracking (save, lookup, delete)
-10. Implement chunk tracking and deduplication
-11. Implement blob tracking and chunk-to-blob mapping
-12. Write tests for all index operations
+### Local Index Database
+1. Implement SQLite schema creation and migrations
+1. Create Index type with all database operations
+1. Add transaction support and proper locking
+1. Implement file tracking (save, lookup, delete)
+1. Implement chunk tracking and deduplication
+1. Implement blob tracking and chunk-to-blob mapping
+1. Write tests for all index operations
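
One plausible shape for the schema work in this phase, using an illustrative table layout and the mattn/go-sqlite3 driver (neither choice is decided by this commit):

```
package index

import (
	"database/sql"

	_ "github.com/mattn/go-sqlite3" // driver choice is illustrative
)

// Illustrative schema only; real migrations are part of this phase's work.
const schema = `
CREATE TABLE IF NOT EXISTS files (
	path  TEXT PRIMARY KEY,
	mtime INTEGER NOT NULL,
	size  INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS chunks (
	hash TEXT PRIMARY KEY,
	size INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS blobs (
	hash       TEXT PRIMARY KEY,
	final_hash TEXT NOT NULL,
	created_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS blob_chunks (
	blob_hash    TEXT NOT NULL REFERENCES blobs(hash),
	chunk_hash   TEXT NOT NULL REFERENCES chunks(hash),
	chunk_offset INTEGER NOT NULL,
	PRIMARY KEY (blob_hash, chunk_hash)
);
`

// Open opens (or creates) the index database and applies the schema.
func Open(path string) (*sql.DB, error) {
	db, err := sql.Open("sqlite3", path)
	if err != nil {
		return nil, err
	}
	if _, err := db.Exec(schema); err != nil {
		db.Close()
		return nil, err
	}
	return db, nil
}
```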

-### Phase 3: Chunking and Hashing
-13. Implement Rabin fingerprint chunker
-14. Create streaming chunk processor
-15. Implement SHA256 hashing for chunks
-16. Add configurable chunk size parameters
-17. Write tests for chunking consistency
+### Chunking and Hashing
+1. Implement Rabin fingerprint chunker
+1. Create streaming chunk processor
+1. Implement SHA256 hashing for chunks
+1. Add configurable chunk size parameters
+1. Write tests for chunking consistency
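
The chunker items above call for Rabin fingerprinting. As a sketch of the underlying idea, here is a gear-hash content-defined chunker; it stands in for the Rabin version and buffers whole files, whereas the planned processor is streaming:

```
package chunker

import "math/rand"

// Gear table for a FastCDC-style rolling hash. A fixed seed keeps
// chunk boundaries stable across runs, which deduplication requires.
var gear [256]uint64

func init() {
	rng := rand.New(rand.NewSource(1))
	for i := range gear {
		gear[i] = rng.Uint64()
	}
}

// Split returns content-defined chunks. The boundary test
// (hash & mask == 0) yields chunks averaging avgSize bytes;
// avgSize must be a power of two.
func Split(data []byte, minSize, avgSize, maxSize int) [][]byte {
	mask := uint64(avgSize - 1)
	var chunks [][]byte
	start := 0
	var h uint64
	for i, b := range data {
		h = (h << 1) + gear[b]
		n := i - start + 1
		if (n >= minSize && h&mask == 0) || n >= maxSize {
			chunks = append(chunks, data[start:i+1])
			start = i + 1
			h = 0
		}
	}
	if start < len(data) {
		chunks = append(chunks, data[start:])
	}
	return chunks
}
```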

-### Phase 4: Compression and Encryption
-18. Implement zstd compression wrapper
-19. Integrate age encryption library
-20. Create Encryptor type for public key encryption
-21. Create Decryptor type for private key decryption
-22. Implement streaming encrypt/decrypt pipelines
-23. Write tests for compression and encryption
+### Compression and Encryption
+1. Implement zstd compression wrapper
+1. Integrate age encryption library
+1. Create Encryptor type for public key encryption
+1. Create Decryptor type for private key decryption
+1. Implement streaming encrypt/decrypt pipelines
+1. Write tests for compression and encryption
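
A sketch of the streaming compress-then-encrypt pipeline this phase describes, assuming filippo.io/age and github.com/klauspost/compress/zstd (neither dependency is in go.mod at this commit):

```
package crypto

import (
	"io"

	"filippo.io/age"
	"github.com/klauspost/compress/zstd"
)

// CompressEncrypt streams plaintext through zstd compression and then
// age public-key encryption into dst. Only the public recipient is
// needed, matching the design's no-private-keys-on-source rule.
func CompressEncrypt(dst io.Writer, src io.Reader, recipientStr string, level int) error {
	recipient, err := age.ParseX25519Recipient(recipientStr)
	if err != nil {
		return err
	}
	encWriter, err := age.Encrypt(dst, recipient)
	if err != nil {
		return err
	}
	zw, err := zstd.NewWriter(encWriter,
		zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
	if err != nil {
		return err
	}
	if _, err := io.Copy(zw, src); err != nil {
		return err
	}
	// Close order matters: flush zstd frames before finalizing the age stream.
	if err := zw.Close(); err != nil {
		return err
	}
	return encWriter.Close()
}
```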

-### Phase 5: Blob Packing
-24. Implement BlobWriter with size limits
-25. Add chunk accumulation and flushing
-26. Create blob hash calculation
-27. Implement proper error handling and rollback
-28. Write tests for blob packing scenarios
+### Blob Packing
+1. Implement BlobWriter with size limits
+1. Add chunk accumulation and flushing
+1. Create blob hash calculation
+1. Implement proper error handling and rollback
+1. Write tests for blob packing scenarios
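
The packer described above might take roughly this shape; a minimal sketch with the upload step injected as a callback, and rollback deliberately omitted:

```
package blob

// BlobWriter accumulates chunks until adding another would exceed the
// configured blob size limit, then flushes the batch via the injected
// callback (hashing, compression, encryption, and upload live there).
type BlobWriter struct {
	limit  int64
	size   int64
	chunks [][]byte
	flush  func(chunks [][]byte) error
}

func NewBlobWriter(limit int64, flush func([][]byte) error) *BlobWriter {
	return &BlobWriter{limit: limit, flush: flush}
}

// Add appends a chunk, flushing first if the limit would be exceeded.
func (w *BlobWriter) Add(chunk []byte) error {
	if w.size+int64(len(chunk)) > w.limit && len(w.chunks) > 0 {
		if err := w.Flush(); err != nil {
			return err
		}
	}
	w.chunks = append(w.chunks, chunk)
	w.size += int64(len(chunk))
	return nil
}

// Flush hands the accumulated chunks to the callback and resets state.
func (w *BlobWriter) Flush() error {
	if len(w.chunks) == 0 {
		return nil
	}
	err := w.flush(w.chunks)
	w.chunks = nil
	w.size = 0
	return err
}
```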

-### Phase 6: S3 Operations
-29. Integrate MinIO client library
-30. Implement S3Client wrapper type
-31. Add multipart upload support for large blobs
-32. Implement retry logic with exponential backoff
-33. Add connection pooling and timeout handling
-34. Write tests using MinIO container
+### S3 Operations
+1. Integrate MinIO client library
+1. Implement S3Client wrapper type
+1. Add multipart upload support for large blobs
+1. Implement retry logic with exponential backoff
+1. Add connection pooling and timeout handling
+1. Write tests using MinIO container
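
The retry item above is independent of the MinIO client and can be sketched generically:

```
package s3

import (
	"context"
	"time"
)

// retry runs op up to attempts times, doubling the delay after each
// failure. Base delay and attempt count are illustrative defaults.
func retry(ctx context.Context, attempts int, base time.Duration, op func() error) error {
	var err error
	delay := base
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		select {
		case <-time.After(delay):
			delay *= 2 // exponential backoff
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return err
}
```

Callers would wrap each S3 operation, e.g. `retry(ctx, 5, time.Second, func() error { return uploadBlob(...) })`, with `uploadBlob` standing in for whatever the S3Client wrapper ends up exposing.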

-### Phase 7: Backup Command - Basic
-35. Implement directory walking with exclusion patterns
-36. Add file change detection using index
-37. Integrate chunking pipeline for changed files
-38. Implement blob upload coordination
-39. Add progress reporting to stderr
-40. Write integration tests for backup
+### Backup Command - Basic
+1. Implement directory walking with exclusion patterns
+1. Add file change detection using index
+1. Integrate chunking pipeline for changed files
+1. Implement blob upload coordination
+1. Add progress reporting to stderr
+1. Write integration tests for backup
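
Directory walking with exclusions could build on filepath.WalkDir; a sketch, with glob matching against base names as an assumed (not yet specified) exclusion semantic:

```
package backup

import (
	"io/fs"
	"path/filepath"
)

// walk visits every regular file under root, skipping entries whose
// base name matches an exclusion glob. Excluded directories are
// pruned entirely via filepath.SkipDir.
func walk(root string, exclude []string, visit func(path string, info fs.FileInfo) error) error {
	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		for _, pat := range exclude {
			if ok, _ := filepath.Match(pat, filepath.Base(path)); ok {
				if d.IsDir() {
					return filepath.SkipDir
				}
				return nil
			}
		}
		if !d.Type().IsRegular() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		return visit(path, info)
	})
}
```

Change detection would then compare each visited file's size and mtime against the index before feeding it to the chunking pipeline.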

-### Phase 8: Snapshot Metadata
-41. Implement snapshot metadata extraction from index
-42. Create SQLite snapshot database builder
-43. Add metadata compression and encryption
-44. Implement metadata chunking for large snapshots
-45. Add hash calculation and verification
-46. Implement metadata upload to S3
-47. Write tests for metadata operations
+### Snapshot Metadata
+1. Implement snapshot metadata extraction from index
+1. Create SQLite snapshot database builder
+1. Add metadata compression and encryption
+1. Implement metadata chunking for large snapshots
+1. Add hash calculation and verification
+1. Implement metadata upload to S3
+1. Write tests for metadata operations

-### Phase 9: Restore Command
-48. Implement snapshot listing and selection
-49. Add metadata download and reconstruction
-50. Implement hash verification for metadata
-51. Create file restoration logic with chunk retrieval
-52. Add blob caching for efficiency
-53. Implement proper file permissions and mtime restoration
-54. Write integration tests for restore
+### Restore Command
+1. Implement snapshot listing and selection
+1. Add metadata download and reconstruction
+1. Implement hash verification for metadata
+1. Create file restoration logic with chunk retrieval
+1. Add blob caching for efficiency
+1. Implement proper file permissions and mtime restoration
+1. Write integration tests for restore
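
Restoring permissions and mtime reduces to two stdlib calls once file contents are written. Note that this commit's FileInfo model records only path, mtime, and size, so the mode parameter here is an assumption about later model work:

```
package restore

import (
	"os"
	"time"
)

// restoreMeta applies the recorded mode and modification time after a
// restored file's bytes have been written.
func restoreMeta(path string, mode os.FileMode, mtime time.Time) error {
	if err := os.Chmod(path, mode); err != nil {
		return err
	}
	return os.Chtimes(path, mtime, mtime)
}
```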

-### Phase 10: Prune Command
-55. Implement latest snapshot detection
-56. Add referenced blob extraction from metadata
-57. Create S3 blob listing and comparison
-58. Implement safe deletion of unreferenced blobs
-59. Add dry-run mode for safety
-60. Write tests for prune scenarios
+### Prune Command
+1. Implement latest snapshot detection
+1. Add referenced blob extraction from metadata
+1. Create S3 blob listing and comparison
+1. Implement safe deletion of unreferenced blobs
+1. Add dry-run mode for safety
+1. Write tests for prune scenarios
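
The core of prune is a set difference between stored and referenced blob keys; deletion, or printing in --dry-run mode, then operates on the result:

```
package prune

// unreferenced returns the stored blob keys that no snapshot
// references. stored comes from an S3 listing; referenced comes
// from the latest snapshot's metadata.
func unreferenced(stored []string, referenced map[string]bool) []string {
	var orphans []string
	for _, key := range stored {
		if !referenced[key] {
			orphans = append(orphans, key)
		}
	}
	return orphans
}
```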

-### Phase 11: Verify Command
-61. Implement metadata integrity checking
-62. Add blob existence verification
-63. Create optional deep verification mode
-64. Implement detailed error reporting
-65. Write tests for verification
+### Verify Command
+1. Implement metadata integrity checking
+1. Add blob existence verification
+1. Implement quick mode (S3 hash checking)
+1. Implement deep mode (download and verify chunks)
+1. Add detailed error reporting
+1. Write tests for verification

-### Phase 12: Fetch Command
-66. Implement single-file metadata query
-67. Add minimal blob downloading for file
-68. Create streaming file reconstruction
-69. Add support for output redirection
-70. Write tests for fetch command
+### Fetch Command
+1. Implement single-file metadata query
+1. Add minimal blob downloading for file
+1. Create streaming file reconstruction
+1. Add support for output redirection
+1. Write tests for fetch command

-### Phase 13: Daemon Mode
-71. Implement inotify watcher for Linux
-72. Add dirty path tracking in index
-73. Create periodic full scan scheduler
-74. Implement backup interval enforcement
-75. Add proper signal handling and shutdown
-76. Write tests for daemon behavior
+### Daemon Mode
+1. Implement inotify watcher for Linux
+1. Add dirty path tracking in index
+1. Create periodic full scan scheduler
+1. Implement backup interval enforcement
+1. Add proper signal handling and shutdown
+1. Write tests for daemon behavior
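
For the inotify watcher, one common route is the fsnotify wrapper (an assumption; the list names only inotify). A sketch of feeding events into dirty-path tracking:

```
package daemon

import (
	"github.com/fsnotify/fsnotify"
)

// watchDirty marks paths dirty as filesystem events arrive. Note that
// fsnotify's Add is not recursive; real code must also register
// subdirectories as it discovers them.
func watchDirty(dirs []string, markDirty func(path string)) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()
	for _, d := range dirs {
		if err := w.Add(d); err != nil {
			return err
		}
	}
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return nil
			}
			markDirty(ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return nil
			}
			return err
		}
	}
}
```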

-### Phase 14: Cron Mode
-77. Implement silent operation mode
-78. Add proper exit codes for cron
-79. Implement lock file to prevent concurrent runs
-80. Add error summary reporting
-81. Write tests for cron mode
+### Cron Mode
+1. Implement silent operation mode
+1. Add proper exit codes for cron
+1. Implement lock file to prevent concurrent runs
+1. Add error summary reporting
+1. Write tests for cron mode
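
A simple lock-file scheme for preventing concurrent cron runs, using O_EXCL creation; the lock path would come from config or a constant, unspecified here:

```
package cron

import (
	"fmt"
	"os"
)

// acquireLock creates the lock file exclusively, failing if it already
// exists. The returned release func removes the lock when the run ends.
func acquireLock(path string) (release func(), err error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o644)
	if err != nil {
		return nil, fmt.Errorf("another vaultik run appears active: %w", err)
	}
	fmt.Fprintf(f, "%d\n", os.Getpid()) // record owner PID for debugging
	f.Close()
	return func() { os.Remove(path) }, nil
}
```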

-### Phase 15: Finalization
-82. Add comprehensive logging throughout
-83. Implement proper error wrapping and context
-84. Add performance metrics collection
-85. Create end-to-end integration tests
-86. Write documentation and examples
-87. Set up CI/CD pipeline
+### Finalization
+1. Add comprehensive logging throughout
+1. Implement proper error wrapping and context
+1. Add performance metrics collection
+1. Create end-to-end integration tests
+1. Write documentation and examples
+1. Set up CI/CD pipeline

Makefile (new file, 47 lines)
@@ -0,0 +1,47 @@
.PHONY: test fmt lint build clean all

# Build variables
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
COMMIT := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")

# Linker flags
LDFLAGS := -X 'git.eeqj.de/sneak/vaultik/internal/globals.Version=$(VERSION)' \
           -X 'git.eeqj.de/sneak/vaultik/internal/globals.Commit=$(COMMIT)'

# Default target
all: test

# Run tests
test: lint
	go test -v ./...

# Format code
fmt:
	go fmt ./...

# Run linter
lint:
	golangci-lint run

# Build binary
build:
	go build -ldflags "$(LDFLAGS)" -o vaultik ./cmd/vaultik

# Clean build artifacts
clean:
	rm -f vaultik
	go clean

# Install dependencies
deps:
	go mod download
	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest

# Run tests with coverage
test-coverage:
	go test -v -coverprofile=coverage.out ./...
	go tool cover -html=coverage.out -o coverage.html

# Run integration tests
test-integration:
	go test -v -tags=integration ./...

DESIGN.md (continued)
@@ -134,7 +134,8 @@ vaultik verify <bucket> <prefix> [<snapshot_id>]

 **verify**: Validate backup integrity
 * Checks metadata hash
 * Verifies all referenced blobs exist
-* Validates chunk integrity
+* Default: Downloads blobs and validates chunk integrity
+* `--quick`: Only checks blob existence and S3 content hashes

 ---

cmd/vaultik/main.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package main

import (
	"git.eeqj.de/sneak/vaultik/internal/cli"
)

func main() {
	cli.CLIEntry()
}

go.mod (12 lines changed)
@@ -1,3 +1,15 @@
 module git.eeqj.de/sneak/vaultik

 go 1.24.4
+
+require (
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/spf13/cobra v1.9.1 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
+	go.uber.org/dig v1.19.0 // indirect
+	go.uber.org/fx v1.24.0 // indirect
+	go.uber.org/multierr v1.10.0 // indirect
+	go.uber.org/zap v1.26.0 // indirect
+	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)

go.sum (new file, 21 lines)
@@ -0,0 +1,21 @@
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

internal/cli/backup.go (new file, 72 lines)
@@ -0,0 +1,72 @@
package cli

import (
	"context"
	"fmt"

	"git.eeqj.de/sneak/vaultik/internal/config"
	"git.eeqj.de/sneak/vaultik/internal/globals"
	"github.com/spf13/cobra"
	"go.uber.org/fx"
)

// BackupOptions contains options for the backup command
type BackupOptions struct {
	ConfigPath string
	Daemon     bool
	Cron       bool
}

// NewBackupCommand creates the backup command
func NewBackupCommand() *cobra.Command {
	opts := &BackupOptions{}

	cmd := &cobra.Command{
		Use:   "backup <config.yaml>",
		Short: "Perform incremental backup",
		Long:  `Backup configured directories using incremental deduplication and encryption`,
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.ConfigPath = args[0]
			return runBackup(cmd.Context(), opts)
		},
	}

	cmd.Flags().BoolVar(&opts.Daemon, "daemon", false, "Run in daemon mode with inotify monitoring")
	cmd.Flags().BoolVar(&opts.Cron, "cron", false, "Run in cron mode (silent unless error)")

	return cmd
}

func runBackup(ctx context.Context, opts *BackupOptions) error {
	app := fx.New(
		fx.Supply(config.ConfigPath(opts.ConfigPath)),
		fx.Provide(globals.New),
		config.Module,
		// Additional modules will be added here
		fx.Invoke(func(g *globals.Globals, cfg *config.Config) error {
			// TODO: Implement backup logic
			fmt.Printf("Running backup with config: %s\n", opts.ConfigPath)
			fmt.Printf("Version: %s, Commit: %s\n", g.Version, g.Commit)
			if opts.Daemon {
				fmt.Println("Running in daemon mode")
			}
			if opts.Cron {
				fmt.Println("Running in cron mode")
			}
			return nil
		}),
		fx.NopLogger,
	)

	if err := app.Start(ctx); err != nil {
		return fmt.Errorf("failed to start backup: %w", err)
	}
	defer func() {
		if err := app.Stop(ctx); err != nil {
			fmt.Printf("error stopping app: %v\n", err)
		}
	}()

	return nil
}

internal/cli/entry.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package cli

import (
	"os"
)

// CLIEntry is the main entry point for the CLI application
func CLIEntry() {
	rootCmd := NewRootCommand()
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}

internal/cli/entry_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package cli

import (
	"testing"
)

// TestCLIEntry ensures the CLI can be imported and basic initialization works
func TestCLIEntry(t *testing.T) {
	// This test primarily serves as a compilation test
	// to ensure all imports resolve correctly
	cmd := NewRootCommand()
	if cmd == nil {
		t.Fatal("NewRootCommand() returned nil")
	}

	if cmd.Use != "vaultik" {
		t.Errorf("Expected command use to be 'vaultik', got '%s'", cmd.Use)
	}

	// Verify all subcommands are registered
	expectedCommands := []string{"backup", "restore", "prune", "verify", "fetch"}
	for _, expected := range expectedCommands {
		found := false
		for _, cmd := range cmd.Commands() {
			if cmd.Use == expected || cmd.Name() == expected {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Expected command '%s' not found", expected)
		}
	}
}

internal/cli/fetch.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package cli

import (
	"context"
	"fmt"
	"os"

	"git.eeqj.de/sneak/vaultik/internal/globals"
	"github.com/spf13/cobra"
	"go.uber.org/fx"
)

// FetchOptions contains options for the fetch command
type FetchOptions struct {
	Bucket     string
	Prefix     string
	SnapshotID string
	FilePath   string
	Target     string
}

// NewFetchCommand creates the fetch command
func NewFetchCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "fetch <bucket> <prefix> <snapshot_id> <filepath> <target>",
		Short: "Extract single file from backup",
		Long:  `Download and decrypt a single file from a backup snapshot`,
		Args:  cobra.ExactArgs(5),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts := &FetchOptions{
				Bucket:     args[0],
				Prefix:     args[1],
				SnapshotID: args[2],
				FilePath:   args[3],
				Target:     args[4],
			}
			return runFetch(cmd.Context(), opts)
		},
	}

	return cmd
}

func runFetch(ctx context.Context, opts *FetchOptions) error {
	if os.Getenv("VAULTIK_PRIVATE_KEY") == "" {
		return fmt.Errorf("VAULTIK_PRIVATE_KEY environment variable must be set")
	}

	app := fx.New(
		fx.Supply(opts),
		fx.Provide(globals.New),
		// Additional modules will be added here
		fx.Invoke(func(g *globals.Globals) error {
			// TODO: Implement fetch logic
			fmt.Printf("Fetching %s from snapshot %s to %s\n", opts.FilePath, opts.SnapshotID, opts.Target)
			return nil
		}),
		fx.NopLogger,
	)

	if err := app.Start(ctx); err != nil {
		return fmt.Errorf("failed to start fetch: %w", err)
	}
	defer func() {
		if err := app.Stop(ctx); err != nil {
			fmt.Printf("error stopping app: %v\n", err)
		}
	}()

	return nil
}

internal/cli/prune.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package cli

import (
	"context"
	"fmt"
	"os"

	"git.eeqj.de/sneak/vaultik/internal/globals"
	"github.com/spf13/cobra"
	"go.uber.org/fx"
)

// PruneOptions contains options for the prune command
type PruneOptions struct {
	Bucket string
	Prefix string
	DryRun bool
}

// NewPruneCommand creates the prune command
func NewPruneCommand() *cobra.Command {
	opts := &PruneOptions{}

	cmd := &cobra.Command{
		Use:   "prune <bucket> <prefix>",
		Short: "Remove unreferenced blobs",
		Long:  `Delete blobs that are no longer referenced by any snapshot`,
		Args:  cobra.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.Bucket = args[0]
			opts.Prefix = args[1]
			return runPrune(cmd.Context(), opts)
		},
	}

	cmd.Flags().BoolVar(&opts.DryRun, "dry-run", false, "Show what would be deleted without actually deleting")

	return cmd
}

func runPrune(ctx context.Context, opts *PruneOptions) error {
	if os.Getenv("VAULTIK_PRIVATE_KEY") == "" {
		return fmt.Errorf("VAULTIK_PRIVATE_KEY environment variable must be set")
	}

	app := fx.New(
		fx.Supply(opts),
		fx.Provide(globals.New),
		// Additional modules will be added here
		fx.Invoke(func(g *globals.Globals) error {
			// TODO: Implement prune logic
			fmt.Printf("Pruning bucket %s with prefix %s\n", opts.Bucket, opts.Prefix)
			if opts.DryRun {
				fmt.Println("Running in dry-run mode")
			}
			return nil
		}),
		fx.NopLogger,
	)

	if err := app.Start(ctx); err != nil {
		return fmt.Errorf("failed to start prune: %w", err)
	}
	defer func() {
		if err := app.Stop(ctx); err != nil {
			fmt.Printf("error stopping app: %v\n", err)
		}
	}()

	return nil
}

internal/cli/restore.go (new file, 69 lines)
@@ -0,0 +1,69 @@
package cli

import (
	"context"
	"fmt"
	"os"

	"git.eeqj.de/sneak/vaultik/internal/globals"
	"github.com/spf13/cobra"
	"go.uber.org/fx"
)

// RestoreOptions contains options for the restore command
type RestoreOptions struct {
	Bucket     string
	Prefix     string
	SnapshotID string
	TargetDir  string
}

// NewRestoreCommand creates the restore command
func NewRestoreCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "restore <bucket> <prefix> <snapshot_id> <target_dir>",
		Short: "Restore files from backup",
		Long:  `Download and decrypt files from a backup snapshot`,
		Args:  cobra.ExactArgs(4),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts := &RestoreOptions{
				Bucket:     args[0],
				Prefix:     args[1],
				SnapshotID: args[2],
				TargetDir:  args[3],
			}
			return runRestore(cmd.Context(), opts)
		},
	}

	return cmd
}

func runRestore(ctx context.Context, opts *RestoreOptions) error {
	if os.Getenv("VAULTIK_PRIVATE_KEY") == "" {
		return fmt.Errorf("VAULTIK_PRIVATE_KEY environment variable must be set")
	}

	app := fx.New(
		fx.Supply(opts),
		fx.Provide(globals.New),
		// Additional modules will be added here
		fx.Invoke(func(g *globals.Globals) error {
			// TODO: Implement restore logic
			fmt.Printf("Restoring snapshot %s to %s\n", opts.SnapshotID, opts.TargetDir)
			return nil
		}),
		fx.NopLogger,
	)

	if err := app.Start(ctx); err != nil {
		return fmt.Errorf("failed to start restore: %w", err)
	}
	defer func() {
		if err := app.Stop(ctx); err != nil {
			fmt.Printf("error stopping app: %v\n", err)
		}
	}()

	return nil
}

internal/cli/root.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package cli

import (
	"github.com/spf13/cobra"
)

// NewRootCommand creates the root cobra command
func NewRootCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "vaultik",
		Short: "Secure incremental backup tool with asymmetric encryption",
		Long: `vaultik is a secure incremental backup daemon that encrypts data using age
public keys and uploads to S3-compatible storage. No private keys are needed
on the source system.`,
		SilenceUsage: true,
	}

	// Add subcommands
	cmd.AddCommand(
		NewBackupCommand(),
		NewRestoreCommand(),
		NewPruneCommand(),
		NewVerifyCommand(),
		NewFetchCommand(),
	)

	return cmd
}

internal/cli/verify.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package cli

import (
	"context"
	"fmt"
	"os"

	"git.eeqj.de/sneak/vaultik/internal/globals"
	"github.com/spf13/cobra"
	"go.uber.org/fx"
)

// VerifyOptions contains options for the verify command
type VerifyOptions struct {
	Bucket     string
	Prefix     string
	SnapshotID string
	Quick      bool
}

// NewVerifyCommand creates the verify command
func NewVerifyCommand() *cobra.Command {
	opts := &VerifyOptions{}

	cmd := &cobra.Command{
		Use:   "verify <bucket> <prefix> [<snapshot_id>]",
		Short: "Verify backup integrity",
		Long:  `Check that all referenced blobs exist and verify metadata integrity`,
		Args:  cobra.RangeArgs(2, 3),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.Bucket = args[0]
			opts.Prefix = args[1]
			if len(args) > 2 {
				opts.SnapshotID = args[2]
			}
			return runVerify(cmd.Context(), opts)
		},
	}

	cmd.Flags().BoolVar(&opts.Quick, "quick", false, "Perform quick verification by checking blob existence and S3 content hashes without downloading")

	return cmd
}

func runVerify(ctx context.Context, opts *VerifyOptions) error {
	if os.Getenv("VAULTIK_PRIVATE_KEY") == "" {
		return fmt.Errorf("VAULTIK_PRIVATE_KEY environment variable must be set")
	}

	app := fx.New(
		fx.Supply(opts),
		fx.Provide(globals.New),
		// Additional modules will be added here
		fx.Invoke(func(g *globals.Globals) error {
			// TODO: Implement verify logic
			if opts.SnapshotID == "" {
				fmt.Printf("Verifying latest snapshot in bucket %s with prefix %s\n", opts.Bucket, opts.Prefix)
			} else {
				fmt.Printf("Verifying snapshot %s in bucket %s with prefix %s\n", opts.SnapshotID, opts.Bucket, opts.Prefix)
			}
			if opts.Quick {
				fmt.Println("Performing quick verification")
			} else {
				fmt.Println("Performing deep verification")
			}
			return nil
		}),
		fx.NopLogger,
	)

	if err := app.Start(ctx); err != nil {
		return fmt.Errorf("failed to start verify: %w", err)
	}
	defer func() {
		if err := app.Stop(ctx); err != nil {
			fmt.Printf("error stopping app: %v\n", err)
		}
	}()

	return nil
}

internal/config/config.go (new file, 149 lines)
@@ -0,0 +1,149 @@
package config

import (
	"fmt"
	"os"
	"time"

	"go.uber.org/fx"
	"gopkg.in/yaml.v3"
)

// Config represents the application configuration
type Config struct {
	AgeRecipient      string        `yaml:"age_recipient"`
	BackupInterval    time.Duration `yaml:"backup_interval"`
	BlobSizeLimit     int64         `yaml:"blob_size_limit"`
	ChunkSize         int64         `yaml:"chunk_size"`
	Exclude           []string      `yaml:"exclude"`
	FullScanInterval  time.Duration `yaml:"full_scan_interval"`
	Hostname          string        `yaml:"hostname"`
	IndexPath         string        `yaml:"index_path"`
	IndexPrefix       string        `yaml:"index_prefix"`
	MinTimeBetweenRun time.Duration `yaml:"min_time_between_run"`
	S3                S3Config      `yaml:"s3"`
	SourceDirs        []string      `yaml:"source_dirs"`
	CompressionLevel  int           `yaml:"compression_level"`
}

// S3Config represents S3 storage configuration
type S3Config struct {
	Endpoint        string `yaml:"endpoint"`
	Bucket          string `yaml:"bucket"`
	Prefix          string `yaml:"prefix"`
	AccessKeyID     string `yaml:"access_key_id"`
	SecretAccessKey string `yaml:"secret_access_key"`
	Region          string `yaml:"region"`
	UseSSL          bool   `yaml:"use_ssl"`
	PartSize        int64  `yaml:"part_size"`
}

// ConfigPath wraps the config file path for fx injection
type ConfigPath string

// New creates a new Config instance
func New(path ConfigPath) (*Config, error) {
	if path == "" {
		return nil, fmt.Errorf("config path not provided")
	}

	cfg, err := Load(string(path))
	if err != nil {
		return nil, fmt.Errorf("failed to load config: %w", err)
	}

	return cfg, nil
}

// Load reads and parses the configuration file
func Load(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read config file: %w", err)
	}

	cfg := &Config{
		// Set defaults
		BlobSizeLimit:     10 * 1024 * 1024 * 1024, // 10GB
		ChunkSize:         10 * 1024 * 1024,        // 10MB
		BackupInterval:    1 * time.Hour,
		FullScanInterval:  24 * time.Hour,
		MinTimeBetweenRun: 15 * time.Minute,
		IndexPath:         "/var/lib/vaultik/index.sqlite",
		IndexPrefix:       "index/",
		CompressionLevel:  3,
	}

	if err := yaml.Unmarshal(data, cfg); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}

	// Get hostname if not set
	if cfg.Hostname == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return nil, fmt.Errorf("failed to get hostname: %w", err)
		}
		cfg.Hostname = hostname
	}

	// Set default S3 settings
	if cfg.S3.Region == "" {
		cfg.S3.Region = "us-east-1"
	}
	if cfg.S3.PartSize == 0 {
		cfg.S3.PartSize = 5 * 1024 * 1024 // 5MB
	}

	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("invalid config: %w", err)
	}

	return cfg, nil
}

// Validate checks if the configuration is valid
func (c *Config) Validate() error {
	if c.AgeRecipient == "" {
		return fmt.Errorf("age_recipient is required")
	}

	if len(c.SourceDirs) == 0 {
		return fmt.Errorf("at least one source directory is required")
	}

	if c.S3.Endpoint == "" {
		return fmt.Errorf("s3.endpoint is required")
	}

	if c.S3.Bucket == "" {
		return fmt.Errorf("s3.bucket is required")
	}

	if c.S3.AccessKeyID == "" {
		return fmt.Errorf("s3.access_key_id is required")
	}

	if c.S3.SecretAccessKey == "" {
		return fmt.Errorf("s3.secret_access_key is required")
	}

	if c.ChunkSize < 1024*1024 { // 1MB minimum
		return fmt.Errorf("chunk_size must be at least 1MB")
	}

	if c.BlobSizeLimit < c.ChunkSize {
		return fmt.Errorf("blob_size_limit must be at least chunk_size")
	}

	if c.CompressionLevel < 1 || c.CompressionLevel > 19 {
		return fmt.Errorf("compression_level must be between 1 and 19")
	}

	return nil
}

// Module exports the config module for fx
var Module = fx.Module("config",
	fx.Provide(New),
)
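
Given those struct tags, defaults, and Validate rules, a config file might look like the following (values illustrative; the duration fields are omitted here and fall back to the defaults set in Load):

```
age_recipient: age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
hostname: myhost             # optional; defaults to os.Hostname()
source_dirs:
  - /etc
  - /home
exclude:
  - "*.tmp"
chunk_size: 10485760         # 10MB, the default; minimum 1MB
blob_size_limit: 10737418240 # 10GB, the default
compression_level: 3
s3:
  endpoint: https://s3.example.com
  bucket: backups
  prefix: myhost/
  access_key_id: EXAMPLEKEY
  secret_access_key: EXAMPLESECRET
  region: us-east-1
  use_ssl: true
```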

internal/config/config_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package config

import (
	"os"
	"path/filepath"
	"testing"
)

// TestConfigLoad ensures the config package can be imported and basic functionality works
func TestConfigLoad(t *testing.T) {
	// Create a temporary config file
	tmpDir := t.TempDir()
	configPath := filepath.Join(tmpDir, "test-config.yaml")

	configContent := `age_recipient: age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
source_dirs:
  - /tmp/test
s3:
  endpoint: https://s3.example.com
  bucket: test-bucket
  access_key_id: test-key
  secret_access_key: test-secret
`

	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write test config: %v", err)
	}

	// Test loading the config
	cfg, err := Load(configPath)
	if err != nil {
		t.Fatalf("Failed to load config: %v", err)
	}

	// Basic validation
	if cfg.AgeRecipient != "age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" {
		t.Errorf("Expected age recipient to be set, got '%s'", cfg.AgeRecipient)
	}

	if len(cfg.SourceDirs) != 1 || cfg.SourceDirs[0] != "/tmp/test" {
		t.Errorf("Expected source dirs to be ['/tmp/test'], got %v", cfg.SourceDirs)
	}

	if cfg.S3.Bucket != "test-bucket" {
		t.Errorf("Expected S3 bucket to be 'test-bucket', got '%s'", cfg.S3.Bucket)
	}
}

internal/globals/globals.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package globals

import (
	"context"
	"time"

	"go.uber.org/fx"
)

// these get populated at build time via -ldflags and copied into the
// Globals object.
var (
	Appname string = "vaultik"
	Version string = "dev"
	Commit  string = "unknown"
)

type Globals struct {
	Appname   string
	Version   string
	Commit    string
	StartTime time.Time
}

func New(lc fx.Lifecycle) (*Globals, error) {
	n := &Globals{
		Appname: Appname,
		Version: Version,
		Commit:  Commit,
	}

	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			n.StartTime = time.Now()
			return nil
		},
	})

	return n, nil
}

internal/globals/globals_test.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package globals

import (
	"testing"

	"go.uber.org/fx"
	"go.uber.org/fx/fxtest"
)

// TestGlobalsNew ensures the globals package initializes correctly
func TestGlobalsNew(t *testing.T) {
	app := fxtest.New(t,
		fx.Provide(New),
		fx.Invoke(func(g *Globals) {
			if g == nil {
				t.Fatal("Globals instance is nil")
			}

			if g.Appname != "vaultik" {
				t.Errorf("Expected Appname to be 'vaultik', got '%s'", g.Appname)
			}

			// Version and Commit will be "dev" and "unknown" by default
			if g.Version == "" {
				t.Error("Version should not be empty")
			}

			if g.Commit == "" {
				t.Error("Commit should not be empty")
			}
		}),
	)

	app.RequireStart()
	app.RequireStop()
}

internal/models/models.go (new file, 73 lines)
@@ -0,0 +1,73 @@
package models

import (
	"time"
)

// FileInfo represents a file in the backup system
type FileInfo struct {
	Path  string
	MTime time.Time
	Size  int64
}

// ChunkInfo represents a content-addressed chunk
type ChunkInfo struct {
	Hash   string // SHA256 hash
	Size   int64
	Offset int64 // Offset within source file
}

// ChunkRef represents a reference to a chunk in a blob or file
type ChunkRef struct {
	ChunkHash string
	Offset    int64
	Length    int64
}

// BlobInfo represents an encrypted blob containing multiple chunks
type BlobInfo struct {
	Hash       string // Hash of encrypted blob
	FinalHash  string // Hash after compression and encryption
	CreatedAt  time.Time
	Size       int64
	ChunkCount int
}

// Snapshot represents a backup snapshot
type Snapshot struct {
	ID           string // ISO8601 timestamp
	Hostname     string
	Version      string
	CreatedAt    time.Time
	FileCount    int64
	ChunkCount   int64
	BlobCount    int64
	TotalSize    int64
	MetadataSize int64
}

// SnapshotMetadata contains the full metadata for a snapshot
type SnapshotMetadata struct {
	Snapshot   *Snapshot
	Files      map[string]*FileInfo
	Chunks     map[string]*ChunkInfo
	Blobs      map[string]*BlobInfo
	FileChunks map[string][]*ChunkRef // path -> chunks
	BlobChunks map[string][]*ChunkRef // blob hash -> chunks
}

// Chunk represents a data chunk for processing
type Chunk struct {
	Data   []byte
	Hash   string
	Offset int64
	Length int64
}

// DirtyPath represents a path marked for backup by inotify
type DirtyPath struct {
	Path      string
	MarkedAt  time.Time
	EventType string // "create", "modify", "delete"
}

internal/models/models_test.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package models

import (
	"testing"
	"time"
)

// TestModelsCompilation ensures all model types can be instantiated
func TestModelsCompilation(t *testing.T) {
	// This test primarily serves as a compilation test
	// to ensure all types are properly defined

	// Test FileInfo
	fi := &FileInfo{
		Path:  "/test/file.txt",
		MTime: time.Now(),
		Size:  1024,
	}
	if fi.Path != "/test/file.txt" {
		t.Errorf("FileInfo.Path not set correctly")
	}

	// Test ChunkInfo
	ci := &ChunkInfo{
		Hash:   "abc123",
		Size:   512,
		Offset: 0,
	}
	if ci.Hash != "abc123" {
		t.Errorf("ChunkInfo.Hash not set correctly")
	}

	// Test BlobInfo
	bi := &BlobInfo{
		Hash:       "blob123",
		FinalHash:  "final123",
		CreatedAt:  time.Now(),
		Size:       1024,
		ChunkCount: 2,
	}
	if bi.Hash != "blob123" {
		t.Errorf("BlobInfo.Hash not set correctly")
	}

	// Test Snapshot
	s := &Snapshot{
		ID:        "2024-01-01T00:00:00Z",
		Hostname:  "test-host",
		Version:   "1.0.0",
		CreatedAt: time.Now(),
	}
	if s.ID != "2024-01-01T00:00:00Z" {
		t.Errorf("Snapshot.ID not set correctly")
	}
}