Compare commits
10 commits: a36b314c79...feature/da

| Author | SHA1 | Date |
|---|---|---|
| | 87acc05a77 | |
| | 07a31a54d4 | |
| | 65da291ddf | |
| | dcf3ec399a | |
| | 495dede1bc | |
| | 1c72a37bc8 | |
| | 60b6746db9 | |
| | f28c8a73b7 | |
| | 1c0f5b8eb2 | |
| | 689109a2b8 | |
@@ -54,7 +54,7 @@ The database tracks five primary entities and their relationships:

#### File (`database.File`)

Represents a file or directory in the backup system. Stores metadata needed for restoration:

- Path, timestamps (mtime, ctime)
- Path, mtime
- Size, mode, ownership (uid, gid)
- Symlink target (if applicable)
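For orientation, this is a rough sketch of the File entity after the ctime removal. It is not part of the diff; the field types for GID and LinkTarget are inferred from how they are used elsewhere in this change, and the authoritative definition is the `type File struct` hunk further down.

```go
// Rough sketch only; see the internal database File struct hunk later in
// this diff for the real definition.
type File struct {
	Path       types.FilePath   // absolute path of the file
	SourcePath types.SourcePath // source directory (for restore path stripping)
	MTime      time.Time
	Size       int64
	Mode       uint32
	UID        uint32
	GID        uint32         // type inferred from usage
	LinkTarget types.FilePath // type inferred from usage; symlink target, if any
}
```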
README.md (55 lines changed)
@@ -150,7 +150,7 @@ passphrase is needed or stored locally.

vaultik [--config <path>] snapshot create [snapshot-names...] [--cron] [--daemon] [--prune]
vaultik [--config <path>] snapshot list [--json]
vaultik [--config <path>] snapshot verify <snapshot-id> [--deep]
vaultik [--config <path>] snapshot purge [--keep-latest | --older-than <duration>] [--force]
vaultik [--config <path>] snapshot purge [--keep-latest | --older-than <duration>] [--name <name>] [--force]
vaultik [--config <path>] snapshot remove <snapshot-id> [--dry-run] [--force]
vaultik [--config <path>] snapshot prune
vaultik [--config <path>] restore <snapshot-id> <target-dir> [paths...]

@@ -170,8 +170,9 @@ vaultik [--config <path>] store info

* Config is located at `/etc/vaultik/config.yml` by default
* Optional snapshot names argument to create specific snapshots (default: all)
* `--cron`: Silent unless error (for crontab)
* `--daemon`: Run continuously with inotify monitoring and periodic scans
* `--daemon`: Run continuously with filesystem monitoring and periodic scans (see [daemon mode](#daemon-mode))
* `--prune`: Delete old snapshots and orphaned blobs after backup
* `--skip-errors`: Skip file read errors (log them loudly but continue)

**snapshot list**: List all snapshots with their timestamps and sizes
* `--json`: Output in JSON format
@@ -180,8 +181,9 @@ vaultik [--config <path>] store info

* `--deep`: Download and verify blob contents (not just existence)

**snapshot purge**: Remove old snapshots based on criteria
* `--keep-latest`: Keep only the most recent snapshot
* `--keep-latest`: Keep the most recent snapshot per snapshot name
* `--older-than`: Remove snapshots older than duration (e.g., 30d, 6mo, 1y)
* `--name`: Filter purge to a specific snapshot name
* `--force`: Skip confirmation prompt
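As an illustration (not part of the diff), the new per-name retention can be exercised like this; the snapshot name `home` is hypothetical:

```sh
# Keep only the most recent snapshot of each configured snapshot name:
vaultik snapshot purge --keep-latest --force

# Remove snapshots of the "home" snapshot that are older than 30 days:
vaultik snapshot purge --older-than 30d --name home
```

Note that `--keep-latest` and `--older-than` are mutually exclusive, so they are shown in separate invocations.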

**snapshot remove**: Remove a specific snapshot
@@ -207,6 +209,53 @@ vaultik [--config <path>] store info

---

## daemon mode

When `--daemon` is passed to `snapshot create`, vaultik runs as a
long-running process that continuously monitors configured directories for
changes and creates backups automatically.

```sh
vaultik --config /etc/vaultik.yaml snapshot create --daemon
```

### how it works

1. **Initial backup**: On startup, a full backup of all configured snapshots
   runs immediately.
2. **Filesystem watching**: All configured snapshot paths are monitored for
   file changes using OS-native filesystem notifications (inotify on Linux,
   FSEvents on macOS, ReadDirectoryChangesW on Windows) via the
   [fsnotify](https://github.com/fsnotify/fsnotify) library.
3. **Periodic backups**: At each `backup_interval` tick, if filesystem
   changes have been detected and `min_time_between_run` has elapsed since
   the last backup, a backup runs for only the affected snapshots.
4. **Full scans**: At each `full_scan_interval` tick, a full backup of all
   snapshots runs regardless of detected changes. This catches any changes
   that filesystem notifications may have missed.
5. **Graceful shutdown**: On SIGTERM or SIGINT, the daemon completes any
   in-progress backup before exiting.
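The scheduling rule in steps 3 and 4 above boils down to a small predicate. The following is a simplified, self-contained sketch with illustrative names; the real logic lives in `internal/vaultik/daemon.go`, shown later in this diff.

```go
package main

import (
	"fmt"
	"time"
)

// shouldRunScheduledBackup mirrors the daemon's backup_interval tick:
// back up only when changes were observed and min_time_between_run has elapsed.
func shouldRunScheduledBackup(changeCount int, lastRun time.Time, minGap time.Duration) bool {
	return changeCount > 0 && time.Since(lastRun) >= minGap
}

func main() {
	last := time.Now().Add(-20 * time.Minute)
	fmt.Println(shouldRunScheduledBackup(3, last, 15*time.Minute))       // true: changes seen, gap elapsed
	fmt.Println(shouldRunScheduledBackup(0, last, 15*time.Minute))       // false: nothing changed
	fmt.Println(shouldRunScheduledBackup(3, time.Now(), 15*time.Minute)) // false: ran too recently
}
```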

### configuration

These config fields control daemon behavior:

```yaml
backup_interval: 1h        # How often to check for changes and run backups
full_scan_interval: 24h    # How often to do a complete scan of all paths
min_time_between_run: 15m  # Minimum gap between consecutive backup runs
```

### notes

* New directories created under watched paths are automatically picked up.
* The daemon uses the same `CreateSnapshot` logic as one-shot mode — each
  backup run is a standard incremental snapshot.
* The `--prune`, `--cron`, and `--skip-errors` flags work in daemon mode
  and apply to each individual backup run.

---

## architecture

### s3 bucket layout
TODO.md (28 lines changed)
@@ -106,23 +106,21 @@ User must have rclone configured separately (via `rclone config`).

---

## Post-1.0 (Daemon Mode)
## Daemon Mode (Complete)

1. Implement inotify file watcher for Linux
   - Watch source directories for changes
   - Track dirty paths in memory
1. [x] Implement cross-platform filesystem watcher (via fsnotify)
   - Watches source directories for changes
   - Tracks dirty paths in memory
   - Automatically watches new directories

1. Implement FSEvents watcher for macOS
   - Watch source directories for changes
   - Track dirty paths in memory
1. [x] Implement backup scheduler in daemon mode
   - Respects backup_interval config
   - Triggers backup when dirty paths exist and interval elapsed
   - Implements full_scan_interval for periodic full scans
   - Respects min_time_between_run to prevent excessive runs

1. Implement backup scheduler in daemon mode
   - Respect backup_interval config
   - Trigger backup when dirty paths exist and interval elapsed
   - Implement full_scan_interval for periodic full scans

1. Add proper signal handling for daemon
1. [x] Add proper signal handling for daemon
   - Graceful shutdown on SIGTERM/SIGINT
   - Complete in-progress backup before exit
   - Completes in-progress backup before exit

1. Write tests for daemon mode
1. [x] Write tests for daemon mode
@@ -17,7 +17,6 @@ Stores metadata about files in the filesystem being backed up.

- `id` (TEXT PRIMARY KEY) - UUID for the file record
- `path` (TEXT NOT NULL UNIQUE) - Absolute file path
- `mtime` (INTEGER NOT NULL) - Modification time as Unix timestamp
- `ctime` (INTEGER NOT NULL) - Change time as Unix timestamp
- `size` (INTEGER NOT NULL) - File size in bytes
- `mode` (INTEGER NOT NULL) - Unix file permissions and type
- `uid` (INTEGER NOT NULL) - User ID of file owner
go.mod (3 lines changed)
@@ -13,6 +13,7 @@ require (
    github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0
    github.com/aws/smithy-go v1.23.2
    github.com/dustin/go-humanize v1.0.1
    github.com/fsnotify/fsnotify v1.9.0
    github.com/gobwas/glob v0.2.3
    github.com/google/uuid v1.6.0
    github.com/johannesboyne/gofakes3 v0.0.0-20250603205740-ed9094be7668

@@ -24,6 +25,7 @@ require (
    github.com/spf13/cobra v1.10.1
    github.com/stretchr/testify v1.11.1
    go.uber.org/fx v1.24.0
    golang.org/x/sync v0.18.0
    golang.org/x/term v0.37.0
    gopkg.in/yaml.v3 v3.0.1
    modernc.org/sqlite v1.38.0

@@ -266,7 +268,6 @@ require (
    golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
    golang.org/x/net v0.47.0 // indirect
    golang.org/x/oauth2 v0.33.0 // indirect
    golang.org/x/sync v0.18.0 // indirect
    golang.org/x/sys v0.38.0 // indirect
    golang.org/x/text v0.31.0 // indirect
    golang.org/x/time v0.14.0 // indirect
go.sum (4 lines changed)
@@ -286,8 +286,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
@@ -11,16 +11,9 @@ import (
    "go.uber.org/fx"
)

// PurgeOptions contains options for the purge command
type PurgeOptions struct {
    KeepLatest bool
    OlderThan  string
    Force      bool
}

// NewPurgeCommand creates the purge command
func NewPurgeCommand() *cobra.Command {
    opts := &PurgeOptions{}
    opts := &vaultik.SnapshotPurgeOptions{}

    cmd := &cobra.Command{
        Use: "purge",

@@ -28,8 +21,15 @@ func NewPurgeCommand() *cobra.Command {
        Long: `Removes snapshots based on age or count criteria.

This command allows you to:
- Keep only the latest snapshot (--keep-latest)
- Keep only the latest snapshot per name (--keep-latest)
- Remove snapshots older than a specific duration (--older-than)
- Filter to a specific snapshot name (--name)

When --keep-latest is used, retention is applied per snapshot name. For example,
if you have snapshots named "home" and "system", --keep-latest keeps the most
recent of each.

Use --name to restrict the purge to a single snapshot name.

Config is located at /etc/vaultik/config.yml by default, but can be overridden by
specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,

@@ -66,7 +66,7 @@ specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,

            // Start the purge operation in a goroutine
            go func() {
                // Run the purge operation
                if err := v.PurgeSnapshots(opts.KeepLatest, opts.OlderThan, opts.Force); err != nil {
                if err := v.PurgeSnapshotsWithOptions(opts); err != nil {
                    if err != context.Canceled {
                        log.Error("Purge operation failed", "error", err)
                        os.Exit(1)

@@ -92,9 +92,10 @@ specifying a path using --config or by setting VAULTIK_CONFIG to a path.`,
        },
    }

    cmd.Flags().BoolVar(&opts.KeepLatest, "keep-latest", false, "Keep only the latest snapshot")
    cmd.Flags().BoolVar(&opts.KeepLatest, "keep-latest", false, "Keep only the latest snapshot per name")
    cmd.Flags().StringVar(&opts.OlderThan, "older-than", "", "Remove snapshots older than duration (e.g. 30d, 6m, 1y)")
    cmd.Flags().BoolVar(&opts.Force, "force", false, "Skip confirmation prompts")
    cmd.Flags().StringVar(&opts.Name, "name", "", "Filter purge to a specific snapshot name")

    return cmd
}
@@ -167,21 +167,25 @@ func newSnapshotListCommand() *cobra.Command {

// newSnapshotPurgeCommand creates the 'snapshot purge' subcommand
func newSnapshotPurgeCommand() *cobra.Command {
    var keepLatest bool
    var olderThan string
    var force bool
    opts := &vaultik.SnapshotPurgeOptions{}

    cmd := &cobra.Command{
        Use:   "purge",
        Short: "Purge old snapshots",
        Long:  "Removes snapshots based on age or count criteria",
        Args:  cobra.NoArgs,
        Long: `Removes snapshots based on age or count criteria.

When --keep-latest is used, retention is applied per snapshot name. For example,
if you have snapshots named "home" and "system", --keep-latest keeps the most
recent of each.

Use --name to restrict the purge to a single snapshot name.`,
        Args: cobra.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            // Validate flags
            if !keepLatest && olderThan == "" {
            if !opts.KeepLatest && opts.OlderThan == "" {
                return fmt.Errorf("must specify either --keep-latest or --older-than")
            }
            if keepLatest && olderThan != "" {
            if opts.KeepLatest && opts.OlderThan != "" {
                return fmt.Errorf("cannot specify both --keep-latest and --older-than")
            }

@@ -205,7 +209,7 @@ func newSnapshotPurgeCommand() *cobra.Command {
            lc.Append(fx.Hook{
                OnStart: func(ctx context.Context) error {
                    go func() {
                        if err := v.PurgeSnapshots(keepLatest, olderThan, force); err != nil {
                        if err := v.PurgeSnapshotsWithOptions(opts); err != nil {
                            if err != context.Canceled {
                                log.Error("Failed to purge snapshots", "error", err)
                                os.Exit(1)

@@ -228,9 +232,10 @@ func newSnapshotPurgeCommand() *cobra.Command {
        },
    }

    cmd.Flags().BoolVar(&keepLatest, "keep-latest", false, "Keep only the latest snapshot")
    cmd.Flags().StringVar(&olderThan, "older-than", "", "Remove snapshots older than duration (e.g., 30d, 6m, 1y)")
    cmd.Flags().BoolVar(&force, "force", false, "Skip confirmation prompt")
    cmd.Flags().BoolVar(&opts.KeepLatest, "keep-latest", false, "Keep only the latest snapshot per name")
    cmd.Flags().StringVar(&opts.OlderThan, "older-than", "", "Remove snapshots older than duration (e.g., 30d, 6m, 1y)")
    cmd.Flags().BoolVar(&opts.Force, "force", false, "Skip confirmation prompt")
    cmd.Flags().StringVar(&opts.Name, "name", "", "Filter purge to a specific snapshot name")

    return cmd
}
@@ -29,7 +29,6 @@ func TestCascadeDeleteDebug(t *testing.T) {
    file := &File{
        Path:  "/cascade-test.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,

@@ -22,7 +22,6 @@ func TestChunkFileRepository(t *testing.T) {
    file1 := &File{
        Path:  "/file1.txt",
        MTime: testTime,
        CTime: testTime,
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -37,7 +36,6 @@ func TestChunkFileRepository(t *testing.T) {
    file2 := &File{
        Path:  "/file2.txt",
        MTime: testTime,
        CTime: testTime,
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -138,9 +136,9 @@ func TestChunkFileRepositoryComplexDeduplication(t *testing.T) {

    // Create test files
    testTime := time.Now().Truncate(time.Second)
    file1 := &File{Path: "/file1.txt", MTime: testTime, CTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
    file2 := &File{Path: "/file2.txt", MTime: testTime, CTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
    file3 := &File{Path: "/file3.txt", MTime: testTime, CTime: testTime, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}
    file1 := &File{Path: "/file1.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
    file2 := &File{Path: "/file2.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
    file3 := &File{Path: "/file3.txt", MTime: testTime, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}

    if err := fileRepo.Create(ctx, nil, file1); err != nil {
        t.Fatalf("failed to create file1: %v", err)

@@ -22,7 +22,6 @@ func TestFileChunkRepository(t *testing.T) {
    file := &File{
        Path:  "/test/file.txt",
        MTime: testTime,
        CTime: testTime,
        Size:  3072,
        Mode:  0644,
        UID:   1000,
@@ -135,7 +134,6 @@ func TestFileChunkRepositoryMultipleFiles(t *testing.T) {
    file := &File{
        Path:  types.FilePath(path),
        MTime: testTime,
        CTime: testTime,
        Size:  2048,
        Mode:  0644,
        UID:   1000,
@@ -25,12 +25,11 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
    }

    query := `
        INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        INSERT INTO files (id, path, source_path, mtime, size, mode, uid, gid, link_target)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(path) DO UPDATE SET
            source_path = excluded.source_path,
            mtime = excluded.mtime,
            ctime = excluded.ctime,
            size = excluded.size,
            mode = excluded.mode,
            uid = excluded.uid,

@@ -42,10 +41,10 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
    var idStr string
    var err error
    if tx != nil {
        LogSQL("Execute", query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String())
        err = tx.QueryRowContext(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
        LogSQL("Execute", query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String())
        err = tx.QueryRowContext(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
    } else {
        err = r.db.QueryRowWithLog(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
        err = r.db.QueryRowWithLog(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
    }

    if err != nil {

@@ -63,7 +62,7 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err

func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, error) {
    query := `
        SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
        SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
        FROM files
        WHERE path = ?
    `

@@ -82,7 +81,7 @@ func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, err
// GetByID retrieves a file by its UUID
func (r *FileRepository) GetByID(ctx context.Context, id types.FileID) (*File, error) {
    query := `
        SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
        SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
        FROM files
        WHERE id = ?
    `

@@ -100,7 +99,7 @@ func (r *FileRepository) GetByID(ctx context.Context, id types.FileID) (*File, e

func (r *FileRepository) GetByPathTx(ctx context.Context, tx *sql.Tx, path string) (*File, error) {
    query := `
        SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
        SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
        FROM files
        WHERE path = ?
    `

@@ -123,7 +122,7 @@ func (r *FileRepository) GetByPathTx(ctx context.Context, tx *sql.Tx, path strin
func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
    var file File
    var idStr, pathStr, sourcePathStr string
    var mtimeUnix, ctimeUnix int64
    var mtimeUnix int64
    var linkTarget sql.NullString

    err := row.Scan(
@@ -131,7 +130,6 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
        &pathStr,
        &sourcePathStr,
        &mtimeUnix,
        &ctimeUnix,
        &file.Size,
        &file.Mode,
        &file.UID,
@@ -149,7 +147,6 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
    file.Path = types.FilePath(pathStr)
    file.SourcePath = types.SourcePath(sourcePathStr)
    file.MTime = time.Unix(mtimeUnix, 0).UTC()
    file.CTime = time.Unix(ctimeUnix, 0).UTC()
    if linkTarget.Valid {
        file.LinkTarget = types.FilePath(linkTarget.String)
    }
@@ -161,7 +158,7 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
    var file File
    var idStr, pathStr, sourcePathStr string
    var mtimeUnix, ctimeUnix int64
    var mtimeUnix int64
    var linkTarget sql.NullString

    err := rows.Scan(
@@ -169,7 +166,6 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
        &pathStr,
        &sourcePathStr,
        &mtimeUnix,
        &ctimeUnix,
        &file.Size,
        &file.Mode,
        &file.UID,
@@ -187,7 +183,6 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
    file.Path = types.FilePath(pathStr)
    file.SourcePath = types.SourcePath(sourcePathStr)
    file.MTime = time.Unix(mtimeUnix, 0).UTC()
    file.CTime = time.Unix(ctimeUnix, 0).UTC()
    if linkTarget.Valid {
        file.LinkTarget = types.FilePath(linkTarget.String)
    }
@@ -197,7 +192,7 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {

func (r *FileRepository) ListModifiedSince(ctx context.Context, since time.Time) ([]*File, error) {
    query := `
        SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
        SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
        FROM files
        WHERE mtime >= ?
        ORDER BY path
@@ -258,7 +253,7 @@ func (r *FileRepository) DeleteByID(ctx context.Context, tx *sql.Tx, id types.Fi

func (r *FileRepository) ListByPrefix(ctx context.Context, prefix string) ([]*File, error) {
    query := `
        SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
        SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
        FROM files
        WHERE path LIKE ? || '%'
        ORDER BY path
@@ -285,7 +280,7 @@ func (r *FileRepository) ListByPrefix(ctx context.Context, prefix string) ([]*Fi
// ListAll returns all files in the database
func (r *FileRepository) ListAll(ctx context.Context) ([]*File, error) {
    query := `
        SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
        SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
        FROM files
        ORDER BY path
    `
@@ -315,7 +310,7 @@ func (r *FileRepository) CreateBatch(ctx context.Context, tx *sql.Tx, files []*F
        return nil
    }

    // Each File has 10 values, so batch at 100 to be safe with SQLite's variable limit
    // Each File has 9 values, so batch at 100 to be safe with SQLite's variable limit
    const batchSize = 100

    for i := 0; i < len(files); i += batchSize {
@@ -325,19 +320,18 @@ func (r *FileRepository) CreateBatch(ctx context.Context, tx *sql.Tx, files []*F
        }
        batch := files[i:end]

        query := `INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target) VALUES `
        args := make([]interface{}, 0, len(batch)*10)
        query := `INSERT INTO files (id, path, source_path, mtime, size, mode, uid, gid, link_target) VALUES `
        args := make([]interface{}, 0, len(batch)*9)
        for j, f := range batch {
            if j > 0 {
                query += ", "
            }
            query += "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
            args = append(args, f.ID.String(), f.Path.String(), f.SourcePath.String(), f.MTime.Unix(), f.CTime.Unix(), f.Size, f.Mode, f.UID, f.GID, f.LinkTarget.String())
            query += "(?, ?, ?, ?, ?, ?, ?, ?, ?)"
            args = append(args, f.ID.String(), f.Path.String(), f.SourcePath.String(), f.MTime.Unix(), f.Size, f.Mode, f.UID, f.GID, f.LinkTarget.String())
        }
        query += ` ON CONFLICT(path) DO UPDATE SET
            source_path = excluded.source_path,
            mtime = excluded.mtime,
            ctime = excluded.ctime,
            size = excluded.size,
            mode = excluded.mode,
            uid = excluded.uid,
@@ -39,7 +39,6 @@ func TestFileRepository(t *testing.T) {
    file := &File{
        Path:  "/test/file.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -124,7 +123,6 @@ func TestFileRepositorySymlink(t *testing.T) {
    symlink := &File{
        Path:  "/test/link",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  0,
        Mode:  uint32(0777 | os.ModeSymlink),
        UID:   1000,
@@ -161,7 +159,6 @@ func TestFileRepositoryTransaction(t *testing.T) {
    file := &File{
        Path:  "/test/tx_file.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -17,7 +17,6 @@ type File struct {
    Path       types.FilePath   // Absolute path of the file
    SourcePath types.SourcePath // The source directory this file came from (for restore path stripping)
    MTime      time.Time
    CTime      time.Time
    Size       int64
    Mode       uint32
    UID        uint32
@@ -23,7 +23,6 @@ func TestRepositoriesTransaction(t *testing.T) {
    file := &File{
        Path:  "/test/tx_file.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -146,7 +145,6 @@ func TestRepositoriesTransactionRollback(t *testing.T) {
    file := &File{
        Path:  "/test/rollback_file.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -202,7 +200,6 @@ func TestRepositoriesReadTransaction(t *testing.T) {
    file := &File{
        Path:  "/test/read_file.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -226,7 +223,6 @@ func TestRepositoriesReadTransaction(t *testing.T) {
    _ = repos.Files.Create(ctx, tx, &File{
        Path:  "/test/should_fail.txt",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  0,
        Mode:  0644,
        UID:   1000,

@@ -23,7 +23,6 @@ func TestFileRepositoryUUIDGeneration(t *testing.T) {
    {
        Path:  "/file1.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -32,7 +31,6 @@ func TestFileRepositoryUUIDGeneration(t *testing.T) {
    {
        Path:  "/file2.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  2048,
        Mode:  0644,
        UID:   1000,
@@ -72,7 +70,6 @@ func TestFileRepositoryGetByID(t *testing.T) {
    file := &File{
        Path:  "/test.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -120,7 +117,6 @@ func TestOrphanedFileCleanup(t *testing.T) {
    file1 := &File{
        Path:  "/orphaned.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -129,7 +125,6 @@ func TestOrphanedFileCleanup(t *testing.T) {
    file2 := &File{
        Path:  "/referenced.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  2048,
        Mode:  0644,
        UID:   1000,
@@ -218,7 +213,6 @@ func TestOrphanedChunkCleanup(t *testing.T) {
    file := &File{
        Path:  "/test.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -348,7 +342,6 @@ func TestFileChunkRepositoryWithUUIDs(t *testing.T) {
    file := &File{
        Path:  "/test.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  3072,
        Mode:  0644,
        UID:   1000,
@@ -419,7 +412,6 @@ func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
    file1 := &File{
        Path:  "/file1.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -428,7 +420,6 @@ func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
    file2 := &File{
        Path:  "/file2.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -586,7 +577,6 @@ func TestComplexOrphanedDataScenario(t *testing.T) {
    files[i] = &File{
        Path:  types.FilePath(fmt.Sprintf("/file%d.txt", i)),
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -678,7 +668,6 @@ func TestCascadeDelete(t *testing.T) {
    file := &File{
        Path:  "/cascade-test.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -750,7 +739,6 @@ func TestTransactionIsolation(t *testing.T) {
    file := &File{
        Path:  "/tx-test.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -812,7 +800,6 @@ func TestConcurrentOrphanedCleanup(t *testing.T) {
    file := &File{
        Path:  types.FilePath(fmt.Sprintf("/concurrent-%d.txt", i)),
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,

@@ -18,7 +18,6 @@ func TestOrphanedFileCleanupDebug(t *testing.T) {
    file1 := &File{
        Path:  "/orphaned.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -27,7 +26,6 @@ func TestOrphanedFileCleanupDebug(t *testing.T) {
    file2 := &File{
        Path:  "/referenced.txt",
        MTime: time.Now().Truncate(time.Second),
        CTime: time.Now().Truncate(time.Second),
        Size:  2048,
        Mode:  0644,
        UID:   1000,

@@ -29,7 +29,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
    file: &File{
        Path:  "",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -42,7 +41,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
    file: &File{
        Path:  types.FilePath("/" + strings.Repeat("a", 4096)),
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -55,7 +53,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
    file: &File{
        Path:  "/test/file with spaces and 特殊文字.txt",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -68,7 +65,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
    file: &File{
        Path:  "/empty.txt",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  0,
        Mode:  0644,
        UID:   1000,
@@ -81,7 +77,6 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
    file: &File{
        Path:  "/link",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  0,
        Mode:  0777 | 0120000, // symlink mode
        UID:   1000,
@@ -123,7 +118,6 @@ func TestDuplicateHandling(t *testing.T) {
    file1 := &File{
        Path:  "/duplicate.txt",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -132,7 +126,6 @@ func TestDuplicateHandling(t *testing.T) {
    file2 := &File{
        Path:  "/duplicate.txt", // Same path
        MTime: time.Now().Add(time.Hour),
        CTime: time.Now().Add(time.Hour),
        Size:  2048,
        Mode:  0644,
        UID:   1000,
@@ -192,7 +185,6 @@ func TestDuplicateHandling(t *testing.T) {
    file := &File{
        Path:  "/test-dup-fc.txt",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -244,7 +236,6 @@ func TestNullHandling(t *testing.T) {
    file := &File{
        Path:  "/regular.txt",
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -349,7 +340,6 @@ func TestLargeDatasets(t *testing.T) {
    file := &File{
        Path:  types.FilePath(fmt.Sprintf("/large/file%05d.txt", i)),
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  int64(i * 1024),
        Mode:  0644,
        UID:   uint32(1000 + (i % 10)),
@@ -474,7 +464,6 @@ func TestQueryInjection(t *testing.T) {
    file := &File{
        Path:  types.FilePath(injection),
        MTime: time.Now(),
        CTime: time.Now(),
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -513,7 +502,6 @@ func TestTimezoneHandling(t *testing.T) {
    file := &File{
        Path:  "/timezone-test.txt",
        MTime: nyTime,
        CTime: nyTime,
        Size:  1024,
        Mode:  0644,
        UID:   1000,
@@ -8,7 +8,6 @@ CREATE TABLE IF NOT EXISTS files (
    path TEXT NOT NULL UNIQUE,
    source_path TEXT NOT NULL DEFAULT '', -- The source directory this file came from (for restore path stripping)
    mtime INTEGER NOT NULL,
    ctime INTEGER NOT NULL,
    size INTEGER NOT NULL,
    mode INTEGER NOT NULL,
    uid INTEGER NOT NULL,
@@ -103,7 +102,7 @@ CREATE TABLE IF NOT EXISTS snapshot_files (
    file_id TEXT NOT NULL,
    PRIMARY KEY (snapshot_id, file_id),
    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
    FOREIGN KEY (file_id) REFERENCES files(id)
    FOREIGN KEY (file_id) REFERENCES files(id) ON DELETE CASCADE
);

-- Index for efficient file lookups (used in orphan detection)
@@ -116,7 +115,7 @@ CREATE TABLE IF NOT EXISTS snapshot_blobs (
    blob_hash TEXT NOT NULL,
    PRIMARY KEY (snapshot_id, blob_id),
    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
    FOREIGN KEY (blob_id) REFERENCES blobs(id)
    FOREIGN KEY (blob_id) REFERENCES blobs(id) ON DELETE CASCADE
);

-- Index for efficient blob lookups (used in orphan detection)
@@ -130,7 +129,7 @@ CREATE TABLE IF NOT EXISTS uploads (
    size INTEGER NOT NULL,
    duration_ms INTEGER NOT NULL,
    FOREIGN KEY (blob_hash) REFERENCES blobs(blob_hash),
    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id)
    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE
);

-- Index for efficient snapshot lookups
@@ -345,9 +345,8 @@ func (b *BackupEngine) Backup(ctx context.Context, fsys fs.FS, root string) (str
        Size:  info.Size(),
        Mode:  uint32(info.Mode()),
        MTime: info.ModTime(),
        CTime: info.ModTime(), // Use mtime as ctime for test
        UID:   1000, // Default UID for test
        GID:   1000, // Default GID for test
        UID:   1000, // Default UID for test
        GID:   1000, // Default GID for test
    }
    err = b.repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
        return b.repos.Files.Create(ctx, tx, file)

@@ -785,7 +785,6 @@ func (s *Scanner) checkFileInMemory(path string, info os.FileInfo, knownFiles ma
        Path:       types.FilePath(path),
        SourcePath: types.SourcePath(s.currentSourcePath), // Store source directory for restore path stripping
        MTime:      info.ModTime(),
        CTime:      info.ModTime(), // afero doesn't provide ctime
        Size:       info.Size(),
        Mode:       uint32(info.Mode()),
        UID:        uid,
internal/vaultik/daemon.go (new file, 434 lines)
@@ -0,0 +1,434 @@
package vaultik

import (
    "context"
    "fmt"
    "os"
    "os/signal"
    "path/filepath"
    "strings"
    "sync"
    "syscall"
    "time"

    "git.eeqj.de/sneak/vaultik/internal/log"
    "github.com/fsnotify/fsnotify"
)

// daemonMinBackupInterval is the absolute minimum time allowed between backup runs,
// regardless of config, to prevent runaway backup loops.
const daemonMinBackupInterval = 1 * time.Minute

// daemonShutdownTimeout is the maximum time to wait for an in-progress backup
// to complete during graceful shutdown before force-exiting.
const daemonShutdownTimeout = 5 * time.Minute

// RunDaemon runs vaultik in daemon mode: it watches configured directories for
// changes using filesystem notifications, runs periodic backups at the configured
// interval, and performs full scans at the full_scan_interval. It handles
// SIGTERM/SIGINT for graceful shutdown, completing any in-progress backup before
// exiting.
func (v *Vaultik) RunDaemon(opts *SnapshotCreateOptions) error {
    backupInterval := v.Config.BackupInterval
    if backupInterval < daemonMinBackupInterval {
        backupInterval = daemonMinBackupInterval
    }

    minTimeBetween := v.Config.MinTimeBetweenRun
    if minTimeBetween < daemonMinBackupInterval {
        minTimeBetween = daemonMinBackupInterval
    }

    fullScanInterval := v.Config.FullScanInterval
    if fullScanInterval <= 0 {
        fullScanInterval = 24 * time.Hour
    }

    log.Info("Starting daemon mode",
        "backup_interval", backupInterval,
        "min_time_between_run", minTimeBetween,
        "full_scan_interval", fullScanInterval,
    )
    v.printfStdout("Daemon mode started\n")
    v.printfStdout(" Backup interval: %s\n", backupInterval)
    v.printfStdout(" Min time between: %s\n", minTimeBetween)
    v.printfStdout(" Full scan interval: %s\n", fullScanInterval)

    // Create a daemon-scoped context that we cancel on signal.
    ctx, cancel := context.WithCancel(v.ctx)
    defer cancel()

    // Set up signal handling for graceful shutdown.
    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)

    // Tracker for filesystem change events.
    tracker := newChangeTracker()

    // Start the filesystem watcher.
    watcher, err := v.startWatcher(ctx, tracker)
    if err != nil {
        return fmt.Errorf("starting filesystem watcher: %w", err)
    }
    defer func() { _ = watcher.Close() }()

    // Timers
    backupTicker := time.NewTicker(backupInterval)
    defer backupTicker.Stop()

    fullScanTicker := time.NewTicker(fullScanInterval)
    defer fullScanTicker.Stop()

    var lastBackupTime time.Time
    backupRunning := make(chan struct{}, 1) // semaphore: 1 = backup in progress

    // Run an initial full backup immediately on startup.
    log.Info("Running initial backup on daemon startup")
    v.printfStdout("Running initial backup...\n")
    if err := v.runDaemonBackup(ctx, opts, tracker, false); err != nil {
        if ctx.Err() != nil {
            return nil // context cancelled, shutting down
        }
        log.Error("Initial backup failed", "error", err)
        v.printfStderr("Initial backup failed: %v\n", err)
        // Continue running — next scheduled backup may succeed.
    } else {
        lastBackupTime = time.Now()
        tracker.reset()
    }

    v.printfStdout("Watching for changes...\n")

    for {
        select {
        case <-ctx.Done():
            log.Info("Daemon context cancelled, shutting down")
            return nil

        case sig := <-sigCh:
            log.Info("Received signal, initiating graceful shutdown", "signal", sig)
            v.printfStdout("\nReceived %s, shutting down...\n", sig)
            cancel()

            // Wait for any in-progress backup to finish.
            select {
            case backupRunning <- struct{}{}:
                // No backup running, we can exit immediately.
                <-backupRunning
            default:
                // Backup is running, wait for it to complete.
                v.printfStdout("Waiting for in-progress backup to complete...\n")
                shutdownTimer := time.NewTimer(daemonShutdownTimeout)
                select {
                case backupRunning <- struct{}{}:
                    <-backupRunning
                    shutdownTimer.Stop()
                case <-shutdownTimer.C:
                    log.Warn("Shutdown timeout exceeded, forcing exit")
                    v.printfStderr("Shutdown timeout exceeded, forcing exit\n")
                }
            }
            return nil

        case <-backupTicker.C:
            // Periodic backup tick. Only run if there are changes and enough
            // time has elapsed since the last run.
            if !tracker.hasChanges() {
                log.Debug("Backup tick: no changes detected, skipping")
                continue
            }
            if time.Since(lastBackupTime) < minTimeBetween {
                log.Debug("Backup tick: too soon since last backup",
                    "last_backup", lastBackupTime,
                    "min_interval", minTimeBetween,
                )
                continue
            }

            // Try to acquire the backup semaphore (non-blocking).
            select {
            case backupRunning <- struct{}{}:
            default:
                log.Debug("Backup tick: backup already in progress, skipping")
                continue
            }

            log.Info("Running scheduled backup", "changes", tracker.changeCount())
            v.printfStdout("Running scheduled backup (%d changes detected)...\n", tracker.changeCount())
            if err := v.runDaemonBackup(ctx, opts, tracker, false); err != nil {
                if ctx.Err() != nil {
                    <-backupRunning
                    return nil
                }
                log.Error("Scheduled backup failed", "error", err)
                v.printfStderr("Scheduled backup failed: %v\n", err)
            } else {
                lastBackupTime = time.Now()
                tracker.reset()
            }
            <-backupRunning

        case <-fullScanTicker.C:
            // Full scan — ignore whether changes were detected; do a complete scan.
            if time.Since(lastBackupTime) < minTimeBetween {
                log.Debug("Full scan tick: too soon since last backup, deferring")
                continue
            }

            select {
            case backupRunning <- struct{}{}:
            default:
                log.Debug("Full scan tick: backup already in progress, skipping")
                continue
            }

            log.Info("Running full periodic scan")
            v.printfStdout("Running full periodic scan...\n")
            if err := v.runDaemonBackup(ctx, opts, tracker, true); err != nil {
                if ctx.Err() != nil {
                    <-backupRunning
                    return nil
                }
                log.Error("Full scan backup failed", "error", err)
                v.printfStderr("Full scan backup failed: %v\n", err)
            } else {
                lastBackupTime = time.Now()
                tracker.reset()
            }
            <-backupRunning
        }
    }
}

// runDaemonBackup executes a single backup run within the daemon loop.
// If fullScan is true, all snapshots are processed regardless of tracked changes.
// Otherwise, only snapshots whose paths overlap with tracked changes are processed.
func (v *Vaultik) runDaemonBackup(ctx context.Context, opts *SnapshotCreateOptions, tracker *changeTracker, fullScan bool) error {
    startTime := time.Now()

    // Build a one-shot create options for this run.
    runOpts := &SnapshotCreateOptions{
        Cron:       opts.Cron,
        Prune:      opts.Prune,
        SkipErrors: opts.SkipErrors,
    }

    if !fullScan {
        // Filter to only snapshots whose paths had changes.
        changedPaths := tracker.changedPaths()
        affected := v.snapshotsAffectedByChanges(changedPaths)
        if len(affected) == 0 {
            log.Debug("No snapshots affected by changes")
            return nil
        }
        runOpts.Snapshots = affected
        log.Info("Running incremental backup for affected snapshots", "snapshots", affected)
    }
    // fullScan: leave runOpts.Snapshots empty → CreateSnapshot processes all.

    // Use a child context so cancellation propagates but we can still finish
    // if the parent hasn't been cancelled.
    childCtx, childCancel := context.WithCancel(ctx)
    defer childCancel()

    // Temporarily swap the Vaultik context.
    origCtx := v.ctx
    v.ctx = childCtx
    defer func() { v.ctx = origCtx }()

    if err := v.CreateSnapshot(runOpts); err != nil {
        return fmt.Errorf("backup run failed: %w", err)
    }

    log.Info("Daemon backup complete", "duration", time.Since(startTime))
    v.printfStdout("Backup complete in %s\n", formatDuration(time.Since(startTime)))
    return nil
}

// snapshotsAffectedByChanges returns the names of configured snapshots whose
// paths overlap with any of the changed paths.
func (v *Vaultik) snapshotsAffectedByChanges(changedPaths []string) []string {
    var affected []string
    for _, snapName := range v.Config.SnapshotNames() {
        snapCfg := v.Config.Snapshots[snapName]
        for _, snapPath := range snapCfg.Paths {
            absSnapPath, err := filepath.Abs(snapPath)
            if err != nil {
                absSnapPath = snapPath
            }
            for _, changed := range changedPaths {
                if isSubpath(changed, absSnapPath) {
                    affected = append(affected, snapName)
                    goto nextSnapshot
                }
            }
        }
    nextSnapshot:
    }
    return affected
}

// isSubpath returns true if child is under parent (or equal to it).
func isSubpath(child, parent string) bool {
    // Normalize both paths.
    child = filepath.Clean(child)
    parent = filepath.Clean(parent)
    if child == parent {
        return true
    }
    // Ensure parent ends with a separator for prefix matching,
    // unless parent is the root directory (which already ends with /).
    prefix := parent
    if !strings.HasSuffix(prefix, string(filepath.Separator)) {
        prefix += string(filepath.Separator)
    }
    return strings.HasPrefix(child, prefix)
}

// startWatcher creates an fsnotify watcher and adds all configured snapshot paths.
// It spawns a goroutine that reads events and feeds the change tracker.
func (v *Vaultik) startWatcher(ctx context.Context, tracker *changeTracker) (*fsnotify.Watcher, error) {
    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        return nil, fmt.Errorf("creating watcher: %w", err)
    }

    // Collect unique absolute paths to watch.
    watchPaths := make(map[string]struct{})
    for _, snapName := range v.Config.SnapshotNames() {
        snapCfg := v.Config.Snapshots[snapName]
        for _, p := range snapCfg.Paths {
            absPath, err := filepath.Abs(p)
            if err != nil {
                log.Warn("Failed to resolve absolute path for watch", "path", p, "error", err)
                continue
            }
            watchPaths[absPath] = struct{}{}
        }
    }

    // Add paths to watcher. Walk the top-level to add subdirectories
    // since fsnotify doesn't recurse automatically.
    for p := range watchPaths {
        if err := v.addWatchRecursive(watcher, p); err != nil {
            log.Warn("Failed to watch path", "path", p, "error", err)
            // Non-fatal: the path might not exist yet.
        }
    }

    // Spawn the event reader goroutine.
    go v.watcherLoop(ctx, watcher, tracker)

    return watcher, nil
}

// addWatchRecursive walks a directory tree and adds each directory to the watcher.
func (v *Vaultik) addWatchRecursive(watcher *fsnotify.Watcher, root string) error {
    return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            // Can't read — skip this subtree.
            if info != nil && info.IsDir() {
                return filepath.SkipDir
            }
            return nil
        }
        if info.IsDir() {
            // Skip common directories that don't need watching.
            base := filepath.Base(path)
            if base == ".git" || base == "node_modules" || base == "__pycache__" {
                return filepath.SkipDir
            }
            if err := watcher.Add(path); err != nil {
                log.Debug("Failed to watch directory", "path", path, "error", err)
                // Non-fatal: continue walking.
            }
        }
        return nil
    })
}

// watcherLoop reads filesystem events from the watcher and records them
// in the change tracker. It runs until the context is cancelled.
func (v *Vaultik) watcherLoop(ctx context.Context, watcher *fsnotify.Watcher, tracker *changeTracker) {
    for {
        select {
        case <-ctx.Done():
            return
        case event, ok := <-watcher.Events:
            if !ok {
                return
            }
            // Only track write/create/remove/rename events.
            if event.Op&(fsnotify.Write|fsnotify.Create|fsnotify.Remove|fsnotify.Rename) != 0 {
                tracker.recordChange(event.Name)
                log.Debug("Filesystem change detected", "path", event.Name, "op", event.Op)
            }
            // If a new directory was created, watch it too.
            if event.Op&fsnotify.Create != 0 {
                if info, err := os.Stat(event.Name); err == nil && info.IsDir() {
                    if err := v.addWatchRecursive(watcher, event.Name); err != nil {
                        log.Debug("Failed to watch new directory", "path", event.Name, "error", err)
                    }
                }
            }
        case err, ok := <-watcher.Errors:
            if !ok {
                return
            }
            log.Warn("Filesystem watcher error", "error", err)
        }
    }
}

// changeTracker records filesystem paths that have been modified since the
// last backup. It is safe for concurrent use.
type changeTracker struct {
    mu      sync.Mutex
    changes map[string]time.Time // path → last change time
}

// newChangeTracker creates a new empty change tracker.
func newChangeTracker() *changeTracker {
    return &changeTracker{
        changes: make(map[string]time.Time),
    }
}

// recordChange records that a path has been modified.
func (ct *changeTracker) recordChange(path string) {
    ct.mu.Lock()
    ct.changes[path] = time.Now()
    ct.mu.Unlock()
}

// hasChanges returns true if any changes have been recorded.
func (ct *changeTracker) hasChanges() bool {
    ct.mu.Lock()
    defer ct.mu.Unlock()
    return len(ct.changes) > 0
}

// changeCount returns the number of unique changed paths.
func (ct *changeTracker) changeCount() int {
    ct.mu.Lock()
    defer ct.mu.Unlock()
    return len(ct.changes)
}

// changedPaths returns all changed paths.
func (ct *changeTracker) changedPaths() []string {
    ct.mu.Lock()
    defer ct.mu.Unlock()
    paths := make([]string, 0, len(ct.changes))
    for p := range ct.changes {
        paths = append(paths, p)
    }
    return paths
}

// reset clears all recorded changes.
func (ct *changeTracker) reset() {
    ct.mu.Lock()
    ct.changes = make(map[string]time.Time)
    ct.mu.Unlock()
}
internal/vaultik/daemon_test.go (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
package vaultik
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.eeqj.de/sneak/vaultik/internal/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewChangeTracker(t *testing.T) {
|
||||
	ct := newChangeTracker()
	require.NotNil(t, ct)
	assert.False(t, ct.hasChanges())
	assert.Equal(t, 0, ct.changeCount())
	assert.Empty(t, ct.changedPaths())
}

func TestChangeTrackerRecordChange(t *testing.T) {
	ct := newChangeTracker()

	ct.recordChange("/home/user/file1.txt")
	assert.True(t, ct.hasChanges())
	assert.Equal(t, 1, ct.changeCount())

	ct.recordChange("/home/user/file2.txt")
	assert.Equal(t, 2, ct.changeCount())

	// Duplicate path should update time but not increase count.
	ct.recordChange("/home/user/file1.txt")
	assert.Equal(t, 2, ct.changeCount())

	paths := ct.changedPaths()
	assert.Len(t, paths, 2)
	assert.Contains(t, paths, "/home/user/file1.txt")
	assert.Contains(t, paths, "/home/user/file2.txt")
}

func TestChangeTrackerReset(t *testing.T) {
	ct := newChangeTracker()

	ct.recordChange("/home/user/file1.txt")
	ct.recordChange("/home/user/file2.txt")
	assert.Equal(t, 2, ct.changeCount())

	ct.reset()
	assert.False(t, ct.hasChanges())
	assert.Equal(t, 0, ct.changeCount())
	assert.Empty(t, ct.changedPaths())
}

func TestChangeTrackerConcurrency(t *testing.T) {
	ct := newChangeTracker()
	done := make(chan struct{})

	// Write from multiple goroutines simultaneously.
	for i := 0; i < 10; i++ {
		go func(n int) {
			for j := 0; j < 100; j++ {
				ct.recordChange("/path/" + string(rune('a'+n)))
			}
			done <- struct{}{}
		}(i)
	}

	// Also read concurrently.
	go func() {
		for i := 0; i < 100; i++ {
			_ = ct.hasChanges()
			_ = ct.changeCount()
			_ = ct.changedPaths()
		}
		done <- struct{}{}
	}()

	// Wait for all goroutines.
	for i := 0; i < 11; i++ {
		<-done
	}

	assert.True(t, ct.hasChanges())
	assert.LessOrEqual(t, ct.changeCount(), 10) // 10 unique paths
}

func TestChangeTrackerRecordTimestamp(t *testing.T) {
	ct := newChangeTracker()

	before := time.Now()
	ct.recordChange("/some/path")
	after := time.Now()

	ct.mu.Lock()
	ts := ct.changes["/some/path"]
	ct.mu.Unlock()

	assert.False(t, ts.Before(before))
	assert.False(t, ts.After(after))
}

func TestIsSubpath(t *testing.T) {
	tests := []struct {
		child    string
		parent   string
		expected bool
	}{
		{"/home/user/file.txt", "/home/user", true},
		{"/home/user", "/home/user", true},
		{"/home/user/deep/nested/file.txt", "/home/user", true},
		{"/home/other/file.txt", "/home/user", false},
		{"/home/username/file.txt", "/home/user", false}, // not a subpath, just prefix match
		{"/etc/config", "/home/user", false},
		{"/", "/", true},
		{"/a", "/", true},
		{"/a/b", "/a", true},
		{"/ab", "/a", false},
	}

	for _, tt := range tests {
		t.Run(tt.child+"_under_"+tt.parent, func(t *testing.T) {
			result := isSubpath(tt.child, tt.parent)
			assert.Equal(t, tt.expected, result)
		})
	}
}

func TestSnapshotsAffectedByChanges(t *testing.T) {
	// We can't easily test this without a full Vaultik instance with config,
	// but we can verify the helper function isSubpath which it depends on.
	// The full integration is tested via the daemon integration test.

	// Verify basic subpath logic used by snapshotsAffectedByChanges.
	assert.True(t, isSubpath("/home/user/docs/report.txt", "/home/user"))
	assert.False(t, isSubpath("/var/log/syslog", "/home/user"))
}

func TestDaemonConstants(t *testing.T) {
	// Verify daemon constants are reasonable values.
	assert.GreaterOrEqual(t, daemonMinBackupInterval, 1*time.Minute)
	assert.GreaterOrEqual(t, daemonShutdownTimeout, 1*time.Minute)
}

func TestRunDaemon_CancelledContext(t *testing.T) {
	// Create a temporary directory to use as a snapshot path.
	tmpDir := t.TempDir()

	// Write a file so the watched path is non-empty.
	err := os.WriteFile(filepath.Join(tmpDir, "testfile.txt"), []byte("hello"), 0o644)
	require.NoError(t, err)

	// Build a minimal Vaultik with daemon-friendly config.
	// RunDaemon will fail on the initial backup (no storage configured),
	// but it should continue running. We cancel the context to verify
	// graceful shutdown.
	ctx, cancel := context.WithCancel(context.Background())
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}

	v := &Vaultik{
		Config: &config.Config{
			BackupInterval:    1 * time.Hour,
			FullScanInterval:  24 * time.Hour,
			MinTimeBetweenRun: 1 * time.Minute,
			Snapshots: map[string]config.SnapshotConfig{
				"test": {
					Paths: []string{tmpDir},
				},
			},
		},
		ctx:    ctx,
		cancel: cancel,
		Stdout: stdout,
		Stderr: stderr,
	}

	// Cancel the context shortly after RunDaemon starts so the daemon
	// loop exits via its ctx.Done() path.
	go func() {
		// Wait for the initial backup to fail (it will, since there's no
		// storage backend), then cancel.
		time.Sleep(200 * time.Millisecond)
		cancel()
	}()

	err = v.RunDaemon(&SnapshotCreateOptions{})
	// RunDaemon should return nil on context cancellation (graceful shutdown).
	assert.NoError(t, err)

	// Verify daemon printed startup messages.
	output := stdout.String()
	assert.Contains(t, output, "Daemon mode started")
}
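The change-tracker type itself does not appear in this part of the diff; only its tests do. As an aid to reading them, here is a minimal, self-contained sketch of a tracker with the same surface (`newChangeTracker`, `recordChange`, `hasChanges`, `changeCount`, `changedPaths`, `reset`) and the `mu`/`changes` fields the timestamp test touches. It is an illustration inferred from the tests, not the vaultik implementation.

```go
// Illustrative sketch only; not vaultik code. A mutex-guarded map of
// path -> last change time, mirroring the API exercised by the tests above.
package main

import (
	"fmt"
	"sync"
	"time"
)

type changeTracker struct {
	mu      sync.Mutex
	changes map[string]time.Time
}

func newChangeTracker() *changeTracker {
	return &changeTracker{changes: make(map[string]time.Time)}
}

func (ct *changeTracker) recordChange(path string) {
	ct.mu.Lock()
	defer ct.mu.Unlock()
	ct.changes[path] = time.Now() // duplicate paths just refresh the timestamp
}

func (ct *changeTracker) hasChanges() bool {
	ct.mu.Lock()
	defer ct.mu.Unlock()
	return len(ct.changes) > 0
}

func (ct *changeTracker) changeCount() int {
	ct.mu.Lock()
	defer ct.mu.Unlock()
	return len(ct.changes)
}

func (ct *changeTracker) changedPaths() []string {
	ct.mu.Lock()
	defer ct.mu.Unlock()
	paths := make([]string, 0, len(ct.changes))
	for p := range ct.changes {
		paths = append(paths, p)
	}
	return paths
}

func (ct *changeTracker) reset() {
	ct.mu.Lock()
	defer ct.mu.Unlock()
	ct.changes = make(map[string]time.Time)
}

func main() {
	ct := newChangeTracker()
	ct.recordChange("/home/user/file1.txt")
	ct.recordChange("/home/user/file1.txt") // duplicate: count stays at 1
	fmt.Println(ct.changeCount(), ct.changedPaths())
}
```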
@@ -79,6 +79,22 @@ func parseSnapshotTimestamp(snapshotID string) (time.Time, error) {
	return timestamp.UTC(), nil
}

// parseSnapshotName extracts the snapshot name from a snapshot ID.
// Format: hostname_snapshotname_timestamp — the middle part(s) between hostname
// and the RFC3339 timestamp are the snapshot name (may contain underscores).
// Returns the snapshot name, or empty string if the ID is malformed.
func parseSnapshotName(snapshotID string) string {
	parts := strings.Split(snapshotID, "_")
	if len(parts) < 3 {
		// Format: hostname_timestamp — no snapshot name
		return ""
	}
	// Format: hostname_name_timestamp — middle parts are the name.
	// The last part is the RFC3339 timestamp, the first part is the hostname,
	// everything in between is the snapshot name (which may itself contain underscores).
	return strings.Join(parts[1:len(parts)-1], "_")
}

// parseDuration parses a duration string with support for days
func parseDuration(s string) (time.Duration, error) {
	// Check for days suffix
internal/vaultik/helpers_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package vaultik

import (
	"testing"
)

func TestParseSnapshotName(t *testing.T) {
	tests := []struct {
		name       string
		snapshotID string
		want       string
	}{
		{
			name:       "standard format with name",
			snapshotID: "myhost_home_2026-01-12T14:41:15Z",
			want:       "home",
		},
		{
			name:       "standard format with different name",
			snapshotID: "server1_system_2026-02-15T09:30:00Z",
			want:       "system",
		},
		{
			name:       "name with underscores",
			snapshotID: "myhost_my_special_backup_2026-03-01T00:00:00Z",
			want:       "my_special_backup",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := parseSnapshotName(tt.snapshotID)
			if got != tt.want {
				t.Errorf("parseSnapshotName(%q) = %q, want %q", tt.snapshotID, got, tt.want)
			}
		})
	}
}

func TestParseSnapshotTimestamp(t *testing.T) {
	tests := []struct {
		name       string
		snapshotID string
		wantErr    bool
	}{
		{
			name:       "valid with name",
			snapshotID: "myhost_home_2026-01-12T14:41:15Z",
			wantErr:    false,
		},
		{
			name:       "valid without name",
			snapshotID: "myhost_2026-01-12T14:41:15Z",
			wantErr:    false,
		},
		{
			name:       "invalid - single part",
			snapshotID: "nounderscore",
			wantErr:    true,
		},
		{
			name:       "invalid - bad timestamp",
			snapshotID: "myhost_home_notadate",
			wantErr:    true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := parseSnapshotTimestamp(tt.snapshotID)
			if (err != nil) != tt.wantErr {
				t.Errorf("parseSnapshotTimestamp(%q) error = %v, wantErr %v", tt.snapshotID, err, tt.wantErr)
			}
		})
	}
}
internal/vaultik/purge_per_name_test.go (new file, 256 lines)
@@ -0,0 +1,256 @@
package vaultik_test

import (
	"bytes"
	"context"
	"database/sql"
	"strings"
	"testing"
	"time"

	"git.eeqj.de/sneak/vaultik/internal/database"
	"git.eeqj.de/sneak/vaultik/internal/log"
	"git.eeqj.de/sneak/vaultik/internal/types"
	"git.eeqj.de/sneak/vaultik/internal/vaultik"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// setupPurgeTest creates a Vaultik instance with an in-memory database and mock
// storage pre-populated with the given snapshot IDs. Each snapshot is marked as
// completed. Remote metadata stubs are created so syncWithRemote keeps them.
func setupPurgeTest(t *testing.T, snapshotIDs []string) *vaultik.Vaultik {
	t.Helper()
	log.Initialize(log.Config{})

	ctx := context.Background()
	db, err := database.New(ctx, ":memory:")
	require.NoError(t, err)
	t.Cleanup(func() { _ = db.Close() })

	repos := database.NewRepositories(db)
	mockStorage := NewMockStorer()

	// Insert each snapshot into the DB and create remote metadata stubs.
	// Use timestamps parsed from snapshot IDs for realistic ordering.
	for _, id := range snapshotIDs {
		// Parse timestamp from the snapshot ID
		parts := strings.Split(id, "_")
		timestampStr := parts[len(parts)-1]
		startedAt, err := time.Parse(time.RFC3339, timestampStr)
		require.NoError(t, err, "parsing timestamp from snapshot ID %q", id)

		completedAt := startedAt.Add(5 * time.Minute)
		snap := &database.Snapshot{
			ID:             types.SnapshotID(id),
			Hostname:       "testhost",
			VaultikVersion: "test",
			StartedAt:      startedAt,
			CompletedAt:    &completedAt,
		}
		err = repos.WithTx(ctx, func(ctx context.Context, tx *sql.Tx) error {
			return repos.Snapshots.Create(ctx, tx, snap)
		})
		require.NoError(t, err, "creating snapshot %s", id)

		// Create remote metadata stub so syncWithRemote keeps it
		metadataKey := "metadata/" + id + "/manifest.json.zst"
		err = mockStorage.Put(ctx, metadataKey, strings.NewReader("stub"))
		require.NoError(t, err)
	}

	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	stdin := &bytes.Buffer{}

	v := &vaultik.Vaultik{
		Storage:      mockStorage,
		Repositories: repos,
		DB:           db,
		Stdout:       stdout,
		Stderr:       stderr,
		Stdin:        stdin,
	}
	v.SetContext(ctx)

	return v
}

// listRemainingSnapshots returns IDs of all completed snapshots in the database.
func listRemainingSnapshots(t *testing.T, v *vaultik.Vaultik) []string {
	t.Helper()
	ctx := context.Background()
	dbSnaps, err := v.Repositories.Snapshots.ListRecent(ctx, 10000)
	require.NoError(t, err)

	var ids []string
	for _, s := range dbSnaps {
		if s.CompletedAt != nil {
			ids = append(ids, s.ID.String())
		}
	}
	return ids
}

func TestPurgeKeepLatest_PerName(t *testing.T) {
	// Create snapshots for two different names: "home" and "system".
	// With per-name --keep-latest, the latest of each should be kept.
	snapshotIDs := []string{
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_system_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
		"testhost_system_2026-01-01T04:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)

	// Should keep the latest of each name
	assert.Len(t, remaining, 2, "should keep exactly 2 snapshots (one per name)")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T04:00:00Z", "should keep latest system")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T03:00:00Z", "should keep latest home")
}

func TestPurgeKeepLatest_SingleName(t *testing.T) {
	// All snapshots have the same name — keep-latest should keep exactly one.
	snapshotIDs := []string{
		"testhost_home_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_home_2026-01-01T02:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)
	assert.Len(t, remaining, 1)
	assert.Contains(t, remaining, "testhost_home_2026-01-01T02:00:00Z", "should keep the newest")
}

func TestPurgeKeepLatest_WithNameFilter(t *testing.T) {
	// Use --name to filter purge to only "home" snapshots.
	// "system" snapshots should be untouched.
	snapshotIDs := []string{
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_system_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
		"testhost_home_2026-01-01T04:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
		Name:       "home",
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)

	// 2 system snapshots untouched + 1 latest home = 3
	assert.Len(t, remaining, 3)
	assert.Contains(t, remaining, "testhost_system_2026-01-01T00:00:00Z")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T02:00:00Z")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T04:00:00Z")
}

func TestPurgeKeepLatest_NoSnapshots(t *testing.T) {
	v := setupPurgeTest(t, nil)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)
}

func TestPurgeKeepLatest_NameFilterNoMatch(t *testing.T) {
	snapshotIDs := []string{
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_system_2026-01-01T01:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
		Name:       "nonexistent",
	})
	require.NoError(t, err)

	// All snapshots should remain — the name filter matched nothing
	remaining := listRemainingSnapshots(t, v)
	assert.Len(t, remaining, 2)
}

func TestPurgeOlderThan_WithNameFilter(t *testing.T) {
	// Snapshots with different names and timestamps.
	// --older-than should apply only to the named subset when --name is used.
	snapshotIDs := []string{
		"testhost_system_2020-01-01T00:00:00Z",
		"testhost_home_2020-01-01T00:00:00Z",
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T00:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	// Purge only "home" snapshots older than 365 days
	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		OlderThan: "365d",
		Force:     true,
		Name:      "home",
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)

	// Old system stays (not filtered by name), old home deleted, recent ones stay
	assert.Len(t, remaining, 3)
	assert.Contains(t, remaining, "testhost_system_2020-01-01T00:00:00Z")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T00:00:00Z")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T00:00:00Z")
}

func TestPurgeKeepLatest_ThreeNames(t *testing.T) {
	// Three different snapshot names with multiple snapshots each.
	snapshotIDs := []string{
		"testhost_home_2026-01-01T00:00:00Z",
		"testhost_system_2026-01-01T01:00:00Z",
		"testhost_media_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
		"testhost_system_2026-01-01T04:00:00Z",
		"testhost_media_2026-01-01T05:00:00Z",
		"testhost_home_2026-01-01T06:00:00Z",
	}

	v := setupPurgeTest(t, snapshotIDs)

	err := v.PurgeSnapshotsWithOptions(&vaultik.SnapshotPurgeOptions{
		KeepLatest: true,
		Force:      true,
	})
	require.NoError(t, err)

	remaining := listRemainingSnapshots(t, v)
	assert.Len(t, remaining, 3, "should keep one per name")
	assert.Contains(t, remaining, "testhost_home_2026-01-01T06:00:00Z")
	assert.Contains(t, remaining, "testhost_system_2026-01-01T04:00:00Z")
	assert.Contains(t, remaining, "testhost_media_2026-01-01T05:00:00Z")
}
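The per-name retention these tests assert boils down to: sort newest-first, then keep the first snapshot seen for each parsed name. The following self-contained sketch replays that rule on the same hypothetical testhost IDs; it is illustrative only and is not vaultik code.

```go
// Standalone illustration of per-name keep-latest retention, using the same
// hypothetical snapshot IDs as the tests above. Not vaultik code.
package main

import (
	"fmt"
	"sort"
	"strings"
)

// nameOf extracts the snapshot name from hostname_name_timestamp.
func nameOf(id string) string {
	parts := strings.Split(id, "_")
	if len(parts) < 3 {
		return ""
	}
	return strings.Join(parts[1:len(parts)-1], "_")
}

func main() {
	ids := []string{
		"testhost_system_2026-01-01T00:00:00Z",
		"testhost_home_2026-01-01T01:00:00Z",
		"testhost_system_2026-01-01T02:00:00Z",
		"testhost_home_2026-01-01T03:00:00Z",
		"testhost_system_2026-01-01T04:00:00Z",
	}

	// Sort newest-first by the RFC3339 timestamp suffix; timestamps with the
	// same offset sort lexically, so string comparison is enough here.
	sort.Slice(ids, func(i, j int) bool {
		ti := ids[i][strings.LastIndex(ids[i], "_")+1:]
		tj := ids[j][strings.LastIndex(ids[j], "_")+1:]
		return ti > tj
	})

	kept := map[string]bool{}
	var keep, del []string
	for _, id := range ids {
		name := nameOf(id)
		if kept[name] {
			del = append(del, id) // an older snapshot of an already-kept name
		} else {
			kept[name] = true
			keep = append(keep, id) // newest snapshot for this name
		}
	}
	fmt.Println("keep:", keep) // latest system and latest home
	fmt.Println("delete:", del)
}
```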
@@ -8,6 +8,7 @@ import (
	"regexp"
	"sort"
	"strings"
	"sync"
	"text/tabwriter"
	"time"

@@ -16,6 +17,7 @@ import (
	"git.eeqj.de/sneak/vaultik/internal/snapshot"
	"git.eeqj.de/sneak/vaultik/internal/types"
	"github.com/dustin/go-humanize"
	"golang.org/x/sync/errgroup"
)

// SnapshotCreateOptions contains options for the snapshot create command
@@ -56,9 +58,7 @@ func (v *Vaultik) CreateSnapshot(opts *SnapshotCreateOptions) error {
	}

	if opts.Daemon {
		log.Info("Running in daemon mode")
		// TODO: Implement daemon mode with inotify
		return fmt.Errorf("daemon mode not yet implemented")
		return v.RunDaemon(opts)
	}

	// Determine which snapshots to process
@@ -95,7 +95,10 @@ func (v *Vaultik) CreateSnapshot(opts *SnapshotCreateOptions) error {
		log.Info("Pruning enabled - deleting old snapshots and unreferenced blobs")
		v.printlnStdout("\nPruning old snapshots (keeping latest)...")

		if err := v.PurgeSnapshots(true, "", true); err != nil {
		if err := v.PurgeSnapshotsWithOptions(&SnapshotPurgeOptions{
			KeepLatest: true,
			Force:      true,
		}); err != nil {
			return fmt.Errorf("prune: purging old snapshots: %w", err)
		}

@@ -438,6 +441,9 @@ func (v *Vaultik) reconcileLocalWithRemote(remoteSnapshots map[string]bool) (map
func (v *Vaultik) buildSnapshotInfoList(remoteSnapshots map[string]bool, localSnapshotMap map[string]*database.Snapshot) ([]SnapshotInfo, error) {
	snapshots := make([]SnapshotInfo, 0, len(remoteSnapshots))

	// remoteOnly collects snapshot IDs that need a manifest download.
	var remoteOnly []string

	for snapshotID := range remoteSnapshots {
		if localSnap, exists := localSnapshotMap[snapshotID]; exists && localSnap.CompletedAt != nil {
			totalSize, err := v.Repositories.Snapshots.GetSnapshotTotalCompressedSize(v.ctx, snapshotID)
@@ -458,16 +464,73 @@ func (v *Vaultik) buildSnapshotInfoList(remoteSnapshots map[string]bool, localSn
				continue
			}

			totalSize, err := v.getManifestSize(snapshotID)
			if err != nil {
				return nil, fmt.Errorf("failed to get manifest size for %s: %w", snapshotID, err)
			}

			// Pre-add with zero size; will be filled by concurrent downloads.
			snapshots = append(snapshots, SnapshotInfo{
				ID:             types.SnapshotID(snapshotID),
				Timestamp:      timestamp,
				CompressedSize: totalSize,
				CompressedSize: 0,
			})
			remoteOnly = append(remoteOnly, snapshotID)
		}
	}

	// Download manifests concurrently for remote-only snapshots.
	if len(remoteOnly) > 0 {
		// maxConcurrentManifestDownloads bounds parallel manifest fetches to
		// avoid overwhelming the S3 endpoint while still being much faster
		// than serial downloads.
		const maxConcurrentManifestDownloads = 10

		type manifestResult struct {
			snapshotID string
			size       int64
		}

		var (
			mu      sync.Mutex
			results []manifestResult
		)

		g, gctx := errgroup.WithContext(v.ctx)
		g.SetLimit(maxConcurrentManifestDownloads)

		for _, sid := range remoteOnly {
			g.Go(func() error {
				manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", sid)
				reader, err := v.Storage.Get(gctx, manifestPath)
				if err != nil {
					return fmt.Errorf("downloading manifest for %s: %w", sid, err)
				}
				defer func() { _ = reader.Close() }()

				manifest, err := snapshot.DecodeManifest(reader)
				if err != nil {
					return fmt.Errorf("decoding manifest for %s: %w", sid, err)
				}

				mu.Lock()
				results = append(results, manifestResult{
					snapshotID: sid,
					size:       manifest.TotalCompressedSize,
				})
				mu.Unlock()
				return nil
			})
		}

		if err := g.Wait(); err != nil {
			return nil, fmt.Errorf("fetching manifest sizes: %w", err)
		}

		// Build a lookup from results and patch the pre-added entries.
		sizeMap := make(map[string]int64, len(results))
		for _, r := range results {
			sizeMap[r.snapshotID] = r.size
		}
		for i := range snapshots {
			if sz, ok := sizeMap[string(snapshots[i].ID)]; ok {
				snapshots[i].CompressedSize = sz
			}
		}
	}

@@ -520,8 +583,19 @@ func (v *Vaultik) printSnapshotTable(snapshots []SnapshotInfo) error {
	return w.Flush()
}

// PurgeSnapshots removes old snapshots based on criteria
func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool) error {
// SnapshotPurgeOptions contains options for the snapshot purge command
type SnapshotPurgeOptions struct {
	KeepLatest bool
	OlderThan  string
	Force      bool
	Name       string // Filter purge to a specific snapshot name
}

// PurgeSnapshotsWithOptions removes old snapshots based on criteria.
// When KeepLatest is true, retention is applied per snapshot name — the latest
// snapshot for each distinct name is kept. If Name is non-empty, only snapshots
// matching that name are considered for purge.
func (v *Vaultik) PurgeSnapshotsWithOptions(opts *SnapshotPurgeOptions) error {
	// Sync with remote first
	if err := v.syncWithRemote(); err != nil {
		return fmt.Errorf("syncing with remote: %w", err)
@@ -545,14 +619,51 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
		}
	}

	// If --name is specified, filter to only snapshots matching that name
	if opts.Name != "" {
		filtered := make([]SnapshotInfo, 0, len(snapshots))
		for _, snap := range snapshots {
			if parseSnapshotName(snap.ID.String()) == opts.Name {
				filtered = append(filtered, snap)
			}
		}
		snapshots = filtered
	}

	// Sort by timestamp (newest first)
	sort.Slice(snapshots, func(i, j int) bool {
		return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
	})

	toDelete, err := v.collectSnapshotsToPurge(snapshots, keepLatest, olderThan)
	if err != nil {
		return err
	var toDelete []SnapshotInfo

	if opts.KeepLatest {
		// Keep the latest snapshot per snapshot name
		// Group snapshots by name, then mark all but the newest in each group
		latestByName := make(map[string]bool) // tracks whether we've seen the latest for each name
		for _, snap := range snapshots {
			name := parseSnapshotName(snap.ID.String())
			if latestByName[name] {
				// Already kept the latest for this name — delete this one
				toDelete = append(toDelete, snap)
			} else {
				// This is the latest (sorted newest-first) — keep it
				latestByName[name] = true
			}
		}
	} else if opts.OlderThan != "" {
		// Parse duration
		duration, err := parseDuration(opts.OlderThan)
		if err != nil {
			return fmt.Errorf("invalid duration: %w", err)
		}

		cutoff := time.Now().UTC().Add(-duration)
		for _, snap := range snapshots {
			if snap.Timestamp.Before(cutoff) {
				toDelete = append(toDelete, snap)
			}
		}
	}

	if len(toDelete) == 0 {
@@ -560,37 +671,7 @@ func (v *Vaultik) PurgeSnapshots(keepLatest bool, olderThan string, force bool)
		return nil
	}

	return v.confirmAndExecutePurge(toDelete, force)
}

// collectSnapshotsToPurge determines which snapshots to delete based on retention criteria
func (v *Vaultik) collectSnapshotsToPurge(snapshots []SnapshotInfo, keepLatest bool, olderThan string) ([]SnapshotInfo, error) {
	if keepLatest {
		// Keep only the most recent snapshot
		if len(snapshots) > 1 {
			return snapshots[1:], nil
		}
		return nil, nil
	}

	if olderThan != "" {
		// Parse duration
		duration, err := parseDuration(olderThan)
		if err != nil {
			return nil, fmt.Errorf("invalid duration: %w", err)
		}

		cutoff := time.Now().UTC().Add(-duration)
		var toDelete []SnapshotInfo
		for _, snap := range snapshots {
			if snap.Timestamp.Before(cutoff) {
				toDelete = append(toDelete, snap)
			}
		}
		return toDelete, nil
	}

	return nil, nil
	return v.confirmAndExecutePurge(toDelete, opts.Force)
}

// confirmAndExecutePurge shows deletion candidates, confirms with user, and deletes snapshots
@@ -788,23 +869,6 @@ func (v *Vaultik) outputVerifyJSON(result *VerifyResult) error {

// Helper methods that were previously on SnapshotApp

func (v *Vaultik) getManifestSize(snapshotID string) (int64, error) {
	manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)

	reader, err := v.Storage.Get(v.ctx, manifestPath)
	if err != nil {
		return 0, fmt.Errorf("downloading manifest: %w", err)
	}
	defer func() { _ = reader.Close() }()

	manifest, err := snapshot.DecodeManifest(reader)
	if err != nil {
		return 0, fmt.Errorf("decoding manifest: %w", err)
	}

	return manifest.TotalCompressedSize, nil
}

func (v *Vaultik) downloadManifest(snapshotID string) (*snapshot.Manifest, error) {
	manifestPath := fmt.Sprintf("metadata/%s/manifest.json.zst", snapshotID)

@@ -988,6 +1052,7 @@ func (v *Vaultik) listAllRemoteSnapshotIDs() ([]string, error) {
	log.Info("Listing all snapshots")
	objectCh := v.Storage.ListStream(v.ctx, "metadata/")

	seen := make(map[string]bool)
	var snapshotIDs []string
	for object := range objectCh {
		if object.Err != nil {
@@ -1002,14 +1067,8 @@ func (v *Vaultik) listAllRemoteSnapshotIDs() ([]string, error) {
		}
		if strings.HasSuffix(object.Key, "/") || strings.Contains(object.Key, "/manifest.json.zst") {
			sid := parts[1]
			found := false
			for _, id := range snapshotIDs {
				if id == sid {
					found = true
					break
				}
			}
			if !found {
			if !seen[sid] {
				seen[sid] = true
				snapshotIDs = append(snapshotIDs, sid)
			}
		}