Add pluggable storage backend, PID locking, and improved scan progress

Storage backend:
- Add internal/storage package with Storer interface
- Implement FileStorer for local filesystem storage (file:// URLs)
- Implement S3Storer wrapping existing s3.Client
- Support storage_url config field (s3:// or file://)
- Migrate all consumers to use storage.Storer interface

PID locking:
- Add internal/pidlock package to prevent concurrent instances
- Acquire lock before app start, release on exit
- Detect stale locks from crashed processes

Scan progress improvements:
- Add fast file enumeration pass before stat() phase
- Use enumerated set for deletion detection (no extra filesystem access)
- Show progress with percentage, files/sec, elapsed time, and ETA
- Change "changed" to "changed/new" for clarity

Config improvements:
- Add tilde expansion for paths (~/)
- Use xdg library for platform-specific default index path
This commit is contained in: (branch indicator missing from page capture)
Date: 2025-12-19 11:52:51 +07:00
Parent: cda0cf865a
Commit: badc0c07e0
22 changed files with 1245 additions and 188 deletions

View File

@@ -7,14 +7,14 @@ import (
"time"
"git.eeqj.de/sneak/vaultik/internal/log"
"git.eeqj.de/sneak/vaultik/internal/s3"
"git.eeqj.de/sneak/vaultik/internal/storage"
"github.com/spf13/cobra"
"go.uber.org/fx"
)
// StoreApp contains dependencies for store commands
type StoreApp struct {
S3Client *s3.Client
Storage storage.Storer
Shutdowner fx.Shutdowner
}
@@ -48,19 +48,18 @@ func newStoreInfoCommand() *cobra.Command {
// Info displays storage information
func (app *StoreApp) Info(ctx context.Context) error {
// Get bucket info
bucketName := app.S3Client.BucketName()
endpoint := app.S3Client.Endpoint()
// Get storage info
storageInfo := app.Storage.Info()
fmt.Printf("Storage Information\n")
fmt.Printf("==================\n\n")
fmt.Printf("S3 Configuration:\n")
fmt.Printf(" Endpoint: %s\n", endpoint)
fmt.Printf(" Bucket: %s\n\n", bucketName)
fmt.Printf("Storage Configuration:\n")
fmt.Printf(" Type: %s\n", storageInfo.Type)
fmt.Printf(" Location: %s\n\n", storageInfo.Location)
// Count snapshots by listing metadata/ prefix
snapshotCount := 0
snapshotCh := app.S3Client.ListObjectsStream(ctx, "metadata/", true)
snapshotCh := app.Storage.ListStream(ctx, "metadata/")
snapshotDirs := make(map[string]bool)
for object := range snapshotCh {
@@ -79,7 +78,7 @@ func (app *StoreApp) Info(ctx context.Context) error {
blobCount := 0
var totalSize int64
blobCh := app.S3Client.ListObjectsStream(ctx, "blobs/", false)
blobCh := app.Storage.ListStream(ctx, "blobs/")
for object := range blobCh {
if object.Err != nil {
return fmt.Errorf("listing blobs: %w", object.Err)
@@ -130,10 +129,9 @@ func runWithApp(ctx context.Context, fn func(*StoreApp) error) error {
Debug: rootFlags.Debug,
},
Modules: []fx.Option{
s3.Module,
fx.Provide(func(s3Client *s3.Client, shutdowner fx.Shutdowner) *StoreApp {
fx.Provide(func(storer storage.Storer, shutdowner fx.Shutdowner) *StoreApp {
return &StoreApp{
S3Client: s3Client,
Storage: storer,
Shutdowner: shutdowner,
}
}),