Add pluggable storage backend, PID locking, and improved scan progress
Storage backend:
- Add internal/storage package with Storer interface
- Implement FileStorer for local filesystem storage (file:// URLs)
- Implement S3Storer wrapping existing s3.Client
- Support storage_url config field (s3:// or file://)
- Migrate all consumers to use storage.Storer interface

PID locking:
- Add internal/pidlock package to prevent concurrent instances
- Acquire lock before app start, release on exit
- Detect stale locks from crashed processes

Scan progress improvements:
- Add fast file enumeration pass before stat() phase
- Use enumerated set for deletion detection (no extra filesystem access)
- Show progress with percentage, files/sec, elapsed time, and ETA
- Change "changed" to "changed/new" for clarity

Config improvements:
- Add tilde expansion for paths (~/)
- Use xdg library for platform-specific default index path
This commit is contained in:
@@ -3,16 +3,43 @@ package config
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.eeqj.de/sneak/smartconfig"
|
||||
"github.com/adrg/xdg"
|
||||
"go.uber.org/fx"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const appName = "berlin.sneak.app.vaultik"
|
||||
|
||||
// expandTilde expands a leading ~ in path to the current user's home
// directory: a bare "~" becomes the home directory itself, and "~/rest"
// becomes home/rest. Any other path (including unsupported "~user"
// forms) is returned unchanged. If the home directory cannot be
// determined, the path is returned unchanged rather than silently
// joining onto an empty string.
func expandTilde(path string) string {
	if path != "~" && !strings.HasPrefix(path, "~/") {
		return path
	}
	home, err := os.UserHomeDir()
	if err != nil {
		// No usable home directory; leave the path as-is so the
		// caller fails loudly on "~/..." instead of using a wrong path.
		return path
	}
	if path == "~" {
		return home
	}
	return filepath.Join(home, path[2:])
}
|
||||
|
||||
// expandTildeInURL expands a leading ~/ inside file:// URLs to the
// current user's home directory, e.g. "file://~/backups" becomes
// "file:///home/user/backups". URLs of any other form (s3://, absolute
// file:// paths, empty strings) are returned unchanged, as is the URL
// when the home directory cannot be determined.
func expandTildeInURL(url string) string {
	const prefix = "file://~/"
	if !strings.HasPrefix(url, prefix) {
		return url
	}
	home, err := os.UserHomeDir()
	if err != nil {
		// Leave the URL alone rather than joining onto an empty home.
		return url
	}
	return "file://" + filepath.Join(home, url[len(prefix):])
}
|
||||
|
||||
// Config represents the application configuration for Vaultik.
|
||||
// It defines all settings for backup operations, including source directories,
|
||||
// encryption recipients, S3 storage configuration, and performance tuning parameters.
|
||||
// encryption recipients, storage configuration, and performance tuning parameters.
|
||||
// Configuration is typically loaded from a YAML file.
|
||||
type Config struct {
|
||||
AgeRecipients []string `yaml:"age_recipients"`
|
||||
@@ -28,6 +55,14 @@ type Config struct {
|
||||
S3 S3Config `yaml:"s3"`
|
||||
SourceDirs []string `yaml:"source_dirs"`
|
||||
CompressionLevel int `yaml:"compression_level"`
|
||||
|
||||
// StorageURL specifies the storage backend using a URL format.
|
||||
// Takes precedence over S3Config if set.
|
||||
// Supported formats:
|
||||
// - s3://bucket/prefix?endpoint=host®ion=us-east-1
|
||||
// - file:///path/to/backup
|
||||
// For S3 URLs, credentials are still read from s3.access_key_id and s3.secret_access_key.
|
||||
StorageURL string `yaml:"storage_url"`
|
||||
}
|
||||
|
||||
// S3Config represents S3 storage configuration for backup storage.
|
||||
@@ -84,7 +119,7 @@ func Load(path string) (*Config, error) {
|
||||
BackupInterval: 1 * time.Hour,
|
||||
FullScanInterval: 24 * time.Hour,
|
||||
MinTimeBetweenRun: 15 * time.Minute,
|
||||
IndexPath: "/var/lib/vaultik/index.sqlite",
|
||||
IndexPath: filepath.Join(xdg.DataHome, appName, "index.sqlite"),
|
||||
CompressionLevel: 3,
|
||||
}
|
||||
|
||||
@@ -99,9 +134,16 @@ func Load(path string) (*Config, error) {
|
||||
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||
}
|
||||
|
||||
// Expand tilde in all path fields
|
||||
cfg.IndexPath = expandTilde(cfg.IndexPath)
|
||||
cfg.StorageURL = expandTildeInURL(cfg.StorageURL)
|
||||
for i, dir := range cfg.SourceDirs {
|
||||
cfg.SourceDirs[i] = expandTilde(dir)
|
||||
}
|
||||
|
||||
// Check for environment variable override for IndexPath
|
||||
if envIndexPath := os.Getenv("VAULTIK_INDEX_PATH"); envIndexPath != "" {
|
||||
cfg.IndexPath = envIndexPath
|
||||
cfg.IndexPath = expandTilde(envIndexPath)
|
||||
}
|
||||
|
||||
// Get hostname if not set
|
||||
@@ -132,7 +174,7 @@ func Load(path string) (*Config, error) {
|
||||
// It ensures all required fields are present and have valid values:
|
||||
// - At least one age recipient must be specified
|
||||
// - At least one source directory must be configured
|
||||
// - S3 credentials and endpoint must be provided
|
||||
// - Storage must be configured (either storage_url or s3.* fields)
|
||||
// - Chunk size must be at least 1MB
|
||||
// - Blob size limit must be at least the chunk size
|
||||
// - Compression level must be between 1 and 19
|
||||
@@ -146,20 +188,9 @@ func (c *Config) Validate() error {
|
||||
return fmt.Errorf("at least one source directory is required")
|
||||
}
|
||||
|
||||
if c.S3.Endpoint == "" {
|
||||
return fmt.Errorf("s3.endpoint is required")
|
||||
}
|
||||
|
||||
if c.S3.Bucket == "" {
|
||||
return fmt.Errorf("s3.bucket is required")
|
||||
}
|
||||
|
||||
if c.S3.AccessKeyID == "" {
|
||||
return fmt.Errorf("s3.access_key_id is required")
|
||||
}
|
||||
|
||||
if c.S3.SecretAccessKey == "" {
|
||||
return fmt.Errorf("s3.secret_access_key is required")
|
||||
// Validate storage configuration
|
||||
if err := c.validateStorage(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.ChunkSize.Int64() < 1024*1024 { // 1MB minimum
|
||||
@@ -177,6 +208,50 @@ func (c *Config) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateStorage validates storage configuration.
|
||||
// If StorageURL is set, it takes precedence. S3 URLs require credentials.
|
||||
// File URLs don't require any S3 configuration.
|
||||
// If StorageURL is not set, legacy S3 configuration is required.
|
||||
func (c *Config) validateStorage() error {
|
||||
if c.StorageURL != "" {
|
||||
// URL-based configuration
|
||||
if strings.HasPrefix(c.StorageURL, "file://") {
|
||||
// File storage doesn't need S3 credentials
|
||||
return nil
|
||||
}
|
||||
if strings.HasPrefix(c.StorageURL, "s3://") {
|
||||
// S3 storage needs credentials
|
||||
if c.S3.AccessKeyID == "" {
|
||||
return fmt.Errorf("s3.access_key_id is required for s3:// URLs")
|
||||
}
|
||||
if c.S3.SecretAccessKey == "" {
|
||||
return fmt.Errorf("s3.secret_access_key is required for s3:// URLs")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("storage_url must start with s3:// or file://")
|
||||
}
|
||||
|
||||
// Legacy S3 configuration
|
||||
if c.S3.Endpoint == "" {
|
||||
return fmt.Errorf("s3.endpoint is required (or set storage_url)")
|
||||
}
|
||||
|
||||
if c.S3.Bucket == "" {
|
||||
return fmt.Errorf("s3.bucket is required (or set storage_url)")
|
||||
}
|
||||
|
||||
if c.S3.AccessKeyID == "" {
|
||||
return fmt.Errorf("s3.access_key_id is required")
|
||||
}
|
||||
|
||||
if c.S3.SecretAccessKey == "" {
|
||||
return fmt.Errorf("s3.secret_access_key is required")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Module exports the config module for fx dependency injection.
|
||||
// It provides the Config type to other modules in the application.
|
||||
var Module = fx.Module("config",
|
||||
|
||||
Reference in New Issue
Block a user