Add pluggable storage backend, PID locking, and improved scan progress
Storage backend:
- Add internal/storage package with Storer interface
- Implement FileStorer for local filesystem storage (file:// URLs)
- Implement S3Storer wrapping existing s3.Client
- Support storage_url config field (s3:// or file://)
- Migrate all consumers to use storage.Storer interface

PID locking:
- Add internal/pidlock package to prevent concurrent instances
- Acquire lock before app start, release on exit
- Detect stale locks from crashed processes

Scan progress improvements:
- Add fast file enumeration pass before stat() phase
- Use enumerated set for deletion detection (no extra filesystem access)
- Show progress with percentage, files/sec, elapsed time, and ETA
- Change "changed" to "changed/new" for clarity

Config improvements:
- Add tilde expansion for paths (~/)
- Use xdg library for platform-specific default index path
This commit is contained in:
262
internal/storage/file.go
Normal file
262
internal/storage/file.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// FileStorer implements Storer using the local filesystem.
// It mirrors the S3 path/key layout so backups are portable between the
// file and s3 backends: each key is joined onto basePath to form a real
// filesystem path.
type FileStorer struct {
	fs       afero.Fs // filesystem abstraction; swappable for tests via SetFilesystem
	basePath string   // root directory under which all keys are stored
}
|
||||
|
||||
// NewFileStorer creates a new filesystem storage backend.
|
||||
// The basePath directory will be created if it doesn't exist.
|
||||
// Uses the real OS filesystem by default; call SetFilesystem to override for testing.
|
||||
func NewFileStorer(basePath string) (*FileStorer, error) {
|
||||
fs := afero.NewOsFs()
|
||||
// Ensure base path exists
|
||||
if err := fs.MkdirAll(basePath, 0755); err != nil {
|
||||
return nil, fmt.Errorf("creating base path: %w", err)
|
||||
}
|
||||
return &FileStorer{
|
||||
fs: fs,
|
||||
basePath: basePath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetFilesystem overrides the backing filesystem. Intended for tests
// (e.g. an afero in-memory filesystem); production code keeps the OS
// filesystem installed by NewFileStorer.
func (f *FileStorer) SetFilesystem(fs afero.Fs) {
	f.fs = fs
}

// fullPath maps a storage key to a filesystem path under basePath.
func (f *FileStorer) fullPath(key string) string {
	return filepath.Join(f.basePath, key)
}
|
||||
|
||||
// Put stores data at the specified key.
|
||||
func (f *FileStorer) Put(ctx context.Context, key string, data io.Reader) error {
|
||||
path := f.fullPath(key)
|
||||
|
||||
// Create parent directories
|
||||
dir := filepath.Dir(path)
|
||||
if err := f.fs.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("creating directories: %w", err)
|
||||
}
|
||||
|
||||
file, err := f.fs.Create(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating file: %w", err)
|
||||
}
|
||||
defer func() { _ = file.Close() }()
|
||||
|
||||
if _, err := io.Copy(file, data); err != nil {
|
||||
return fmt.Errorf("writing file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutWithProgress stores data with progress reporting.
|
||||
func (f *FileStorer) PutWithProgress(ctx context.Context, key string, data io.Reader, size int64, progress ProgressCallback) error {
|
||||
path := f.fullPath(key)
|
||||
|
||||
// Create parent directories
|
||||
dir := filepath.Dir(path)
|
||||
if err := f.fs.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("creating directories: %w", err)
|
||||
}
|
||||
|
||||
file, err := f.fs.Create(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating file: %w", err)
|
||||
}
|
||||
defer func() { _ = file.Close() }()
|
||||
|
||||
// Wrap with progress tracking
|
||||
pw := &progressWriter{
|
||||
writer: file,
|
||||
callback: progress,
|
||||
}
|
||||
|
||||
if _, err := io.Copy(pw, data); err != nil {
|
||||
return fmt.Errorf("writing file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves data from the specified key.
|
||||
func (f *FileStorer) Get(ctx context.Context, key string) (io.ReadCloser, error) {
|
||||
path := f.fullPath(key)
|
||||
file, err := f.fs.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return nil, fmt.Errorf("opening file: %w", err)
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Stat returns metadata about an object without retrieving its contents.
|
||||
func (f *FileStorer) Stat(ctx context.Context, key string) (*ObjectInfo, error) {
|
||||
path := f.fullPath(key)
|
||||
info, err := f.fs.Stat(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return nil, fmt.Errorf("stat file: %w", err)
|
||||
}
|
||||
return &ObjectInfo{
|
||||
Key: key,
|
||||
Size: info.Size(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Delete removes an object.
|
||||
func (f *FileStorer) Delete(ctx context.Context, key string) error {
|
||||
path := f.fullPath(key)
|
||||
err := f.fs.Remove(path)
|
||||
if os.IsNotExist(err) {
|
||||
return nil // Match S3 behavior: no error if doesn't exist
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// List returns all keys with the given prefix.
|
||||
func (f *FileStorer) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
var keys []string
|
||||
basePath := f.fullPath(prefix)
|
||||
|
||||
// Check if base path exists
|
||||
exists, err := afero.Exists(f.fs, basePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("checking path: %w", err)
|
||||
}
|
||||
if !exists {
|
||||
return keys, nil // Empty list for non-existent prefix
|
||||
}
|
||||
|
||||
err = afero.Walk(f.fs, basePath, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check context cancellation
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
// Convert back to key (relative path from basePath)
|
||||
relPath, err := filepath.Rel(f.basePath, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing relative path: %w", err)
|
||||
}
|
||||
// Normalize path separators to forward slashes for consistency
|
||||
relPath = strings.ReplaceAll(relPath, string(filepath.Separator), "/")
|
||||
keys = append(keys, relPath)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("walking directory: %w", err)
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// ListStream returns a channel of ObjectInfo for large result sets.
|
||||
func (f *FileStorer) ListStream(ctx context.Context, prefix string) <-chan ObjectInfo {
|
||||
ch := make(chan ObjectInfo)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
basePath := f.fullPath(prefix)
|
||||
|
||||
// Check if base path exists
|
||||
exists, err := afero.Exists(f.fs, basePath)
|
||||
if err != nil {
|
||||
ch <- ObjectInfo{Err: fmt.Errorf("checking path: %w", err)}
|
||||
return
|
||||
}
|
||||
if !exists {
|
||||
return // Empty channel for non-existent prefix
|
||||
}
|
||||
|
||||
_ = afero.Walk(f.fs, basePath, func(path string, info os.FileInfo, err error) error {
|
||||
// Check context cancellation
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ch <- ObjectInfo{Err: ctx.Err()}
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
ch <- ObjectInfo{Err: err}
|
||||
return nil // Continue walking despite errors
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
relPath, err := filepath.Rel(f.basePath, path)
|
||||
if err != nil {
|
||||
ch <- ObjectInfo{Err: fmt.Errorf("computing relative path: %w", err)}
|
||||
return nil
|
||||
}
|
||||
// Normalize path separators
|
||||
relPath = strings.ReplaceAll(relPath, string(filepath.Separator), "/")
|
||||
ch <- ObjectInfo{
|
||||
Key: relPath,
|
||||
Size: info.Size(),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Info returns human-readable storage location information: the backend
// type ("file") and the base directory that holds all objects.
func (f *FileStorer) Info() StorageInfo {
	return StorageInfo{
		Type:     "file",
		Location: f.basePath,
	}
}
|
||||
|
||||
// progressWriter wraps an io.Writer to track write progress.
|
||||
type progressWriter struct {
|
||||
writer io.Writer
|
||||
written int64
|
||||
callback ProgressCallback
|
||||
}
|
||||
|
||||
func (pw *progressWriter) Write(p []byte) (int, error) {
|
||||
n, err := pw.writer.Write(p)
|
||||
if n > 0 {
|
||||
pw.written += int64(n)
|
||||
if pw.callback != nil {
|
||||
if callbackErr := pw.callback(pw.written); callbackErr != nil {
|
||||
return n, callbackErr
|
||||
}
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
110
internal/storage/module.go
Normal file
110
internal/storage/module.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"git.eeqj.de/sneak/vaultik/internal/config"
|
||||
"git.eeqj.de/sneak/vaultik/internal/s3"
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
// Module exports storage functionality as an fx module.
// It provides a single Storer implementation, chosen by NewStorer from
// either the configured storage URL or the legacy S3 configuration.
var Module = fx.Module("storage",
	fx.Provide(NewStorer),
)
|
||||
|
||||
// NewStorer creates a Storer based on configuration.
|
||||
// If StorageURL is set, it uses URL-based configuration.
|
||||
// Otherwise, it falls back to legacy S3 configuration.
|
||||
func NewStorer(cfg *config.Config) (Storer, error) {
|
||||
if cfg.StorageURL != "" {
|
||||
return storerFromURL(cfg.StorageURL, cfg)
|
||||
}
|
||||
return storerFromLegacyS3Config(cfg)
|
||||
}
|
||||
|
||||
func storerFromURL(rawURL string, cfg *config.Config) (Storer, error) {
|
||||
parsed, err := ParseStorageURL(rawURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing storage URL: %w", err)
|
||||
}
|
||||
|
||||
switch parsed.Scheme {
|
||||
case "file":
|
||||
return NewFileStorer(parsed.Prefix)
|
||||
|
||||
case "s3":
|
||||
// Build endpoint URL
|
||||
endpoint := parsed.Endpoint
|
||||
if endpoint == "" {
|
||||
endpoint = "s3.amazonaws.com"
|
||||
}
|
||||
|
||||
// Add protocol if not present
|
||||
if parsed.UseSSL && !strings.HasPrefix(endpoint, "https://") && !strings.HasPrefix(endpoint, "http://") {
|
||||
endpoint = "https://" + endpoint
|
||||
} else if !parsed.UseSSL && !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
|
||||
endpoint = "http://" + endpoint
|
||||
}
|
||||
|
||||
region := parsed.Region
|
||||
if region == "" {
|
||||
region = cfg.S3.Region
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
}
|
||||
|
||||
// Credentials come from config (not URL for security)
|
||||
client, err := s3.NewClient(context.Background(), s3.Config{
|
||||
Endpoint: endpoint,
|
||||
Bucket: parsed.Bucket,
|
||||
Prefix: parsed.Prefix,
|
||||
AccessKeyID: cfg.S3.AccessKeyID,
|
||||
SecretAccessKey: cfg.S3.SecretAccessKey,
|
||||
Region: region,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating S3 client: %w", err)
|
||||
}
|
||||
return NewS3Storer(client), nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported storage scheme: %s", parsed.Scheme)
|
||||
}
|
||||
}
|
||||
|
||||
func storerFromLegacyS3Config(cfg *config.Config) (Storer, error) {
|
||||
endpoint := cfg.S3.Endpoint
|
||||
|
||||
// Ensure protocol is present
|
||||
if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
|
||||
if cfg.S3.UseSSL {
|
||||
endpoint = "https://" + endpoint
|
||||
} else {
|
||||
endpoint = "http://" + endpoint
|
||||
}
|
||||
}
|
||||
|
||||
region := cfg.S3.Region
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
client, err := s3.NewClient(context.Background(), s3.Config{
|
||||
Endpoint: endpoint,
|
||||
Bucket: cfg.S3.Bucket,
|
||||
Prefix: cfg.S3.Prefix,
|
||||
AccessKeyID: cfg.S3.AccessKeyID,
|
||||
SecretAccessKey: cfg.S3.SecretAccessKey,
|
||||
Region: region,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating S3 client: %w", err)
|
||||
}
|
||||
return NewS3Storer(client), nil
|
||||
}
|
||||
85
internal/storage/s3.go
Normal file
85
internal/storage/s3.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"git.eeqj.de/sneak/vaultik/internal/s3"
|
||||
)
|
||||
|
||||
// S3Storer adapts the existing s3.Client to the Storer interface, so the
// S3 and filesystem backends are interchangeable to callers.
type S3Storer struct {
	client *s3.Client
}

// NewS3Storer creates a new S3 storage backend wrapping the given client.
func NewS3Storer(client *s3.Client) *S3Storer {
	return &S3Storer{client: client}
}
|
||||
|
||||
// Put stores data at the specified key, delegating to the S3 client.
func (s *S3Storer) Put(ctx context.Context, key string, data io.Reader) error {
	return s.client.PutObject(ctx, key, data)
}

// PutWithProgress stores data with progress reporting. size must be the
// exact size of the data, per the Storer contract.
func (s *S3Storer) PutWithProgress(ctx context.Context, key string, data io.Reader, size int64, progress ProgressCallback) error {
	// The two callback types share a signature; convert explicitly so a
	// nil storage callback stays a nil s3 callback.
	var s3Progress s3.ProgressCallback
	if progress != nil {
		s3Progress = s3.ProgressCallback(progress)
	}
	return s.client.PutObjectWithProgress(ctx, key, data, size, s3Progress)
}

// Get retrieves data from the specified key. The caller must close the
// returned ReadCloser.
func (s *S3Storer) Get(ctx context.Context, key string) (io.ReadCloser, error) {
	return s.client.GetObject(ctx, key)
}

// Stat returns metadata about an object without retrieving its contents.
// NOTE(review): the Storer contract says Stat returns ErrNotFound for a
// missing object, but this passes the client error through unmapped —
// confirm s3.Client.StatObject already returns storage.ErrNotFound
// (FileStorer does perform that mapping).
func (s *S3Storer) Stat(ctx context.Context, key string) (*ObjectInfo, error) {
	info, err := s.client.StatObject(ctx, key)
	if err != nil {
		return nil, err
	}
	return &ObjectInfo{
		Key:  info.Key,
		Size: info.Size,
	}, nil
}

// Delete removes an object, delegating to the S3 client.
func (s *S3Storer) Delete(ctx context.Context, key string) error {
	return s.client.DeleteObject(ctx, key)
}

// List returns all keys with the given prefix. Prefer ListStream for
// large result sets.
func (s *S3Storer) List(ctx context.Context, prefix string) ([]string, error) {
	return s.client.ListObjects(ctx, prefix)
}
|
||||
|
||||
// ListStream returns a channel of ObjectInfo for large result sets.
|
||||
func (s *S3Storer) ListStream(ctx context.Context, prefix string) <-chan ObjectInfo {
|
||||
ch := make(chan ObjectInfo)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
for info := range s.client.ListObjectsStream(ctx, prefix, false) {
|
||||
ch <- ObjectInfo{
|
||||
Key: info.Key,
|
||||
Size: info.Size,
|
||||
Err: info.Err,
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Info returns human-readable storage location information: the backend
// type ("s3") and "endpoint/bucket" for the wrapped client.
func (s *S3Storer) Info() StorageInfo {
	return StorageInfo{
		Type:     "s3",
		Location: fmt.Sprintf("%s/%s", s.client.Endpoint(), s.client.BucketName()),
	}
}
|
||||
74
internal/storage/storer.go
Normal file
74
internal/storage/storer.go
Normal file
@@ -0,0 +1,74 @@
|
||||
// Package storage provides a unified interface for storage backends.
// It supports both S3-compatible object storage and local filesystem storage,
// allowing Vaultik to store backups in either location with the same API.
//
// Storage backends are selected via URL:
//   - s3://bucket/prefix?endpoint=host&region=r - S3-compatible storage
//   - file:///path/to/backup - Local filesystem storage
//
// Both backends implement the Storer interface and support progress reporting
// during upload/write operations.
package storage

import (
	"context"
	"errors"
	"io"
)

// ErrNotFound is returned by Get and Stat when an object does not exist.
// Compare with errors.Is.
var ErrNotFound = errors.New("object not found")

// ProgressCallback is called during storage operations with the cumulative
// number of bytes transferred so far.
// Return a non-nil error to cancel the operation.
type ProgressCallback func(bytesTransferred int64) error

// ObjectInfo contains metadata about a stored object.
type ObjectInfo struct {
	Key  string // Object key/path, relative to the storage root
	Size int64  // Size in bytes
	Err  error  // Error for streaming results (nil on success)
}

// StorageInfo provides human-readable storage configuration for display.
type StorageInfo struct {
	Type     string // "s3" or "file"
	Location string // endpoint/bucket for S3, base path for filesystem
}

// Storer defines the interface for storage backends.
// All keys are relative to the storage root (bucket/prefix for S3, base
// directory for filesystem) and use forward slashes on every platform.
type Storer interface {
	// Put stores data at the specified key.
	// Parent directories are created automatically for filesystem backends.
	Put(ctx context.Context, key string, data io.Reader) error

	// PutWithProgress stores data with progress reporting.
	// Size must be the exact size of the data to store.
	// The progress callback is called periodically with bytes transferred.
	PutWithProgress(ctx context.Context, key string, data io.Reader, size int64, progress ProgressCallback) error

	// Get retrieves data from the specified key.
	// The caller must close the returned ReadCloser.
	// Returns ErrNotFound if the object does not exist.
	Get(ctx context.Context, key string) (io.ReadCloser, error)

	// Stat returns metadata about an object without retrieving its contents.
	// Returns ErrNotFound if the object does not exist.
	Stat(ctx context.Context, key string) (*ObjectInfo, error)

	// Delete removes an object. No error is returned if the object doesn't exist.
	Delete(ctx context.Context, key string) error

	// List returns all keys with the given prefix.
	// For large result sets, prefer ListStream.
	List(ctx context.Context, prefix string) ([]string, error)

	// ListStream returns a channel of ObjectInfo for large result sets.
	// The channel is closed when listing completes.
	// If an error occurs during listing, the final item will have Err set.
	ListStream(ctx context.Context, prefix string) <-chan ObjectInfo

	// Info returns human-readable storage location information.
	Info() StorageInfo
}
|
||||
90
internal/storage/url.go
Normal file
90
internal/storage/url.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// StorageURL is the decomposed form of a storage location URL.
type StorageURL struct {
	Scheme   string // "s3" or "file"
	Bucket   string // S3 bucket name (empty for file)
	Prefix   string // path within the bucket, or the filesystem base path
	Endpoint string // S3 endpoint host (optional; empty means AWS)
	Region   string // S3 region (optional)
	UseSSL   bool   // use HTTPS for S3 (default true)
}

// ParseStorageURL parses a storage location string. Two forms are accepted:
//   - s3://bucket/prefix?endpoint=host&region=us-east-1&ssl=true
//   - file:///absolute/path/to/backup
func ParseStorageURL(rawURL string) (*StorageURL, error) {
	switch {
	case rawURL == "":
		return nil, fmt.Errorf("storage URL is empty")

	case strings.HasPrefix(rawURL, "file://"):
		path := strings.TrimPrefix(rawURL, "file://")
		if path == "" {
			return nil, fmt.Errorf("file URL path is empty")
		}
		return &StorageURL{Scheme: "file", Prefix: path}, nil

	case strings.HasPrefix(rawURL, "s3://"):
		u, err := url.Parse(rawURL)
		if err != nil {
			return nil, fmt.Errorf("invalid URL: %w", err)
		}
		if u.Host == "" {
			return nil, fmt.Errorf("s3 URL missing bucket name")
		}
		q := u.Query()
		parsed := &StorageURL{
			Scheme:   "s3",
			Bucket:   u.Host,
			Prefix:   strings.TrimPrefix(u.Path, "/"),
			Endpoint: q.Get("endpoint"),
			Region:   q.Get("region"),
			// SSL stays on unless explicitly disabled with ssl=false.
			UseSSL: q.Get("ssl") != "false",
		}
		return parsed, nil

	default:
		return nil, fmt.Errorf("unsupported URL scheme: must start with s3:// or file://")
	}
}

// String renders the storage URL in a human-readable form. Credentials
// never appear because they are not part of the parsed URL.
func (u *StorageURL) String() string {
	switch u.Scheme {
	case "file":
		return "file://" + u.Prefix
	case "s3":
		endpoint := u.Endpoint
		if endpoint == "" {
			endpoint = "s3.amazonaws.com"
		}
		location := "s3://" + u.Bucket
		if u.Prefix != "" {
			location += "/" + u.Prefix
		}
		return fmt.Sprintf("%s (endpoint: %s)", location, endpoint)
	default:
		return fmt.Sprintf("%s://?", u.Scheme)
	}
}
|
||||
Reference in New Issue
Block a user