Compare commits
5 commits: main...71a402650c

Commits in this compare:
- 71a402650c
- 54cddb70f8
- 99516a03cf
- 1fe250d8b2
- ca2171cb42
@@ -54,7 +54,7 @@ The database tracks five primary entities and their relationships:
 #### File (`database.File`)
 
 Represents a file or directory in the backup system. Stores metadata needed for restoration:
 
-- Path, mtime
+- Path, timestamps (mtime, ctime)
 - Size, mode, ownership (uid, gid)
 - Symlink target (if applicable)
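For orientation, the Go model behind this entity (its own hunks appear further down in this compare) ends up with roughly the following shape after the change. This is an abridged sketch assembled from those hunks, not a verbatim copy of the source file:

```go
// Abridged sketch of database.File after this change, assembled from the
// model and repository hunks below; comments and field order are approximate.
type File struct {
	ID         types.FileID
	Path       types.FilePath   // absolute path of the file
	SourcePath types.SourcePath // source directory (for restore path stripping)
	MTime      time.Time
	CTime      time.Time // new in this change: change time, stored next to mtime
	Size       int64
	Mode       uint32
	UID        uint32
	GID        uint32
	LinkTarget types.FilePath // symlink target, if applicable
}
```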
@@ -17,6 +17,7 @@ Stores metadata about files in the filesystem being backed up.
 - `id` (TEXT PRIMARY KEY) - UUID for the file record
 - `path` (TEXT NOT NULL UNIQUE) - Absolute file path
 - `mtime` (INTEGER NOT NULL) - Modification time as Unix timestamp
+- `ctime` (INTEGER NOT NULL) - Change time as Unix timestamp
 - `size` (INTEGER NOT NULL) - File size in bytes
 - `mode` (INTEGER NOT NULL) - Unix file permissions and type
 - `uid` (INTEGER NOT NULL) - User ID of file owner
@@ -29,6 +29,7 @@ func TestCascadeDeleteDebug(t *testing.T) {
 	file := &File{
 		Path:  "/cascade-test.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -22,6 +22,7 @@ func TestChunkFileRepository(t *testing.T) {
 	file1 := &File{
 		Path:  "/file1.txt",
 		MTime: testTime,
+		CTime: testTime,
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -36,6 +37,7 @@ func TestChunkFileRepository(t *testing.T) {
 	file2 := &File{
 		Path:  "/file2.txt",
 		MTime: testTime,
+		CTime: testTime,
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -136,9 +138,9 @@ func TestChunkFileRepositoryComplexDeduplication(t *testing.T) {
 
 	// Create test files
 	testTime := time.Now().Truncate(time.Second)
-	file1 := &File{Path: "/file1.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
-	file2 := &File{Path: "/file2.txt", MTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
-	file3 := &File{Path: "/file3.txt", MTime: testTime, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}
+	file1 := &File{Path: "/file1.txt", MTime: testTime, CTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
+	file2 := &File{Path: "/file2.txt", MTime: testTime, CTime: testTime, Size: 3072, Mode: 0644, UID: 1000, GID: 1000}
+	file3 := &File{Path: "/file3.txt", MTime: testTime, CTime: testTime, Size: 2048, Mode: 0644, UID: 1000, GID: 1000}
 
 	if err := fileRepo.Create(ctx, nil, file1); err != nil {
 		t.Fatalf("failed to create file1: %v", err)
@@ -22,6 +22,7 @@ func TestFileChunkRepository(t *testing.T) {
 	file := &File{
 		Path:  "/test/file.txt",
 		MTime: testTime,
+		CTime: testTime,
 		Size:  3072,
 		Mode:  0644,
 		UID:   1000,
@@ -134,6 +135,7 @@ func TestFileChunkRepositoryMultipleFiles(t *testing.T) {
 	file := &File{
 		Path:  types.FilePath(path),
 		MTime: testTime,
+		CTime: testTime,
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -25,11 +25,12 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
 	}
 
 	query := `
-		INSERT INTO files (id, path, source_path, mtime, size, mode, uid, gid, link_target)
-		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
+		INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target)
+		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
 		ON CONFLICT(path) DO UPDATE SET
 			source_path = excluded.source_path,
 			mtime = excluded.mtime,
+			ctime = excluded.ctime,
 			size = excluded.size,
 			mode = excluded.mode,
 			uid = excluded.uid,
@@ -41,10 +42,10 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
 	var idStr string
 	var err error
 	if tx != nil {
-		LogSQL("Execute", query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String())
-		err = tx.QueryRowContext(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
+		LogSQL("Execute", query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String())
+		err = tx.QueryRowContext(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
 	} else {
-		err = r.db.QueryRowWithLog(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
+		err = r.db.QueryRowWithLog(ctx, query, file.ID.String(), file.Path.String(), file.SourcePath.String(), file.MTime.Unix(), file.CTime.Unix(), file.Size, file.Mode, file.UID, file.GID, file.LinkTarget.String()).Scan(&idStr)
 	}
 
 	if err != nil {
@@ -62,7 +63,7 @@ func (r *FileRepository) Create(ctx context.Context, tx *sql.Tx, file *File) err
 
 func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE path = ?
 	`
@@ -81,7 +82,7 @@ func (r *FileRepository) GetByPath(ctx context.Context, path string) (*File, err
 // GetByID retrieves a file by its UUID
 func (r *FileRepository) GetByID(ctx context.Context, id types.FileID) (*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE id = ?
 	`
@@ -99,7 +100,7 @@ func (r *FileRepository) GetByID(ctx context.Context, id types.FileID) (*File, e
 
 func (r *FileRepository) GetByPathTx(ctx context.Context, tx *sql.Tx, path string) (*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE path = ?
 	`
@@ -122,7 +123,7 @@ func (r *FileRepository) GetByPathTx(ctx context.Context, tx *sql.Tx, path strin
 func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 	var file File
 	var idStr, pathStr, sourcePathStr string
-	var mtimeUnix int64
+	var mtimeUnix, ctimeUnix int64
 	var linkTarget sql.NullString
 
 	err := row.Scan(
@@ -130,6 +131,7 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 		&pathStr,
 		&sourcePathStr,
 		&mtimeUnix,
+		&ctimeUnix,
 		&file.Size,
 		&file.Mode,
 		&file.UID,
@@ -147,6 +149,7 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 	file.Path = types.FilePath(pathStr)
 	file.SourcePath = types.SourcePath(sourcePathStr)
 	file.MTime = time.Unix(mtimeUnix, 0).UTC()
+	file.CTime = time.Unix(ctimeUnix, 0).UTC()
 	if linkTarget.Valid {
 		file.LinkTarget = types.FilePath(linkTarget.String)
 	}
@@ -158,7 +161,7 @@ func (r *FileRepository) scanFile(row *sql.Row) (*File, error) {
 func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
 	var file File
 	var idStr, pathStr, sourcePathStr string
-	var mtimeUnix int64
+	var mtimeUnix, ctimeUnix int64
 	var linkTarget sql.NullString
 
 	err := rows.Scan(
@@ -166,6 +169,7 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
 		&pathStr,
 		&sourcePathStr,
 		&mtimeUnix,
+		&ctimeUnix,
 		&file.Size,
 		&file.Mode,
 		&file.UID,
@@ -183,6 +187,7 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
 	file.Path = types.FilePath(pathStr)
 	file.SourcePath = types.SourcePath(sourcePathStr)
 	file.MTime = time.Unix(mtimeUnix, 0).UTC()
+	file.CTime = time.Unix(ctimeUnix, 0).UTC()
 	if linkTarget.Valid {
 		file.LinkTarget = types.FilePath(linkTarget.String)
 	}
@@ -192,7 +197,7 @@ func (r *FileRepository) scanFileRows(rows *sql.Rows) (*File, error) {
 
 func (r *FileRepository) ListModifiedSince(ctx context.Context, since time.Time) ([]*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE mtime >= ?
 		ORDER BY path
@@ -253,7 +258,7 @@ func (r *FileRepository) DeleteByID(ctx context.Context, tx *sql.Tx, id types.Fi
 
 func (r *FileRepository) ListByPrefix(ctx context.Context, prefix string) ([]*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
 		FROM files
 		WHERE path LIKE ? || '%'
 		ORDER BY path
@@ -280,7 +285,7 @@ func (r *FileRepository) ListByPrefix(ctx context.Context, prefix string) ([]*Fi
 // ListAll returns all files in the database
 func (r *FileRepository) ListAll(ctx context.Context) ([]*File, error) {
 	query := `
-		SELECT id, path, source_path, mtime, size, mode, uid, gid, link_target
+		SELECT id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target
 		FROM files
 		ORDER BY path
 	`
@@ -310,7 +315,7 @@ func (r *FileRepository) CreateBatch(ctx context.Context, tx *sql.Tx, files []*F
 		return nil
 	}
 
-	// Each File has 9 values, so batch at 100 to be safe with SQLite's variable limit
+	// Each File has 10 values, so batch at 100 to be safe with SQLite's variable limit
 	const batchSize = 100
 
 	for i := 0; i < len(files); i += batchSize {
@@ -320,18 +325,19 @@ func (r *FileRepository) CreateBatch(ctx context.Context, tx *sql.Tx, files []*F
 		}
 		batch := files[i:end]
 
-		query := `INSERT INTO files (id, path, source_path, mtime, size, mode, uid, gid, link_target) VALUES `
-		args := make([]interface{}, 0, len(batch)*9)
+		query := `INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target) VALUES `
+		args := make([]interface{}, 0, len(batch)*10)
 		for j, f := range batch {
 			if j > 0 {
 				query += ", "
 			}
-			query += "(?, ?, ?, ?, ?, ?, ?, ?, ?)"
-			args = append(args, f.ID.String(), f.Path.String(), f.SourcePath.String(), f.MTime.Unix(), f.Size, f.Mode, f.UID, f.GID, f.LinkTarget.String())
+			query += "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+			args = append(args, f.ID.String(), f.Path.String(), f.SourcePath.String(), f.MTime.Unix(), f.CTime.Unix(), f.Size, f.Mode, f.UID, f.GID, f.LinkTarget.String())
 		}
 		query += ` ON CONFLICT(path) DO UPDATE SET
 			source_path = excluded.source_path,
 			mtime = excluded.mtime,
+			ctime = excluded.ctime,
 			size = excluded.size,
 			mode = excluded.mode,
 			uid = excluded.uid,
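To make the placeholder arithmetic above concrete: with `ctime` added, each file contributes ten bound parameters, so a batch of two files expands to a statement of roughly this shape. This is a sketch of the generated SQL, not code copied from the repository, and the tail of the `ON CONFLICT` clause is elided just as it is in the hunk:

```go
package main

import "fmt"

func main() {
	// What CreateBatch builds for a batch of two files after this change:
	// ten placeholders per row, so a batch of two binds 20 arguments.
	query := `INSERT INTO files (id, path, source_path, mtime, ctime, size, mode, uid, gid, link_target) VALUES ` +
		`(?, ?, ?, ?, ?, ?, ?, ?, ?, ?), (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` +
		` ON CONFLICT(path) DO UPDATE SET
			source_path = excluded.source_path,
			mtime = excluded.mtime,
			ctime = excluded.ctime,
			size = excluded.size,
			mode = excluded.mode,
			uid = excluded.uid` // remaining SET columns elided, as in the hunk above
	fmt.Println(query)
}
```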
@@ -39,6 +39,7 @@ func TestFileRepository(t *testing.T) {
 	file := &File{
 		Path:  "/test/file.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -123,6 +124,7 @@ func TestFileRepositorySymlink(t *testing.T) {
 	symlink := &File{
 		Path:  "/test/link",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  0,
 		Mode:  uint32(0777 | os.ModeSymlink),
 		UID:   1000,
@@ -159,6 +161,7 @@ func TestFileRepositoryTransaction(t *testing.T) {
 	file := &File{
 		Path:  "/test/tx_file.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -17,6 +17,7 @@ type File struct {
 	Path       types.FilePath   // Absolute path of the file
 	SourcePath types.SourcePath // The source directory this file came from (for restore path stripping)
 	MTime      time.Time
+	CTime      time.Time
 	Size       int64
 	Mode       uint32
 	UID        uint32
@@ -23,6 +23,7 @@ func TestRepositoriesTransaction(t *testing.T) {
 	file := &File{
 		Path:  "/test/tx_file.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -145,6 +146,7 @@ func TestRepositoriesTransactionRollback(t *testing.T) {
 	file := &File{
 		Path:  "/test/rollback_file.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -200,6 +202,7 @@ func TestRepositoriesReadTransaction(t *testing.T) {
 	file := &File{
 		Path:  "/test/read_file.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -223,6 +226,7 @@ func TestRepositoriesReadTransaction(t *testing.T) {
 	_ = repos.Files.Create(ctx, tx, &File{
 		Path:  "/test/should_fail.txt",
 		MTime: time.Now(),
+		CTime: time.Now(),
 		Size:  0,
 		Mode:  0644,
 		UID:   1000,
@@ -23,6 +23,7 @@ func TestFileRepositoryUUIDGeneration(t *testing.T) {
 		{
 			Path:  "/file1.txt",
 			MTime: time.Now().Truncate(time.Second),
+			CTime: time.Now().Truncate(time.Second),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -31,6 +32,7 @@ func TestFileRepositoryUUIDGeneration(t *testing.T) {
 		{
 			Path:  "/file2.txt",
 			MTime: time.Now().Truncate(time.Second),
+			CTime: time.Now().Truncate(time.Second),
 			Size:  2048,
 			Mode:  0644,
 			UID:   1000,
@@ -70,6 +72,7 @@ func TestFileRepositoryGetByID(t *testing.T) {
 	file := &File{
 		Path:  "/test.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -117,6 +120,7 @@ func TestOrphanedFileCleanup(t *testing.T) {
 	file1 := &File{
 		Path:  "/orphaned.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -125,6 +129,7 @@ func TestOrphanedFileCleanup(t *testing.T) {
 	file2 := &File{
 		Path:  "/referenced.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -213,6 +218,7 @@ func TestOrphanedChunkCleanup(t *testing.T) {
 	file := &File{
 		Path:  "/test.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -342,6 +348,7 @@ func TestFileChunkRepositoryWithUUIDs(t *testing.T) {
 	file := &File{
 		Path:  "/test.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  3072,
 		Mode:  0644,
 		UID:   1000,
@@ -412,6 +419,7 @@ func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
 	file1 := &File{
 		Path:  "/file1.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -420,6 +428,7 @@ func TestChunkFileRepositoryWithUUIDs(t *testing.T) {
 	file2 := &File{
 		Path:  "/file2.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -577,6 +586,7 @@ func TestComplexOrphanedDataScenario(t *testing.T) {
 		files[i] = &File{
 			Path:  types.FilePath(fmt.Sprintf("/file%d.txt", i)),
 			MTime: time.Now().Truncate(time.Second),
+			CTime: time.Now().Truncate(time.Second),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -668,6 +678,7 @@ func TestCascadeDelete(t *testing.T) {
 	file := &File{
 		Path:  "/cascade-test.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -739,6 +750,7 @@ func TestTransactionIsolation(t *testing.T) {
 	file := &File{
 		Path:  "/tx-test.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -800,6 +812,7 @@ func TestConcurrentOrphanedCleanup(t *testing.T) {
 		file := &File{
 			Path:  types.FilePath(fmt.Sprintf("/concurrent-%d.txt", i)),
 			MTime: time.Now().Truncate(time.Second),
+			CTime: time.Now().Truncate(time.Second),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -18,6 +18,7 @@ func TestOrphanedFileCleanupDebug(t *testing.T) {
 	file1 := &File{
 		Path:  "/orphaned.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -26,6 +27,7 @@ func TestOrphanedFileCleanupDebug(t *testing.T) {
 	file2 := &File{
 		Path:  "/referenced.txt",
 		MTime: time.Now().Truncate(time.Second),
+		CTime: time.Now().Truncate(time.Second),
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -29,6 +29,7 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "",
 				MTime: time.Now(),
+				CTime: time.Now(),
 				Size:  1024,
 				Mode:  0644,
 				UID:   1000,
@@ -41,6 +42,7 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  types.FilePath("/" + strings.Repeat("a", 4096)),
 				MTime: time.Now(),
+				CTime: time.Now(),
 				Size:  1024,
 				Mode:  0644,
 				UID:   1000,
@@ -53,6 +55,7 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "/test/file with spaces and 特殊文字.txt",
 				MTime: time.Now(),
+				CTime: time.Now(),
 				Size:  1024,
 				Mode:  0644,
 				UID:   1000,
@@ -65,6 +68,7 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "/empty.txt",
 				MTime: time.Now(),
+				CTime: time.Now(),
 				Size:  0,
 				Mode:  0644,
 				UID:   1000,
@@ -77,6 +81,7 @@ func TestFileRepositoryEdgeCases(t *testing.T) {
 			file: &File{
 				Path:  "/link",
 				MTime: time.Now(),
+				CTime: time.Now(),
 				Size:  0,
 				Mode:  0777 | 0120000, // symlink mode
 				UID:   1000,
@@ -118,6 +123,7 @@ func TestDuplicateHandling(t *testing.T) {
 	file1 := &File{
 		Path:  "/duplicate.txt",
 		MTime: time.Now(),
+		CTime: time.Now(),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -126,6 +132,7 @@ func TestDuplicateHandling(t *testing.T) {
 	file2 := &File{
 		Path:  "/duplicate.txt", // Same path
 		MTime: time.Now().Add(time.Hour),
+		CTime: time.Now().Add(time.Hour),
 		Size:  2048,
 		Mode:  0644,
 		UID:   1000,
@@ -185,6 +192,7 @@ func TestDuplicateHandling(t *testing.T) {
 	file := &File{
 		Path:  "/test-dup-fc.txt",
 		MTime: time.Now(),
+		CTime: time.Now(),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -236,6 +244,7 @@ func TestNullHandling(t *testing.T) {
 	file := &File{
 		Path:  "/regular.txt",
 		MTime: time.Now(),
+		CTime: time.Now(),
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -340,6 +349,7 @@ func TestLargeDatasets(t *testing.T) {
 		file := &File{
 			Path:  types.FilePath(fmt.Sprintf("/large/file%05d.txt", i)),
 			MTime: time.Now(),
+			CTime: time.Now(),
 			Size:  int64(i * 1024),
 			Mode:  0644,
 			UID:   uint32(1000 + (i % 10)),
@@ -464,6 +474,7 @@ func TestQueryInjection(t *testing.T) {
 		file := &File{
 			Path:  types.FilePath(injection),
 			MTime: time.Now(),
+			CTime: time.Now(),
 			Size:  1024,
 			Mode:  0644,
 			UID:   1000,
@@ -502,6 +513,7 @@ func TestTimezoneHandling(t *testing.T) {
 	file := &File{
 		Path:  "/timezone-test.txt",
 		MTime: nyTime,
+		CTime: nyTime,
 		Size:  1024,
 		Mode:  0644,
 		UID:   1000,
@@ -8,6 +8,7 @@ CREATE TABLE IF NOT EXISTS files (
     path TEXT NOT NULL UNIQUE,
     source_path TEXT NOT NULL DEFAULT '', -- The source directory this file came from (for restore path stripping)
     mtime INTEGER NOT NULL,
+    ctime INTEGER NOT NULL,
     size INTEGER NOT NULL,
     mode INTEGER NOT NULL,
     uid INTEGER NOT NULL,
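The hunk above only touches the `CREATE TABLE IF NOT EXISTS` statement, which applies to freshly created databases. If an existing local database needs to keep working without being rebuilt, a migration along the following lines would be one option. This is a hedged, hypothetical sketch; the function name and its wiring are assumptions, not code from this compare:

```go
package migrations

import (
	"context"
	"database/sql"
	"fmt"
)

// migrateAddCtime is a hypothetical migration: it adds the new ctime column to
// an existing files table and backfills it from mtime so the NOT NULL
// constraint is satisfied. SQLite only allows adding a NOT NULL column when a
// default is supplied, hence DEFAULT 0 followed by a backfill.
func migrateAddCtime(ctx context.Context, db *sql.DB) error {
	if _, err := db.ExecContext(ctx,
		`ALTER TABLE files ADD COLUMN ctime INTEGER NOT NULL DEFAULT 0`); err != nil {
		return fmt.Errorf("adding ctime column: %w", err)
	}
	if _, err := db.ExecContext(ctx,
		`UPDATE files SET ctime = mtime WHERE ctime = 0`); err != nil {
		return fmt.Errorf("backfilling ctime: %w", err)
	}
	return nil
}
```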
@@ -102,7 +103,7 @@ CREATE TABLE IF NOT EXISTS snapshot_files (
     file_id TEXT NOT NULL,
     PRIMARY KEY (snapshot_id, file_id),
     FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
-    FOREIGN KEY (file_id) REFERENCES files(id) ON DELETE CASCADE
+    FOREIGN KEY (file_id) REFERENCES files(id)
 );
 
 -- Index for efficient file lookups (used in orphan detection)
@@ -115,7 +116,7 @@ CREATE TABLE IF NOT EXISTS snapshot_blobs (
     blob_hash TEXT NOT NULL,
     PRIMARY KEY (snapshot_id, blob_id),
     FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE,
-    FOREIGN KEY (blob_id) REFERENCES blobs(id) ON DELETE CASCADE
+    FOREIGN KEY (blob_id) REFERENCES blobs(id)
 );
 
 -- Index for efficient blob lookups (used in orphan detection)
@@ -129,7 +130,7 @@ CREATE TABLE IF NOT EXISTS uploads (
     size INTEGER NOT NULL,
     duration_ms INTEGER NOT NULL,
     FOREIGN KEY (blob_hash) REFERENCES blobs(blob_hash),
-    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id) ON DELETE CASCADE
+    FOREIGN KEY (snapshot_id) REFERENCES snapshots(id)
 );
 
 -- Index for efficient snapshot lookups
@@ -345,6 +345,7 @@ func (b *BackupEngine) Backup(ctx context.Context, fsys fs.FS, root string) (str
 			Size:  info.Size(),
 			Mode:  uint32(info.Mode()),
 			MTime: info.ModTime(),
+			CTime: info.ModTime(), // Use mtime as ctime for test
 			UID:   1000, // Default UID for test
 			GID:   1000, // Default GID for test
 		}
@@ -785,6 +785,7 @@ func (s *Scanner) checkFileInMemory(path string, info os.FileInfo, knownFiles ma
 		Path:       types.FilePath(path),
 		SourcePath: types.SourcePath(s.currentSourcePath), // Store source directory for restore path stripping
 		MTime:      info.ModTime(),
+		CTime:      info.ModTime(), // afero doesn't provide ctime
 		Size:       info.Size(),
 		Mode:       uint32(info.Mode()),
 		UID:        uid,
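Here the scanner reuses mtime because the abstracted filesystem (afero) does not expose a change time. When a file actually comes from the host OS on Linux, the real ctime is available from the underlying stat data; a hedged, Linux-specific sketch of how it could be recovered (this helper does not exist anywhere in the compare above):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
	"time"
)

// changeTime returns the file's ctime when the os.FileInfo comes from a real
// Linux stat(2) call, and falls back to mtime otherwise (for example, for
// afero in-memory filesystems used in tests). Hypothetical helper, not part of
// the change above.
func changeTime(info os.FileInfo) time.Time {
	if st, ok := info.Sys().(*syscall.Stat_t); ok && st != nil {
		return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
	}
	return info.ModTime()
}

func main() {
	info, err := os.Stat("/etc/hostname")
	if err != nil {
		fmt.Println("stat:", err)
		return
	}
	fmt.Println("ctime:", changeTime(info))
}
```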
@@ -1,93 +0,0 @@
-package vaultik
-
-import (
-	"context"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-	"io"
-
-	"filippo.io/age"
-	"git.eeqj.de/sneak/vaultik/internal/blobgen"
-)
-
-// hashVerifyReader wraps a blobgen.Reader and verifies the double-SHA-256 hash
-// of decrypted plaintext when Close is called. It reuses the hash that
-// blobgen.Reader already computes internally via its TeeReader, avoiding
-// redundant SHA-256 computation.
-type hashVerifyReader struct {
-	reader   *blobgen.Reader // underlying decrypted blob reader (has internal hasher)
-	fetcher  io.ReadCloser   // raw fetched stream (closed on Close)
-	blobHash string          // expected double-SHA-256 hex
-	done     bool            // EOF reached
-}
-
-func (h *hashVerifyReader) Read(p []byte) (int, error) {
-	n, err := h.reader.Read(p)
-	if err == io.EOF {
-		h.done = true
-	}
-	return n, err
-}
-
-// Close verifies the hash (if the stream was fully read) and closes underlying readers.
-func (h *hashVerifyReader) Close() error {
-	readerErr := h.reader.Close()
-	fetcherErr := h.fetcher.Close()
-
-	if h.done {
-		firstHash := h.reader.Sum256()
-		secondHasher := sha256.New()
-		secondHasher.Write(firstHash)
-		actualHashHex := hex.EncodeToString(secondHasher.Sum(nil))
-		if actualHashHex != h.blobHash {
-			return fmt.Errorf("blob hash mismatch: expected %s, got %s", h.blobHash[:16], actualHashHex[:16])
-		}
-	}
-
-	if readerErr != nil {
-		return readerErr
-	}
-	return fetcherErr
-}
-
-// FetchAndDecryptBlob downloads a blob, decrypts and decompresses it, and
-// returns a streaming reader that computes the double-SHA-256 hash on the fly.
-// The hash is verified when the returned reader is closed (after fully reading).
-// This avoids buffering the entire blob in memory.
-func (v *Vaultik) FetchAndDecryptBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) (io.ReadCloser, error) {
-	rc, _, err := v.FetchBlob(ctx, blobHash, expectedSize)
-	if err != nil {
-		return nil, err
-	}
-
-	reader, err := blobgen.NewReader(rc, identity)
-	if err != nil {
-		_ = rc.Close()
-		return nil, fmt.Errorf("creating blob reader: %w", err)
-	}
-
-	return &hashVerifyReader{
-		reader:   reader,
-		fetcher:  rc,
-		blobHash: blobHash,
-	}, nil
-}
-
-// FetchBlob downloads a blob and returns a reader for the encrypted data.
-func (v *Vaultik) FetchBlob(ctx context.Context, blobHash string, expectedSize int64) (io.ReadCloser, int64, error) {
-	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
-
-	rc, err := v.Storage.Get(ctx, blobPath)
-	if err != nil {
-		return nil, 0, fmt.Errorf("downloading blob %s: %w", blobHash[:16], err)
-	}
-
-	info, err := v.Storage.Stat(ctx, blobPath)
-	if err != nil {
-		_ = rc.Close()
-		return nil, 0, fmt.Errorf("stat blob %s: %w", blobHash[:16], err)
-	}
-
-	return rc, info.Size, nil
-}
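For clarity about what the deleted reader verified: the blob identifier is the hex encoding of SHA-256 applied twice to the decrypted plaintext, which is exactly what the deleted test below computes by hand. A minimal standalone sketch of that computation:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// doubleSHA256Hex computes hex(SHA-256(SHA-256(data))), the blob hash format
// that the deleted hashVerifyReader compared against on Close.
func doubleSHA256Hex(data []byte) string {
	first := sha256.Sum256(data)
	second := sha256.Sum256(first[:])
	return hex.EncodeToString(second[:])
}

func main() {
	plaintext := []byte("hello world test data for blob hash verification")
	fmt.Println(doubleSHA256Hex(plaintext))
}
```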
@@ -1,100 +0,0 @@
-package vaultik_test
-
-import (
-	"bytes"
-	"context"
-	"crypto/sha256"
-	"encoding/hex"
-	"io"
-	"strings"
-	"testing"
-
-	"filippo.io/age"
-	"git.eeqj.de/sneak/vaultik/internal/blobgen"
-	"git.eeqj.de/sneak/vaultik/internal/vaultik"
-)
-
-// TestFetchAndDecryptBlobVerifiesHash verifies that FetchAndDecryptBlob checks
-// the double-SHA-256 hash of the decrypted plaintext against the expected blob hash.
-func TestFetchAndDecryptBlobVerifiesHash(t *testing.T) {
-	identity, err := age.GenerateX25519Identity()
-	if err != nil {
-		t.Fatalf("generating identity: %v", err)
-	}
-
-	// Create test data and encrypt it using blobgen.Writer
-	plaintext := []byte("hello world test data for blob hash verification")
-	var encBuf bytes.Buffer
-	writer, err := blobgen.NewWriter(&encBuf, 1, []string{identity.Recipient().String()})
-	if err != nil {
-		t.Fatalf("creating blobgen writer: %v", err)
-	}
-	if _, err := writer.Write(plaintext); err != nil {
-		t.Fatalf("writing plaintext: %v", err)
-	}
-	if err := writer.Close(); err != nil {
-		t.Fatalf("closing writer: %v", err)
-	}
-	encryptedData := encBuf.Bytes()
-
-	// Compute correct double-SHA-256 hash of the plaintext (matches blobgen.Writer.Sum256)
-	firstHash := sha256.Sum256(plaintext)
-	secondHash := sha256.Sum256(firstHash[:])
-	correctHash := hex.EncodeToString(secondHash[:])
-
-	// Verify our hash matches what blobgen.Writer produces
-	writerHash := hex.EncodeToString(writer.Sum256())
-	if correctHash != writerHash {
-		t.Fatalf("hash computation mismatch: manual=%s, writer=%s", correctHash, writerHash)
-	}
-
-	// Set up mock storage with the blob at the correct path
-	mockStorage := NewMockStorer()
-	blobPath := "blobs/" + correctHash[:2] + "/" + correctHash[2:4] + "/" + correctHash
-	mockStorage.mu.Lock()
-	mockStorage.data[blobPath] = encryptedData
-	mockStorage.mu.Unlock()
-
-	tv := vaultik.NewForTesting(mockStorage)
-	ctx := context.Background()
-
-	t.Run("correct hash succeeds", func(t *testing.T) {
-		rc, err := tv.FetchAndDecryptBlob(ctx, correctHash, int64(len(encryptedData)), identity)
-		if err != nil {
-			t.Fatalf("expected success, got error: %v", err)
-		}
-		data, err := io.ReadAll(rc)
-		if err != nil {
-			t.Fatalf("reading stream: %v", err)
-		}
-		if err := rc.Close(); err != nil {
-			t.Fatalf("close (hash verification) failed: %v", err)
-		}
-		if !bytes.Equal(data, plaintext) {
-			t.Fatalf("decrypted data mismatch: got %q, want %q", data, plaintext)
-		}
-	})
-
-	t.Run("wrong hash fails", func(t *testing.T) {
-		// Use a fake hash that doesn't match the actual plaintext
-		fakeHash := strings.Repeat("ab", 32) // 64 hex chars
-		fakePath := "blobs/" + fakeHash[:2] + "/" + fakeHash[2:4] + "/" + fakeHash
-		mockStorage.mu.Lock()
-		mockStorage.data[fakePath] = encryptedData
-		mockStorage.mu.Unlock()
-
-		rc, err := tv.FetchAndDecryptBlob(ctx, fakeHash, int64(len(encryptedData)), identity)
-		if err != nil {
-			t.Fatalf("unexpected error opening stream: %v", err)
-		}
-		// Read all data — hash is verified on Close
-		_, _ = io.ReadAll(rc)
-		err = rc.Close()
-		if err == nil {
-			t.Fatal("expected error for mismatched hash, got nil")
-		}
-		if !strings.Contains(err.Error(), "hash mismatch") {
-			t.Fatalf("expected hash mismatch error, got: %v", err)
-		}
-	})
-}
internal/vaultik/blob_fetch_stub.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+package vaultik
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"filippo.io/age"
+	"git.eeqj.de/sneak/vaultik/internal/blobgen"
+)
+
+// FetchAndDecryptBlobResult holds the result of fetching and decrypting a blob.
+type FetchAndDecryptBlobResult struct {
+	Data []byte
+}
+
+// FetchAndDecryptBlob downloads a blob, decrypts it, and returns the plaintext data.
+func (v *Vaultik) FetchAndDecryptBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) (*FetchAndDecryptBlobResult, error) {
+	rc, _, err := v.FetchBlob(ctx, blobHash, expectedSize)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = rc.Close() }()
+
+	reader, err := blobgen.NewReader(rc, identity)
+	if err != nil {
+		return nil, fmt.Errorf("creating blob reader: %w", err)
+	}
+	defer func() { _ = reader.Close() }()
+
+	data, err := io.ReadAll(reader)
+	if err != nil {
+		return nil, fmt.Errorf("reading blob data: %w", err)
+	}
+
+	return &FetchAndDecryptBlobResult{Data: data}, nil
+}
+
+// FetchBlob downloads a blob and returns a reader for the encrypted data.
+func (v *Vaultik) FetchBlob(ctx context.Context, blobHash string, expectedSize int64) (io.ReadCloser, int64, error) {
+	blobPath := fmt.Sprintf("blobs/%s/%s/%s", blobHash[:2], blobHash[2:4], blobHash)
+
+	rc, err := v.Storage.Get(ctx, blobPath)
+	if err != nil {
+		return nil, 0, fmt.Errorf("downloading blob %s: %w", blobHash[:16], err)
+	}
+
+	info, err := v.Storage.Stat(ctx, blobPath)
+	if err != nil {
+		_ = rc.Close()
+		return nil, 0, fmt.Errorf("stat blob %s: %w", blobHash[:16], err)
+	}
+
+	return rc, info.Size, nil
+}
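Callers of the new stub receive the whole decrypted blob at once instead of a stream; a minimal call-site sketch against the new signature (the surrounding variables `ctx`, `v`, `blobHash`, `expectedSize` and `identity` are assumed to exist at the call site):

```go
// Sketch of a call site using the non-streaming API introduced above; the
// entire decrypted blob is buffered in result.Data.
result, err := v.FetchAndDecryptBlob(ctx, blobHash, expectedSize, identity)
if err != nil {
	return fmt.Errorf("fetching blob %s: %w", blobHash[:16], err)
}
plaintext := result.Data
_ = plaintext // use the decrypted bytes, e.g. to write chunks to the restored file
```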
@@ -558,23 +558,11 @@ func (v *Vaultik) restoreRegularFile(
 
 // downloadBlob downloads and decrypts a blob
 func (v *Vaultik) downloadBlob(ctx context.Context, blobHash string, expectedSize int64, identity age.Identity) ([]byte, error) {
-	rc, err := v.FetchAndDecryptBlob(ctx, blobHash, expectedSize, identity)
+	result, err := v.FetchAndDecryptBlob(ctx, blobHash, expectedSize, identity)
 	if err != nil {
 		return nil, err
 	}
-
-	data, err := io.ReadAll(rc)
-	if err != nil {
-		_ = rc.Close()
-		return nil, fmt.Errorf("reading blob data: %w", err)
-	}
-
-	// Close triggers hash verification
-	if err := rc.Close(); err != nil {
-		return nil, err
-	}
-
-	return data, nil
+	return result.Data, nil
 }
 
 // verifyRestoredFiles verifies that all restored files match their expected chunk hashes
@@ -419,7 +419,7 @@ func (v *Vaultik) listRemoteSnapshotIDs() (map[string]bool, error) {
 	return remoteSnapshots, nil
 }
 
-// reconcileLocalWithRemote builds a map of local snapshots keyed by ID for cross-referencing with remote
+// reconcileLocalWithRemote removes local snapshots not in remote and returns the surviving local map
 func (v *Vaultik) reconcileLocalWithRemote(remoteSnapshots map[string]bool) (map[string]*database.Snapshot, error) {
 	localSnapshots, err := v.Repositories.Snapshots.ListRecent(v.ctx, 10000)
 	if err != nil {
@@ -431,6 +431,19 @@ func (v *Vaultik) reconcileLocalWithRemote(remoteSnapshots map[string]bool) (map
 		localSnapshotMap[s.ID.String()] = s
 	}
 
+	for _, snap := range localSnapshots {
+		snapshotIDStr := snap.ID.String()
+		if !remoteSnapshots[snapshotIDStr] {
+			log.Info("Removing local snapshot not found in remote", "snapshot_id", snap.ID)
+			if err := v.deleteSnapshotFromLocalDB(snapshotIDStr); err != nil {
+				log.Error("Failed to delete local snapshot", "snapshot_id", snap.ID, "error", err)
+			} else {
+				log.Info("Deleted local snapshot not found in remote", "snapshot_id", snap.ID)
+				delete(localSnapshotMap, snapshotIDStr)
+			}
+		}
+	}
+
 	return localSnapshotMap, nil
 }
 
@@ -859,7 +872,7 @@ func (v *Vaultik) syncWithRemote() error {
 		snapshotIDStr := snapshot.ID.String()
 		if !remoteSnapshots[snapshotIDStr] {
 			log.Info("Removing local snapshot not found in remote", "snapshot_id", snapshot.ID)
-			if err := v.deleteSnapshotFromLocalDB(snapshotIDStr); err != nil {
+			if err := v.Repositories.Snapshots.Delete(v.ctx, snapshotIDStr); err != nil {
 				log.Error("Failed to delete local snapshot", "snapshot_id", snapshot.ID, "error", err)
 			} else {
 				removedCount++