Compare commits
5 Commits
fix/issue-
...
b8e544bca8
| Author | SHA1 | Date | |
|---|---|---|---|
| b8e544bca8 | |||
| 5c76824e14 | |||
| 4ac80cfcec | |||
| 6ba6eb876b | |||
| 591db8d541 |
@@ -1,3 +0,0 @@
|
|||||||
*.tmp
|
|
||||||
*.dockerimage
|
|
||||||
.git
|
|
||||||
11
.drone.yml
11
.drone.yml
@@ -7,17 +7,8 @@ steps:
|
|||||||
network_mode: bridge
|
network_mode: bridge
|
||||||
settings:
|
settings:
|
||||||
repo: sneak/mfer
|
repo: sneak/mfer
|
||||||
build_args_from_env: [ DRONE_COMMIT_SHA ]
|
|
||||||
dry_run: true
|
dry_run: true
|
||||||
custom_dns: [ 116.202.204.30 ]
|
custom_dns: [ 116.202.204.30 ]
|
||||||
tags:
|
tags:
|
||||||
- ${DRONE_COMMIT_SHA:0:7}
|
- ${DRONE_COMMIT_SHA}
|
||||||
- ${DRONE_BRANCH}
|
- ${DRONE_BRANCH}
|
||||||
- latest
|
|
||||||
- name: notify
|
|
||||||
image: plugins/slack
|
|
||||||
settings:
|
|
||||||
webhook:
|
|
||||||
from_secret: SLACK_WEBHOOK_URL
|
|
||||||
when:
|
|
||||||
event: pull_request
|
|
||||||
|
|||||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -1,5 +1,2 @@
|
|||||||
/bin/
|
mfer/*.pb.go
|
||||||
/tmp
|
/mfer.cmd
|
||||||
*.tmp
|
|
||||||
*.dockerimage
|
|
||||||
/vendor
|
|
||||||
|
|||||||
20
CLAUDE.md
20
CLAUDE.md
@@ -1,20 +0,0 @@
|
|||||||
# Important Rules
|
|
||||||
|
|
||||||
- when fixing a bug, write a failing test FIRST. only after the test fails, write
|
|
||||||
the code to fix the bug. then ensure the test passes. leave the test in
|
|
||||||
place and commit it with the bugfix. don't run shell commands to test
|
|
||||||
bugfixes or reproduce bugs. write tests!
|
|
||||||
|
|
||||||
- never, ever mention claude or anthropic in commit messages. do not use attribution
|
|
||||||
|
|
||||||
- after each change, run "make fmt".
|
|
||||||
|
|
||||||
- after each change, run "make test" and ensure all tests pass.
|
|
||||||
|
|
||||||
- after each change, run "make lint" and ensure no linting errors. fix any
|
|
||||||
you find, one by one.
|
|
||||||
|
|
||||||
- after each change, commit the files you've changed. push after
|
|
||||||
committing.
|
|
||||||
|
|
||||||
- NEVER use `git add -A`. always add only individual files that you've changed.
|
|
||||||
73
Dockerfile
73
Dockerfile
@@ -1,37 +1,42 @@
|
|||||||
################################################################################
|
## lint image
|
||||||
#2345678911234567892123456789312345678941234567895123456789612345678971234567898
|
## current as of 2022-01-25
|
||||||
################################################################################
|
FROM golangci/golangci-lint@sha256:d16ef91da7e10f3df45c36876543326abbc4c16aaab6548549560b9f52e9e831 AS linter
|
||||||
FROM sneak/builder:2022-12-08 AS builder
|
|
||||||
ENV DEBIAN_FRONTEND noninteractive
|
RUN mkdir -p /build
|
||||||
WORKDIR /build
|
WORKDIR /build
|
||||||
COPY ./Makefile ./.golangci.yml ./go.mod ./go.sum /build/
|
COPY ./ ./
|
||||||
COPY ./vendor.tzst /build/vendor.tzst
|
RUN golangci-lint run
|
||||||
COPY ./modcache.tzst /build/modcache.tzst
|
|
||||||
COPY ./internal ./internal
|
## build image:
|
||||||
COPY ./bin/gitrev.sh ./bin/gitrev.sh
|
# this is golang:1.17.6-buster as of 2022-01-27
|
||||||
COPY ./mfer ./mfer
|
FROM golang@sha256:52a48e0239f4d645b20ac268a60361703afe7feb2df5697fa89f72052cb87a3e AS builder
|
||||||
COPY ./cmd ./cmd
|
#FROM golang:1.16-buster AS builder
|
||||||
ARG GITREV unknown
|
|
||||||
ARG DRONE_COMMIT_SHA unknown
|
ENV DEBIAN_FRONTEND noninteractive
|
||||||
|
RUN apt update && apt install -y make bzip2 curl unzip
|
||||||
|
|
||||||
|
# install newer protoc than what comes with buster
|
||||||
|
ENV PB_REL https://github.com/protocolbuffers/protobuf/releases
|
||||||
|
RUN curl -LO $PB_REL/download/v3.19.0/protoc-3.19.0-linux-x86_64.zip && \
|
||||||
|
unzip protoc-3.19.0-linux-x86_64.zip -d /usr/local
|
||||||
|
|
||||||
|
RUN mkdir -p /build
|
||||||
|
WORKDIR /build
|
||||||
|
|
||||||
|
COPY go.mod .
|
||||||
|
COPY go.sum .
|
||||||
|
|
||||||
|
COPY ./ ./
|
||||||
|
# don't lint again during build because there's no golangci-lint in this
|
||||||
|
# image and we already did it in a previous stage
|
||||||
|
#RUN make lint
|
||||||
|
RUN make mfer
|
||||||
|
#RUN go mod vendor
|
||||||
|
RUN tar -c . | bzip2 > /src.tbz2
|
||||||
|
|
||||||
|
|
||||||
RUN mkdir -p "$(go env GOMODCACHE)" && cd "$(go env GOMODCACHE)" && \
|
|
||||||
zstdmt -d --stdout /build/modcache.tzst | tar xf - && \
|
|
||||||
rm /build/modcache.tzst && cd /build
|
|
||||||
RUN \
|
|
||||||
cd mfer && go generate . && cd .. && \
|
|
||||||
GOPACKAGESDEBUG=true golangci-lint run ./... && \
|
|
||||||
mkdir vendor && cd vendor && \
|
|
||||||
zstdmt -d --stdout /build/vendor.tzst | tar xf - && rm /build/vendor.tzst && \
|
|
||||||
cd .. && \
|
|
||||||
make mfer.cmd
|
|
||||||
RUN rm -rf /build/vendor && go mod vendor && tar -c . | zstdmt -19 > /src.tzst
|
|
||||||
################################################################################
|
|
||||||
#2345678911234567892123456789312345678941234567895123456789612345678971234567898
|
|
||||||
################################################################################
|
|
||||||
## final image
|
|
||||||
################################################################################
|
|
||||||
FROM scratch
|
FROM scratch
|
||||||
# we put all the source into the final image for posterity, it's small
|
COPY --from=builder /src.tbz2 /src.tbz2
|
||||||
COPY --from=builder /src.tzst /src.tzst
|
COPY --from=builder /build/mfer /mfer
|
||||||
COPY --from=builder /build/mfer.cmd /mfer
|
|
||||||
ENTRYPOINT ["/mfer"]
|
ENTRYPOINT ["/mfer"]
|
||||||
|
|
||||||
|
|||||||
70
Makefile
70
Makefile
@@ -1,81 +1,43 @@
|
|||||||
export DOCKER_BUILDKIT := 1
|
|
||||||
export PROGRESS_NO_TRUNC := 1
|
|
||||||
GOPATH := $(shell go env GOPATH)
|
GOPATH := $(shell go env GOPATH)
|
||||||
export PATH := $(PATH):$(GOPATH)/bin
|
export PATH := $(PATH):$(GOPATH)/bin
|
||||||
PROTOC_GEN_GO := $(GOPATH)/bin/protoc-gen-go
|
PROTOC_GEN_GO := $(GOPATH)/bin/protoc-gen-go
|
||||||
SOURCEFILES := mfer/*.go mfer/*.proto internal/*/*.go cmd/*/*.go go.mod go.sum
|
|
||||||
ARCH := $(shell uname -m)
|
ARCH := $(shell uname -m)
|
||||||
GITREV_BUILD := $(shell bash $(PWD)/bin/gitrev.sh)
|
VERSION := $(shell git describe --always --dirty=-dirty)
|
||||||
APPNAME := mfer
|
|
||||||
VERSION := 0.1.0
|
|
||||||
export DOCKER_IMAGE_CACHE_DIR := $(HOME)/Library/Caches/Docker/$(APPNAME)-$(ARCH)
|
|
||||||
GOLDFLAGS += -X main.Version=$(VERSION)
|
GOLDFLAGS += -X main.Version=$(VERSION)
|
||||||
GOLDFLAGS += -X main.Gitrev=$(GITREV_BUILD)
|
|
||||||
GOFLAGS := -ldflags "$(GOLDFLAGS)"
|
GOFLAGS := -ldflags "$(GOLDFLAGS)"
|
||||||
|
|
||||||
.PHONY: docker default run ci test fixme
|
default: run
|
||||||
|
|
||||||
default: fmt test
|
run: ./mfer.cmd
|
||||||
|
|
||||||
run: ./bin/mfer
|
|
||||||
./$<
|
./$<
|
||||||
./$< gen
|
./$< gen
|
||||||
|
|
||||||
ci: test
|
|
||||||
|
|
||||||
test: $(SOURCEFILES) mfer/mf.pb.go
|
|
||||||
go test -v --timeout 10s ./...
|
|
||||||
|
|
||||||
$(PROTOC_GEN_GO):
|
$(PROTOC_GEN_GO):
|
||||||
test -e $(PROTOC_GEN_GO) || go install -v google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1
|
test -e $(PROTOC_GEN_GO) || go install -v google.golang.org/protobuf/cmd/protoc-gen-go@latest
|
||||||
|
|
||||||
fixme:
|
|
||||||
@grep -nir fixme . | grep -v Makefile
|
|
||||||
|
|
||||||
devprereqs:
|
devprereqs:
|
||||||
|
which gofumpt || go install -v mvdan.cc/gofumpt@latest
|
||||||
which golangci-lint || go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
which golangci-lint || go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
||||||
|
|
||||||
mfer/mf.pb.go: mfer/mf.proto
|
mfer.cmd: $(PROTOC_GEN_GO) mfer/*.go internal/*/*.go cmd/*/*.go
|
||||||
cd mfer && go generate .
|
|
||||||
|
|
||||||
bin/mfer: $(SOURCEFILES) mfer/mf.pb.go
|
|
||||||
protoc --version
|
protoc --version
|
||||||
cd cmd/mfer && go build -tags urfave_cli_no_docs -o ../../bin/mfer $(GOFLAGS) .
|
cd mfer && go generate .
|
||||||
|
cd cmd/mfer && go build -o ../../mfer.cmd $(GOFLAGS) .
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rfv mfer/*.pb.go bin/mfer cmd/mfer/mfer *.dockerimage
|
rm -rfv mfer/*.pb.go ./mfer
|
||||||
|
|
||||||
fmt: mfer/mf.pb.go
|
fmt: prereqs
|
||||||
gofumpt -l -w mfer internal cmd
|
gofumpt -l -w mfer internal cmd
|
||||||
golangci-lint run --fix
|
golangci-lint run --fix
|
||||||
-prettier -w *.json
|
prettier -w *.json *.md
|
||||||
-prettier -w *.md
|
|
||||||
|
|
||||||
lint:
|
lint:
|
||||||
golangci-lint run
|
golangci-lint run
|
||||||
sh -c 'test -z "$$(gofmt -l .)"'
|
sh -c 'test -z "$$(gofmt -l .)"'
|
||||||
|
|
||||||
docker: sneak-mfer.$(ARCH).tzst.dockerimage
|
dockerbuild:
|
||||||
|
docker build .
|
||||||
sneak-mfer.$(ARCH).tzst.dockerimage: $(SOURCEFILES) vendor.tzst modcache.tzst
|
|
||||||
docker build --progress plain --build-arg GITREV=$(GITREV_BUILD) -t sneak/mfer .
|
|
||||||
docker save sneak/mfer | pv | zstdmt -19 > $@
|
|
||||||
du -sh $@
|
|
||||||
|
|
||||||
godoc:
|
|
||||||
open http://127.0.0.1:6060
|
|
||||||
godoc -http=:6060
|
|
||||||
|
|
||||||
vendor.tzst: go.mod go.sum
|
|
||||||
go mod tidy
|
|
||||||
go mod vendor
|
|
||||||
cd vendor && tar -c . | pv | zstdmt -19 > $(PWD)/$@.tmp
|
|
||||||
rm -rf vendor
|
|
||||||
mv $@.tmp $@
|
|
||||||
|
|
||||||
modcache.tzst: go.mod go.sum
|
|
||||||
go mod tidy
|
|
||||||
cd $(HOME)/go/pkg && chmod -R u+rw . && rm -rf mod sumdb
|
|
||||||
go mod download -x
|
|
||||||
cd $(shell go env GOMODCACHE) && tar -c . | pv | zstdmt -19 > $(PWD)/$@.tmp
|
|
||||||
mv $@.tmp $@
|
|
||||||
|
|||||||
89
README.md
89
README.md
@@ -1,76 +1,11 @@
|
|||||||
# mfer
|
# mfer
|
||||||
|
|
||||||
[mfer](https://git.eeqj.de/sneak/mfer) is a reference implementation library
|
Manifest file generator and checker.
|
||||||
and thin wrapper command-line utility written in [Go](https://golang.org)
|
|
||||||
and first published in 2022 under the [WTFPL](https://wtfpl.net) (public
|
|
||||||
domain) license. It specifies and generates `.mf` manifest files over a
|
|
||||||
directory tree of files to encapsulate metadata about them (such as
|
|
||||||
cryptographic checksums or signatures over same) to aid in archiving,
|
|
||||||
downloading, and streaming, or mirroring. The manifest files' data is
|
|
||||||
serialized with Google's [protobuf serialization
|
|
||||||
format](https://developers.google.com/protocol-buffers). The structure of
|
|
||||||
these files can be found [in the format
|
|
||||||
specification](https://git.eeqj.de/sneak/mfer/src/branch/main/mfer/mf.proto)
|
|
||||||
which is included in the [project
|
|
||||||
repository](https://git.eeqj.de/sneak/mfer).
|
|
||||||
|
|
||||||
The current version is pre-1.0 and while the repo was published in 2022,
|
|
||||||
there has not yet been any versioned release. [SemVer](https://semver.org)
|
|
||||||
will be used for releases.
|
|
||||||
|
|
||||||
This project was started by [@sneak](https://sneak.berlin) to scratch an
|
|
||||||
itch in 2022 and is currently a one-person effort, though the goal is for
|
|
||||||
this to emerge as a de-facto standard and be incorporated into other
|
|
||||||
software. A compatible javascript library is planned.
|
|
||||||
|
|
||||||
# Phases
|
|
||||||
|
|
||||||
Manifest generation happens in two distinct phases:
|
|
||||||
|
|
||||||
## Phase 1: Enumeration
|
|
||||||
|
|
||||||
Walking directories and calling `stat()` on files to collect metadata (path, size, mtime, ctime). This builds the list of files to be scanned. Relatively fast as it only reads filesystem metadata, not file contents.
|
|
||||||
|
|
||||||
**Progress:** `EnumerateStatus` with `FilesFound` and `BytesFound`
|
|
||||||
|
|
||||||
## Phase 2: Scan (ToManifest)
|
|
||||||
|
|
||||||
Reading file contents and computing cryptographic hashes for manifest generation. This is the expensive phase that reads all file data from disk.
|
|
||||||
|
|
||||||
**Progress:** `ScanStatus` with `TotalFiles`, `ScannedFiles`, `TotalBytes`, `ScannedBytes`, `BytesPerSec`
|
|
||||||
|
|
||||||
# Code Conventions
|
|
||||||
|
|
||||||
- **Logging:** Never use `fmt.Printf` or write to stdout/stderr directly in normal code. Use the `internal/log` package for all output (`log.Info`, `log.Infof`, `log.Debug`, `log.Debugf`, `log.Progressf`, `log.ProgressDone`).
|
|
||||||
- **Filesystem abstraction:** Use `github.com/spf13/afero` for filesystem operations to enable testing and flexibility.
|
|
||||||
- **CLI framework:** Use `github.com/urfave/cli/v2` for command-line interface.
|
|
||||||
- **Serialization:** Use Protocol Buffers for manifest file format.
|
|
||||||
- **Internal packages:** Non-exported implementation details go in `internal/` subdirectories.
|
|
||||||
- **Concurrency:** Use `sync.RWMutex` for protecting shared state; prefer channels for progress reporting.
|
|
||||||
- **Progress channels:** Use buffered channels (size 1) with non-blocking sends to avoid blocking the main operation if the consumer is slow.
|
|
||||||
- **Context support:** Long-running operations should accept `context.Context` for cancellation.
|
|
||||||
- **NO_COLOR:** Respect the `NO_COLOR` environment variable for disabling colored output.
|
|
||||||
- **Options pattern:** Use `NewWithOptions(opts *Options)` constructor pattern for configurable types.
|
|
||||||
|
|
||||||
# Build Status
|
# Build Status
|
||||||
|
|
||||||
[](https://drone.datavi.be/sneak/mfer)
|
[](https://drone.datavi.be/sneak/mfer)
|
||||||
|
|
||||||
# Participation
|
|
||||||
|
|
||||||
The community is as yet nonexistent so there are no defined policies or
|
|
||||||
norms yet. Primary development happens on a privately-run Gitea instance at
|
|
||||||
[https://git.eeqj.de/sneak/mfer](https://git.eeqj.de/sneak/mfer) and issues
|
|
||||||
are [tracked there](https://git.eeqj.de/sneak/mfer/issues).
|
|
||||||
|
|
||||||
Changes must always be formatted with a standard `go fmt`, syntactically
|
|
||||||
valid, and must pass the linting defined in the repository (presently only
|
|
||||||
the `golangci-lint` defaults), which can be run with a `make lint`. The
|
|
||||||
`main` branch is protected and all changes must be made via [pull
|
|
||||||
requests](https://git.eeqj.de/sneak/mfer/pulls) and pass CI to be merged.
|
|
||||||
Any changes submitted to this project must also be
|
|
||||||
[WTFPL-licensed](https://wtfpl.net) to be considered.
|
|
||||||
|
|
||||||
# Problem Statement
|
# Problem Statement
|
||||||
|
|
||||||
Given a plain URL, there is no standard way to safely and programmatically
|
Given a plain URL, there is no standard way to safely and programmatically
|
||||||
@@ -148,10 +83,6 @@ The manifest file would do several important things:
|
|||||||
- metadata size should not be used as an excuse to sacrifice utility (such
|
- metadata size should not be used as an excuse to sacrifice utility (such
|
||||||
as providing checksums over each chunk of a large file)
|
as providing checksums over each chunk of a large file)
|
||||||
|
|
||||||
# Limitations
|
|
||||||
|
|
||||||
- **Manifest size:** Manifests must fit entirely in system memory during reading and writing.
|
|
||||||
|
|
||||||
# Open Questions
|
# Open Questions
|
||||||
|
|
||||||
- Should the manifest file include checksums of individual file chunks, or just for the whole assembled file?
|
- Should the manifest file include checksums of individual file chunks, or just for the whole assembled file?
|
||||||
@@ -197,7 +128,8 @@ The manifest file would do several important things:
|
|||||||
- a command line option to zero/omit mtime/ctime, as well as manifest
|
- a command line option to zero/omit mtime/ctime, as well as manifest
|
||||||
timestamp, and sort all directory listings so that manifest file
|
timestamp, and sort all directory listings so that manifest file
|
||||||
generation is deterministic/reproducible
|
generation is deterministic/reproducible
|
||||||
- URL format `mfer fetch https://exmaple.com/manifestdirectory/?key=5539AD00DE4C42F3AFE11575052443F4DF2A55C2`
|
- URL format `mfer fetch
|
||||||
|
https://exmaple.com/manifestdirectory/?key=5539AD00DE4C42F3AFE11575052443F4DF2A55C2`
|
||||||
to assert in the URL which PGP signing key should be used in the manifest,
|
to assert in the URL which PGP signing key should be used in the manifest,
|
||||||
so that shared URLs have a cryptographic trust root
|
so that shared URLs have a cryptographic trust root
|
||||||
- a "well-known" key in the manifest that maps well known keys (could reuse
|
- a "well-known" key in the manifest that maps well known keys (could reuse
|
||||||
@@ -239,15 +171,6 @@ regardless of filesystem format.
|
|||||||
Please email [`sneak@sneak.berlin`](mailto:sneak@sneak.berlin) with your
|
Please email [`sneak@sneak.berlin`](mailto:sneak@sneak.berlin) with your
|
||||||
desired username for an account on this Gitea instance.
|
desired username for an account on this Gitea instance.
|
||||||
|
|
||||||
## Links
|
I am currently interested in hiring a contractor skilled with the Go
|
||||||
|
standard library interfaces to specify this tool in full and develop a
|
||||||
- Repo: [https://git.eeqj.de/sneak/mfer](https://git.eeqj.de/sneak/mfer)
|
prototype implementation.
|
||||||
- Issues: [https://git.eeqj.de/sneak/mfer/issues](https://git.eeqj.de/sneak/mfer/issues)
|
|
||||||
|
|
||||||
# Authors
|
|
||||||
|
|
||||||
- [@sneak <sneak@sneak.berlin>](mailto:sneak@sneak.berlin)
|
|
||||||
|
|
||||||
# License
|
|
||||||
|
|
||||||
- [WTFPL](https://wtfpl.net)
|
|
||||||
|
|||||||
122
TODO.md
122
TODO.md
@@ -1,122 +0,0 @@
|
|||||||
# TODO: mfer 1.0
|
|
||||||
|
|
||||||
## Design Questions
|
|
||||||
|
|
||||||
*sneak: please answer inline below each question. These are preserved for posterity.*
|
|
||||||
|
|
||||||
### Format Design
|
|
||||||
|
|
||||||
**1. Should `MFFileChecksum` be simplified?**
|
|
||||||
Currently it's a separate message wrapping a single `bytes multiHash` field. Since multihash already self-describes the algorithm, `repeated bytes hashes` directly on `MFFilePath` would be simpler and reduce per-file protobuf overhead. Is the extra message layer intentional (e.g. planning to add per-hash metadata like `verified_at`)?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**2. Should file permissions/mode be stored?**
|
|
||||||
The format stores mtime/ctime but not Unix file permissions. For archival use (ExFAT, filesystem-independent checksums) this may not matter, but for software distribution or filesystem restoration it's a gap. Should we reserve a field now (e.g. `optional uint32 mode = 305`) even if we don't populate it yet?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**3. Should `atime` be removed from the schema?**
|
|
||||||
Access time is volatile, non-deterministic, and often disabled (`noatime`). Including it means two manifests of the same directory at different times will differ, which conflicts with the determinism goal. Remove it, or document it as "never set by default"?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**4. What are the path normalization rules?**
|
|
||||||
The proto has `string path` with no specification about: always forward-slash? Must be relative? No `..` components allowed? UTF-8 NFC vs NFD normalization (macOS vs Linux)? Max path length? This is a security issue (path traversal) and a cross-platform compatibility issue. What rules should the spec mandate?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**5. Should we add a version byte after the magic?**
|
|
||||||
Currently `ZNAVSRFG` is followed immediately by protobuf. Adding a version byte (`ZNAVSRFG\x01`) would allow future framing changes without requiring protobuf parsing to detect the version. `MFFileOuter.Version` serves this purpose but requires successful deserialization to read. Worth the extra byte?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**6. Should we add a length-prefix after the magic?**
|
|
||||||
Protobuf is not self-delimiting. If we ever want to concatenate manifests or append data after the protobuf, the current framing is insufficient. Add a varint or fixed-width length-prefix?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
### Signature Design
|
|
||||||
|
|
||||||
**7. What does the outer SHA-256 hash cover — compressed or uncompressed data?**
|
|
||||||
The review notes it currently hashes compressed data (good for verifying before decompression), but this should be explicitly documented. Which is the intended behavior?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**8. Should `signatureString()` sign raw bytes instead of a hex-encoded string?**
|
|
||||||
Currently the canonical string is `MAGIC-UUID-MULTIHASH` with hex encoding, which adds a transformation layer. Signing the raw `sha256` bytes (or compressed `innerMessage` directly) would be simpler. Keep the string format or switch to raw bytes?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**9. Should we support detached signature files (`.mf.sig`)?**
|
|
||||||
Embedded signatures are better for single-file distribution. Detached `.mf.sig` files follow the familiar `SHASUMS`/`SHASUMS.asc` pattern and are simpler for HTTP serving. Support both modes?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**10. GPG vs pure-Go crypto for signatures?**
|
|
||||||
Shelling out to `gpg` is fragile (may not be installed, version-dependent output). `github.com/ProtonMail/go-crypto` provides pure-Go OpenPGP, or we could go Ed25519/signify (simpler, no key management). Which direction?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
### Implementation Design
|
|
||||||
|
|
||||||
**11. Should manifests be deterministic by default?**
|
|
||||||
This means: sort file entries by path, omit `createdAt` timestamp (or make it opt-in), no `atime`. Should determinism be the default, with a `--include-timestamps` flag to opt in?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**12. Should we consolidate or keep both scanner/checker implementations?**
|
|
||||||
There are two parallel implementations: `mfer/scanner.go` + `mfer/checker.go` (typed with `FileSize`, `RelFilePath`) and `internal/scanner/` + `internal/checker/` (raw `int64`, `string`). The `mfer/` versions are superior. Delete the `internal/` versions?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**13. Should the `manifest` type be exported?**
|
|
||||||
Currently unexported with exported constructors (`New`, `NewFromPaths`, etc.). Consumers can't declare `var m *mfer.manifest`. Export the type, or define an interface?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
**14. What should the Go module path be for 1.0?**
|
|
||||||
Currently mixed between `sneak.berlin/go/mfer` and `git.eeqj.de/sneak/mfer`. Which is canonical?
|
|
||||||
|
|
||||||
> *answer:*
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Implementation Plan
|
|
||||||
|
|
||||||
### Phase 1: Foundation (format correctness)
|
|
||||||
|
|
||||||
- [ ] Delete `internal/scanner/` and `internal/checker/` — consolidate on `mfer/` package versions; update CLI code
|
|
||||||
- [ ] Add deterministic file ordering — sort entries by path (lexicographic, byte-order) in `Builder.Build()`; add test asserting byte-identical output from two runs
|
|
||||||
- [ ] Add decompression size limit — `io.LimitReader` in `deserializeInner()` with `m.pbOuter.Size` as bound
|
|
||||||
- [ ] Fix `errors.Is` dead code in checker — replace with `os.IsNotExist(err)` or `errors.Is(err, fs.ErrNotExist)`
|
|
||||||
- [ ] Fix `AddFile` to verify size — check `totalRead == size` after reading, return error on mismatch
|
|
||||||
- [ ] Specify path invariants — add proto comments (UTF-8, forward-slash, relative, no `..`, no leading `/`); validate in `Builder.AddFile` and `Builder.AddFileWithHash`
|
|
||||||
|
|
||||||
### Phase 2: CLI polish
|
|
||||||
|
|
||||||
- [ ] Fix flag naming — all CLI flags use kebab-case as primary (`--include-dotfiles`, `--follow-symlinks`)
|
|
||||||
- [ ] Fix URL construction in fetch — use `BaseURL.JoinPath()` or `url.JoinPath()` instead of string concatenation
|
|
||||||
- [ ] Add progress rate-limiting to Checker — throttle to once per second, matching Scanner
|
|
||||||
- [ ] Add `--deterministic` flag (or make it default) — omit `createdAt`, sort files
|
|
||||||
|
|
||||||
### Phase 3: Robustness
|
|
||||||
|
|
||||||
- [ ] Replace GPG subprocess with pure-Go crypto — `github.com/ProtonMail/go-crypto` or Ed25519/signify
|
|
||||||
- [ ] Add timeout to any remaining subprocess calls
|
|
||||||
- [ ] Add fuzzing tests for `NewManifestFromReader`
|
|
||||||
- [ ] Add retry logic to fetch — exponential backoff for transient HTTP errors
|
|
||||||
|
|
||||||
### Phase 4: Format finalization
|
|
||||||
|
|
||||||
- [ ] Remove or deprecate `atime` from proto (pending design question answer)
|
|
||||||
- [ ] Reserve `optional uint32 mode = 305` in `MFFilePath` for future file permissions
|
|
||||||
- [ ] Add version byte after magic — `ZNAVSRFG\x01` for format version 1
|
|
||||||
- [ ] Write format specification document — separate from README: magic, outer structure, compression, inner structure, path invariants, signature scheme, canonical serialization
|
|
||||||
|
|
||||||
### Phase 5: Release prep
|
|
||||||
|
|
||||||
- [ ] Finalize Go module path
|
|
||||||
- [ ] Audit all error messages for consistency and helpfulness
|
|
||||||
- [ ] Add `--version` output matching SemVer
|
|
||||||
- [ ] Tag v1.0.0
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
if [[ ! -z "$DRONE_COMMIT_SHA" ]]; then
|
|
||||||
echo "${DRONE_COMMIT_SHA:0:7}"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -z "$GITREV" ]]; then
|
|
||||||
echo $GITREV
|
|
||||||
else
|
|
||||||
git describe --always --dirty=-dirty
|
|
||||||
fi
|
|
||||||
@@ -3,15 +3,14 @@ package main
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"sneak.berlin/go/mfer/internal/cli"
|
"git.eeqj.de/sneak/mfer/internal/cli"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
Appname string = "mfer"
|
Appname string = "mfer"
|
||||||
Version string
|
Version string
|
||||||
Gitrev string
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
os.Exit(cli.Run(Appname, Version, Gitrev))
|
os.Exit(cli.Run(Appname, Version))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,11 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBuild(t *testing.T) {
|
|
||||||
assert.True(t, true)
|
|
||||||
}
|
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
# usage.sh - Generate and check a manifest from the repo
|
|
||||||
# Run from repo root: ./contrib/usage.sh
|
|
||||||
|
|
||||||
TMPDIR=$(mktemp -d)
|
|
||||||
MANIFEST="$TMPDIR/index.mf"
|
|
||||||
|
|
||||||
cleanup() {
|
|
||||||
rm -rf "$TMPDIR"
|
|
||||||
}
|
|
||||||
trap cleanup EXIT
|
|
||||||
|
|
||||||
echo "Building mfer..."
|
|
||||||
go build -o "$TMPDIR/mfer" ./cmd/mfer
|
|
||||||
|
|
||||||
"$TMPDIR/mfer" generate -o "$MANIFEST" .
|
|
||||||
"$TMPDIR/mfer" check --base . "$MANIFEST"
|
|
||||||
41
go.mod
41
go.mod
@@ -1,44 +1,11 @@
|
|||||||
module sneak.berlin/go/mfer
|
module git.eeqj.de/sneak/mfer
|
||||||
|
|
||||||
go 1.23
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/apex/log v1.9.0
|
|
||||||
github.com/davecgh/go-spew v1.1.1
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/dustin/go-humanize v1.0.1
|
|
||||||
github.com/google/uuid v1.1.2
|
|
||||||
github.com/klauspost/compress v1.18.2
|
|
||||||
github.com/multiformats/go-multihash v0.2.3
|
|
||||||
github.com/pterm/pterm v0.12.35
|
github.com/pterm/pterm v0.12.35
|
||||||
github.com/spf13/afero v1.8.0
|
github.com/spf13/afero v1.8.0
|
||||||
github.com/stretchr/testify v1.8.1
|
github.com/urfave/cli/v2 v2.3.0
|
||||||
github.com/urfave/cli/v2 v2.23.6
|
google.golang.org/protobuf v1.27.1
|
||||||
google.golang.org/protobuf v1.28.1
|
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/atomicgo/cursor v0.0.1 // indirect
|
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
|
||||||
github.com/fatih/color v1.7.0 // indirect
|
|
||||||
github.com/gookit/color v1.4.2 // indirect
|
|
||||||
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
|
|
||||||
github.com/mattn/go-colorable v0.1.2 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.8 // indirect
|
|
||||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
|
||||||
github.com/minio/sha256-simd v1.0.0 // indirect
|
|
||||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
|
||||||
github.com/multiformats/go-varint v0.0.6 // indirect
|
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
|
||||||
github.com/rivo/uniseg v0.2.0 // indirect
|
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
|
||||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
|
||||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect
|
|
||||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
|
||||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
|
|
||||||
golang.org/x/sys v0.1.0 // indirect
|
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
|
||||||
golang.org/x/text v0.3.6 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
lukechampine.com/blake3 v1.1.6 // indirect
|
|
||||||
)
|
)
|
||||||
|
|||||||
110
go.sum
110
go.sum
@@ -44,15 +44,8 @@ github.com/MarvinJWendt/testza v0.2.8/go.mod h1:nwIcjmr0Zz+Rcwfh3/4UhBp7ePKVhuBE
|
|||||||
github.com/MarvinJWendt/testza v0.2.10/go.mod h1:pd+VWsoGUiFtq+hRKSU1Bktnn+DMCSrDrXDpX2bG66k=
|
github.com/MarvinJWendt/testza v0.2.10/go.mod h1:pd+VWsoGUiFtq+hRKSU1Bktnn+DMCSrDrXDpX2bG66k=
|
||||||
github.com/MarvinJWendt/testza v0.2.12 h1:/PRp/BF+27t2ZxynTiqj0nyND5PbOtfJS0SuTuxmgeg=
|
github.com/MarvinJWendt/testza v0.2.12 h1:/PRp/BF+27t2ZxynTiqj0nyND5PbOtfJS0SuTuxmgeg=
|
||||||
github.com/MarvinJWendt/testza v0.2.12/go.mod h1:JOIegYyV7rX+7VZ9r77L/eH6CfJHHzXjB69adAhzZkI=
|
github.com/MarvinJWendt/testza v0.2.12/go.mod h1:JOIegYyV7rX+7VZ9r77L/eH6CfJHHzXjB69adAhzZkI=
|
||||||
github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0=
|
|
||||||
github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
|
|
||||||
github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
|
|
||||||
github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
|
|
||||||
github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
|
|
||||||
github.com/atomicgo/cursor v0.0.1 h1:xdogsqa6YYlLfM+GyClC/Lchf7aiMerFiZQn7soTOoU=
|
github.com/atomicgo/cursor v0.0.1 h1:xdogsqa6YYlLfM+GyClC/Lchf7aiMerFiZQn7soTOoU=
|
||||||
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
|
||||||
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
|
||||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
|
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||||
@@ -61,26 +54,20 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
|||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
|
||||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
|
||||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
@@ -134,8 +121,6 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
|
|||||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
@@ -144,48 +129,19 @@ github.com/gookit/color v1.4.2 h1:tXy44JFSFkKnELV6WaMo/lLfu/meqITX3iAV52do7lk=
|
|||||||
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
|
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
|
||||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
|
|
||||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
|
||||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
|
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
|
||||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
|
||||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
|
||||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
|
||||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
|
||||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
|
||||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
|
||||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
|
||||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
|
||||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
|
||||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
|
||||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
|
||||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
|
||||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
|
||||||
github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY=
|
|
||||||
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
@@ -200,43 +156,23 @@ github.com/pterm/pterm v0.12.35 h1:A/vHwDM+WByn0sTPlpL2L6kOTy12xqZuwNFMF/NlA+U=
|
|||||||
github.com/pterm/pterm v0.12.35/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
|
github.com/pterm/pterm v0.12.35/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
|
||||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
|
|
||||||
github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
|
|
||||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
|
||||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
|
||||||
github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60=
|
github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60=
|
||||||
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
|
|
||||||
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
|
|
||||||
github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
|
|
||||||
github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc=
|
|
||||||
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
|
|
||||||
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
|
|
||||||
github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
|
|
||||||
github.com/urfave/cli/v2 v2.23.6 h1:iWmtKD+prGo1nKUtLO0Wg4z9esfBM4rAV4QRLQiEmJ4=
|
|
||||||
github.com/urfave/cli/v2 v2.23.6/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
|
||||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8=
|
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8=
|
||||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
|
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
|
||||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
|
||||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
@@ -248,15 +184,12 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
|
|
||||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
@@ -292,7 +225,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
@@ -342,9 +274,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@@ -378,9 +308,8 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8=
|
||||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
@@ -391,9 +320,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
@@ -538,22 +466,16 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
|||||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
@@ -561,8 +483,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c=
|
|
||||||
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
|
|||||||
@@ -1,15 +0,0 @@
|
|||||||
package bork
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrMissingMagic = errors.New("missing magic bytes in file")
|
|
||||||
ErrFileTruncated = errors.New("file/stream is truncated abnormally")
|
|
||||||
)
|
|
||||||
|
|
||||||
func Newf(format string, args ...interface{}) error {
|
|
||||||
return fmt.Errorf(format, args...)
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
package bork
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBuild(t *testing.T) {
|
|
||||||
assert.NotNil(t, ErrMissingMagic)
|
|
||||||
}
|
|
||||||
@@ -1,188 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/dustin/go-humanize"
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/urfave/cli/v2"
|
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
// findManifest looks for a manifest file in the given directory.
|
|
||||||
// It checks for index.mf and .index.mf, returning the first one found.
|
|
||||||
func findManifest(fs afero.Fs, dir string) (string, error) {
|
|
||||||
candidates := []string{"index.mf", ".index.mf"}
|
|
||||||
for _, name := range candidates {
|
|
||||||
path := filepath.Join(dir, name)
|
|
||||||
exists, err := afero.Exists(fs, path)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if exists {
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("no manifest found in %s (looked for index.mf and .index.mf)", dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mfa *CLIApp) checkManifestOperation(ctx *cli.Context) error {
|
|
||||||
log.Debug("checkManifestOperation()")
|
|
||||||
|
|
||||||
var manifestPath string
|
|
||||||
var err error
|
|
||||||
|
|
||||||
if ctx.Args().Len() > 0 {
|
|
||||||
arg := ctx.Args().Get(0)
|
|
||||||
// Check if arg is a directory or a file
|
|
||||||
info, statErr := mfa.Fs.Stat(arg)
|
|
||||||
if statErr == nil && info.IsDir() {
|
|
||||||
// It's a directory, look for manifest inside
|
|
||||||
manifestPath, err = findManifest(mfa.Fs, arg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Treat as a file path
|
|
||||||
manifestPath = arg
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// No argument, look in current directory
|
|
||||||
manifestPath, err = findManifest(mfa.Fs, ".")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
basePath := ctx.String("base")
|
|
||||||
showProgress := ctx.Bool("progress")
|
|
||||||
|
|
||||||
log.Infof("checking manifest %s with base %s", manifestPath, basePath)
|
|
||||||
|
|
||||||
// Create checker
|
|
||||||
chk, err := mfer.NewChecker(manifestPath, basePath, mfa.Fs)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to load manifest: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check signature requirement
|
|
||||||
requiredSigner := ctx.String("require-signature")
|
|
||||||
if requiredSigner != "" {
|
|
||||||
// Validate fingerprint format: must be exactly 40 hex characters
|
|
||||||
if len(requiredSigner) != 40 {
|
|
||||||
return fmt.Errorf("invalid fingerprint: must be exactly 40 hex characters, got %d", len(requiredSigner))
|
|
||||||
}
|
|
||||||
if _, err := hex.DecodeString(requiredSigner); err != nil {
|
|
||||||
return fmt.Errorf("invalid fingerprint: must be valid hex: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !chk.IsSigned() {
|
|
||||||
return fmt.Errorf("manifest is not signed, but signature from %s is required", requiredSigner)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract fingerprint from the embedded public key (not from the signer field)
|
|
||||||
// This validates the key is importable and gets its actual fingerprint
|
|
||||||
embeddedFP, err := chk.ExtractEmbeddedSigningKeyFP()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to extract fingerprint from embedded signing key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compare fingerprints - must be exact match (case-insensitive)
|
|
||||||
if !strings.EqualFold(embeddedFP, requiredSigner) {
|
|
||||||
return fmt.Errorf("embedded signing key fingerprint %s does not match required %s", embeddedFP, requiredSigner)
|
|
||||||
}
|
|
||||||
log.Infof("manifest signature verified (signer: %s)", embeddedFP)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("manifest contains %d files, %s", chk.FileCount(), humanize.IBytes(uint64(chk.TotalBytes())))
|
|
||||||
|
|
||||||
// Set up results channel
|
|
||||||
results := make(chan mfer.Result, 1)
|
|
||||||
|
|
||||||
// Set up progress channel
|
|
||||||
var progress chan mfer.CheckStatus
|
|
||||||
if showProgress {
|
|
||||||
progress = make(chan mfer.CheckStatus, 1)
|
|
||||||
go func() {
|
|
||||||
for status := range progress {
|
|
||||||
if status.ETA > 0 {
|
|
||||||
log.Progressf("Checking: %d/%d files, %s/s, ETA %s, %d failures",
|
|
||||||
status.CheckedFiles,
|
|
||||||
status.TotalFiles,
|
|
||||||
humanize.IBytes(uint64(status.BytesPerSec)),
|
|
||||||
status.ETA.Round(time.Second),
|
|
||||||
status.Failures)
|
|
||||||
} else {
|
|
||||||
log.Progressf("Checking: %d/%d files, %s/s, %d failures",
|
|
||||||
status.CheckedFiles,
|
|
||||||
status.TotalFiles,
|
|
||||||
humanize.IBytes(uint64(status.BytesPerSec)),
|
|
||||||
status.Failures)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.ProgressDone()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process results in a goroutine
|
|
||||||
var failures int64
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
for result := range results {
|
|
||||||
if result.Status != mfer.StatusOK {
|
|
||||||
failures++
|
|
||||||
log.Infof("%s: %s (%s)", result.Status, result.Path, result.Message)
|
|
||||||
} else {
|
|
||||||
log.Verbosef("%s: %s", result.Status, result.Path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Run check
|
|
||||||
err = chk.Check(ctx.Context, results, progress)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("check failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for results processing to complete
|
|
||||||
<-done
|
|
||||||
|
|
||||||
// Check for extra files if requested
|
|
||||||
if ctx.Bool("no-extra-files") {
|
|
||||||
extraResults := make(chan mfer.Result, 1)
|
|
||||||
extraDone := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
for result := range extraResults {
|
|
||||||
failures++
|
|
||||||
log.Infof("%s: %s (%s)", result.Status, result.Path, result.Message)
|
|
||||||
}
|
|
||||||
close(extraDone)
|
|
||||||
}()
|
|
||||||
|
|
||||||
err = chk.FindExtraFiles(ctx.Context, extraResults)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for extra files: %w", err)
|
|
||||||
}
|
|
||||||
<-extraDone
|
|
||||||
}
|
|
||||||
|
|
||||||
elapsed := time.Since(mfa.startupTime).Seconds()
|
|
||||||
rate := float64(chk.TotalBytes()) / elapsed
|
|
||||||
if failures == 0 {
|
|
||||||
log.Infof("checked %d files (%s) in %.1fs (%s/s): all OK", chk.FileCount(), humanize.IBytes(uint64(chk.TotalBytes())), elapsed, humanize.IBytes(uint64(rate)))
|
|
||||||
} else {
|
|
||||||
log.Infof("checked %d files (%s) in %.1fs (%s/s): %d failed", chk.FileCount(), humanize.IBytes(uint64(chk.TotalBytes())), elapsed, humanize.IBytes(uint64(rate)), failures)
|
|
||||||
}
|
|
||||||
|
|
||||||
if failures > 0 {
|
|
||||||
mfa.exitCode = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
24
internal/cli/cli.go
Normal file
24
internal/cli/cli.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
package cli
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var NO_COLOR bool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
NO_COLOR = false
|
||||||
|
if _, exists := os.LookupEnv("NO_COLOR"); exists {
|
||||||
|
NO_COLOR = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Run(Appname, Version string) int {
|
||||||
|
m := &CLIApp{}
|
||||||
|
m.appname = Appname
|
||||||
|
m.version = Version
|
||||||
|
m.exitCode = 0
|
||||||
|
|
||||||
|
m.run()
|
||||||
|
return m.exitCode
|
||||||
|
}
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NO_COLOR disables colored output when set. Automatically true if the
|
|
||||||
// NO_COLOR environment variable is present (per https://no-color.org/).
|
|
||||||
var NO_COLOR bool
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
NO_COLOR = false
|
|
||||||
if _, exists := os.LookupEnv("NO_COLOR"); exists {
|
|
||||||
NO_COLOR = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunOptions contains all configuration for running the CLI application.
|
|
||||||
// Use DefaultRunOptions for standard CLI execution, or construct manually for testing.
|
|
||||||
type RunOptions struct {
|
|
||||||
Appname string // Application name displayed in help and version output
|
|
||||||
Version string // Version string (typically set at build time)
|
|
||||||
Gitrev string // Git revision hash (typically set at build time)
|
|
||||||
Args []string // Command-line arguments (typically os.Args)
|
|
||||||
Stdin io.Reader // Standard input stream
|
|
||||||
Stdout io.Writer // Standard output stream
|
|
||||||
Stderr io.Writer // Standard error stream
|
|
||||||
Fs afero.Fs // Filesystem abstraction for file operations
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultRunOptions returns RunOptions configured for normal CLI execution.
|
|
||||||
func DefaultRunOptions(appname, version, gitrev string) *RunOptions {
|
|
||||||
return &RunOptions{
|
|
||||||
Appname: appname,
|
|
||||||
Version: version,
|
|
||||||
Gitrev: gitrev,
|
|
||||||
Args: os.Args,
|
|
||||||
Stdin: os.Stdin,
|
|
||||||
Stdout: os.Stdout,
|
|
||||||
Stderr: os.Stderr,
|
|
||||||
Fs: afero.NewOsFs(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run creates and runs the CLI application with default options.
|
|
||||||
func Run(appname, version, gitrev string) int {
|
|
||||||
return RunWithOptions(DefaultRunOptions(appname, version, gitrev))
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunWithOptions creates and runs the CLI application with the given options.
|
|
||||||
func RunWithOptions(opts *RunOptions) int {
|
|
||||||
m := &CLIApp{
|
|
||||||
appname: opts.Appname,
|
|
||||||
version: opts.Version,
|
|
||||||
gitrev: opts.Gitrev,
|
|
||||||
exitCode: 0,
|
|
||||||
Stdin: opts.Stdin,
|
|
||||||
Stdout: opts.Stdout,
|
|
||||||
Stderr: opts.Stderr,
|
|
||||||
Fs: opts.Fs,
|
|
||||||
}
|
|
||||||
|
|
||||||
m.run(opts.Args)
|
|
||||||
return m.exitCode
|
|
||||||
}
|
|
||||||
@@ -1,593 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"math/rand"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
urfcli "github.com/urfave/cli/v2"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Prevent urfave/cli from calling os.Exit during tests
|
|
||||||
urfcli.OsExiter = func(code int) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuild(t *testing.T) {
|
|
||||||
m := &CLIApp{}
|
|
||||||
assert.NotNil(t, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testOpts(args []string, fs afero.Fs) *RunOptions {
|
|
||||||
return &RunOptions{
|
|
||||||
Appname: "mfer",
|
|
||||||
Version: "1.0.0",
|
|
||||||
Gitrev: "abc123",
|
|
||||||
Args: args,
|
|
||||||
Stdin: &bytes.Buffer{},
|
|
||||||
Stdout: &bytes.Buffer{},
|
|
||||||
Stderr: &bytes.Buffer{},
|
|
||||||
Fs: fs,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVersionCommand(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
opts := testOpts([]string{"mfer", "version"}, fs)
|
|
||||||
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
|
|
||||||
assert.Equal(t, 0, exitCode)
|
|
||||||
stdout := opts.Stdout.(*bytes.Buffer).String()
|
|
||||||
assert.Contains(t, stdout, mfer.Version)
|
|
||||||
assert.Contains(t, stdout, "abc123")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHelpCommand(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
opts := testOpts([]string{"mfer", "--help"}, fs)
|
|
||||||
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
|
|
||||||
assert.Equal(t, 0, exitCode)
|
|
||||||
stdout := opts.Stdout.(*bytes.Buffer).String()
|
|
||||||
assert.Contains(t, stdout, "generate")
|
|
||||||
assert.Contains(t, stdout, "check")
|
|
||||||
assert.Contains(t, stdout, "fetch")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateCommand(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files in memory filesystem
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello world"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("test content"), 0o644))
|
|
||||||
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
|
|
||||||
assert.Equal(t, 0, exitCode, "stderr: %s", opts.Stderr.(*bytes.Buffer).String())
|
|
||||||
|
|
||||||
// Verify manifest was created
|
|
||||||
exists, err := afero.Exists(fs, "/testdir/test.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, exists)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateAndCheckCommand(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files with subdirectory
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir/subdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello world"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/subdir/file2.txt", []byte("test content"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode, "generate failed: %s", opts.Stderr.(*bytes.Buffer).String())
|
|
||||||
|
|
||||||
// Check manifest
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/testdir/test.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 0, exitCode, "check failed: %s", opts.Stderr.(*bytes.Buffer).String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckCommandWithMissingFile(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello world"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode, "generate failed: %s", opts.Stderr.(*bytes.Buffer).String())
|
|
||||||
|
|
||||||
// Delete the file
|
|
||||||
require.NoError(t, fs.Remove("/testdir/file1.txt"))
|
|
||||||
|
|
||||||
// Check manifest - should fail
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/testdir/test.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 1, exitCode, "check should have failed for missing file")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckCommandWithCorruptedFile(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello world"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode, "generate failed: %s", opts.Stderr.(*bytes.Buffer).String())
|
|
||||||
|
|
||||||
// Corrupt the file (change content but keep same size)
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("HELLO WORLD"), 0o644))
|
|
||||||
|
|
||||||
// Check manifest - should fail with hash mismatch
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/testdir/test.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 1, exitCode, "check should have failed for corrupted file")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckCommandWithSizeMismatch(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello world"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode, "generate failed: %s", opts.Stderr.(*bytes.Buffer).String())
|
|
||||||
|
|
||||||
// Change file size
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("different size content here"), 0o644))
|
|
||||||
|
|
||||||
// Check manifest - should fail with size mismatch
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/testdir/test.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 1, exitCode, "check should have failed for size mismatch")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBannerOutput(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
|
|
||||||
// Run without -q to see banner
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Banner ASCII art should be in stdout
|
|
||||||
stdout := opts.Stdout.(*bytes.Buffer).String()
|
|
||||||
assert.Contains(t, stdout, "___")
|
|
||||||
assert.Contains(t, stdout, "\\")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUnknownCommand(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
opts := testOpts([]string{"mfer", "unknown"}, fs)
|
|
||||||
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 1, exitCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateExcludesDotfilesByDefault(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files including dotfiles
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/.hidden", []byte("secret"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest without --include-dotfiles (default excludes dotfiles)
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Check that manifest exists
|
|
||||||
exists, _ := afero.Exists(fs, "/testdir/test.mf")
|
|
||||||
assert.True(t, exists)
|
|
||||||
|
|
||||||
// Verify manifest only has 1 file (the non-dotfile)
|
|
||||||
manifest, err := mfer.NewManifestFromFile(fs, "/testdir/test.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, manifest.Files(), 1)
|
|
||||||
assert.Equal(t, "file1.txt", manifest.Files()[0].Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateWithIncludeDotfiles(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files including dotfiles
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/.hidden", []byte("secret"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest with --include-dotfiles
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "--include-dotfiles", "-o", "/testdir/test.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Verify manifest has 2 files (including dotfile)
|
|
||||||
manifest, err := mfer.NewManifestFromFile(fs, "/testdir/test.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, manifest.Files(), 2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMultipleInputPaths(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files in multiple directories
|
|
||||||
require.NoError(t, fs.MkdirAll("/dir1", 0o755))
|
|
||||||
require.NoError(t, fs.MkdirAll("/dir2", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/dir1/file1.txt", []byte("content1"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/dir2/file2.txt", []byte("content2"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest from multiple paths
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/dir1", "/dir2"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 0, exitCode, "stderr: %s", opts.Stderr.(*bytes.Buffer).String())
|
|
||||||
|
|
||||||
exists, _ := afero.Exists(fs, "/output.mf")
|
|
||||||
assert.True(t, exists)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNoExtraFilesPass(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("world"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/manifest.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Check with --no-extra-files (should pass - no extra files)
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--no-extra-files", "--base", "/testdir", "/manifest.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 0, exitCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNoExtraFilesFail(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/manifest.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Add an extra file after manifest generation
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/extra.txt", []byte("extra"), 0o644))
|
|
||||||
|
|
||||||
// Check with --no-extra-files (should fail - extra file exists)
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--no-extra-files", "--base", "/testdir", "/manifest.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 1, exitCode, "check should fail when extra files exist")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNoExtraFilesWithSubdirectory(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test files with subdirectory
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir/subdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/subdir/file2.txt", []byte("world"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/manifest.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Add extra file in subdirectory
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/subdir/extra.txt", []byte("extra"), 0o644))
|
|
||||||
|
|
||||||
// Check with --no-extra-files (should fail)
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--no-extra-files", "--base", "/testdir", "/manifest.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 1, exitCode, "check should fail when extra files exist in subdirectory")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckWithoutNoExtraFilesIgnoresExtra(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/manifest.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Add extra file
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/extra.txt", []byte("extra"), 0o644))
|
|
||||||
|
|
||||||
// Check WITHOUT --no-extra-files (should pass - extra files ignored)
|
|
||||||
opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/manifest.mf"}, fs)
|
|
||||||
exitCode = RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 0, exitCode, "check without --no-extra-files should ignore extra files")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateAtomicWriteNoTempFileOnSuccess(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Verify output file exists
|
|
||||||
exists, err := afero.Exists(fs, "/output.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, exists, "output file should exist")
|
|
||||||
|
|
||||||
// Verify temp file does NOT exist
|
|
||||||
tmpExists, err := afero.Exists(fs, "/output.mf.tmp")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, tmpExists, "temp file should not exist after successful generation")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateAtomicWriteOverwriteWithForce(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
|
|
||||||
// Create existing manifest with different content
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/output.mf", []byte("old content"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest with --force
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-f", "-o", "/output.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Verify output file exists and was overwritten
|
|
||||||
content, err := afero.ReadFile(fs, "/output.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotEqual(t, "old content", string(content), "manifest should be overwritten")
|
|
||||||
|
|
||||||
// Verify temp file does NOT exist
|
|
||||||
tmpExists, err := afero.Exists(fs, "/output.mf.tmp")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, tmpExists, "temp file should not exist after successful generation")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateFailsWithoutForceWhenOutputExists(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
|
|
||||||
// Create existing manifest
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/output.mf", []byte("existing"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest WITHOUT --force (should fail)
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
assert.Equal(t, 1, exitCode, "should fail when output exists without --force")
|
|
||||||
|
|
||||||
// Verify original content is preserved
|
|
||||||
content, err := afero.ReadFile(fs, "/output.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "existing", string(content), "original file should be preserved")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateAtomicWriteUsesTemp(t *testing.T) {
|
|
||||||
// This test verifies that generate uses a temp file by checking
|
|
||||||
// that the output file doesn't exist until generation completes.
|
|
||||||
// We do this by generating to a path and verifying the temp file
|
|
||||||
// pattern is used (output.mf.tmp -> output.mf)
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
// Create test file
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("hello"), 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/testdir"}, fs)
|
|
||||||
exitCode := RunWithOptions(opts)
|
|
||||||
require.Equal(t, 0, exitCode)
|
|
||||||
|
|
||||||
// Both output file should exist and temp should not
|
|
||||||
exists, _ := afero.Exists(fs, "/output.mf")
|
|
||||||
assert.True(t, exists, "output file should exist")
|
|
||||||
|
|
||||||
tmpExists, _ := afero.Exists(fs, "/output.mf.tmp")
|
|
||||||
assert.False(t, tmpExists, "temp file should be cleaned up")
|
|
||||||
|
|
||||||
// Verify manifest is valid (not empty)
|
|
||||||
content, err := afero.ReadFile(fs, "/output.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, len(content) > 0, "manifest should not be empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
// failingWriterFs wraps a filesystem and makes writes fail after N bytes.
// It is a test double used to exercise error-handling paths in code that
// writes through an afero.Fs.
type failingWriterFs struct {
	afero.Fs
	failAfter int64 // cumulative byte budget; writes that push past this fail
	written   int64 // total bytes attempted so far across all wrapped files
}
|
|
||||||
|
|
||||||
// failingFile wraps an afero.File so its Write consults the shared byte
// budget held by the parent failingWriterFs.
type failingFile struct {
	afero.File
	fs *failingWriterFs // owner of the shared counter and limit
}
|
|
||||||
|
|
||||||
func (f *failingFile) Write(p []byte) (int, error) {
|
|
||||||
f.fs.written += int64(len(p))
|
|
||||||
if f.fs.written > f.fs.failAfter {
|
|
||||||
return 0, fmt.Errorf("simulated write failure")
|
|
||||||
}
|
|
||||||
return f.File.Write(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *failingWriterFs) Create(name string) (afero.File, error) {
|
|
||||||
f, err := fs.Fs.Create(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &failingFile{File: f, fs: fs}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestGenerateAtomicWriteCleansUpOnError verifies that a mid-write failure
// during generation leaves neither the final output file nor the temp file
// behind, demonstrating the temp-file + rename (atomic) write strategy.
func TestGenerateAtomicWriteCleansUpOnError(t *testing.T) {
	baseFs := afero.NewMemMapFs()

	// Create test files - need enough content to trigger the write failure
	require.NoError(t, baseFs.MkdirAll("/testdir", 0o755))
	require.NoError(t, afero.WriteFile(baseFs, "/testdir/file1.txt", []byte("hello world this is a test file"), 0o644))

	// Wrap with failing writer that fails after writing some bytes
	fs := &failingWriterFs{Fs: baseFs, failAfter: 10}

	// Generate manifest - should fail during write
	opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/testdir"}, fs)
	exitCode := RunWithOptions(opts)
	assert.Equal(t, 1, exitCode, "should fail due to write error")

	// With atomic writes: output.mf should NOT exist (temp was cleaned up)
	// With non-atomic writes: output.mf WOULD exist (partial/empty)
	// Existence is checked on baseFs so the failing wrapper cannot interfere.
	exists, _ := afero.Exists(baseFs, "/output.mf")
	assert.False(t, exists, "output file should not exist after failed generation (atomic write)")

	// Temp file should also not exist
	tmpExists, _ := afero.Exists(baseFs, "/output.mf.tmp")
	assert.False(t, tmpExists, "temp file should be cleaned up after failed generation")
}
|
|
||||||
|
|
||||||
// TestGenerateValidatesInputPaths verifies that generate validates all
// input paths up-front: a nonexistent path fails fast with a clear error
// on stderr and produces no output file, while valid paths succeed.
// NOTE(review): the subtests share one fs; "valid paths succeed" reuses
// /output.mf and relies on it not having been created by earlier subtests.
func TestGenerateValidatesInputPaths(t *testing.T) {
	fs := afero.NewMemMapFs()

	// Create one valid directory
	require.NoError(t, fs.MkdirAll("/validdir", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/validdir/file.txt", []byte("content"), 0o644))

	t.Run("nonexistent path fails fast", func(t *testing.T) {
		opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/nonexistent"}, fs)
		exitCode := RunWithOptions(opts)
		assert.Equal(t, 1, exitCode)
		// The error message must name the offending path.
		stderr := opts.Stderr.(*bytes.Buffer).String()
		assert.Contains(t, stderr, "path does not exist")
		assert.Contains(t, stderr, "/nonexistent")
	})

	t.Run("mix of valid and invalid paths fails fast", func(t *testing.T) {
		opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/validdir", "/alsononexistent"}, fs)
		exitCode := RunWithOptions(opts)
		assert.Equal(t, 1, exitCode)
		stderr := opts.Stderr.(*bytes.Buffer).String()
		assert.Contains(t, stderr, "path does not exist")
		assert.Contains(t, stderr, "/alsononexistent")

		// Output file should not have been created
		exists, _ := afero.Exists(fs, "/output.mf")
		assert.False(t, exists, "output file should not exist when path validation fails")
	})

	t.Run("valid paths succeed", func(t *testing.T) {
		opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/output.mf", "/validdir"}, fs)
		exitCode := RunWithOptions(opts)
		assert.Equal(t, 0, exitCode)
	})
}
|
|
||||||
|
|
||||||
// TestCheckDetectsManifestCorruption fuzzes manifest integrity checking:
// it builds a large (~1MB) manifest over 20000 small random files, verifies
// that a truncated manifest fails the check, and then flips a single random
// byte 500 times, asserting each corrupted manifest is rejected.
// NOTE(review): 20000 file writes plus 500 full check passes make this test
// expensive; consider gating the iteration count behind testing.Short().
func TestCheckDetectsManifestCorruption(t *testing.T) {
	fs := afero.NewMemMapFs()
	// Fixed seed keeps the generated tree and corruption sites reproducible.
	rng := rand.New(rand.NewSource(42))

	// Create many small files with random names to generate a ~1MB manifest
	// Each manifest entry is roughly 50-60 bytes, so we need ~20000 files
	require.NoError(t, fs.MkdirAll("/testdir", 0o755))

	numFiles := 20000
	for i := 0; i < numFiles; i++ {
		// Generate random filename
		filename := fmt.Sprintf("/testdir/%08x%08x%08x.dat", rng.Uint32(), rng.Uint32(), rng.Uint32())
		// Small random content
		content := make([]byte, 16+rng.Intn(48))
		rng.Read(content)
		require.NoError(t, afero.WriteFile(fs, filename, content, 0o644))
	}

	// Generate manifest outside of testdir
	opts := testOpts([]string{"mfer", "generate", "-q", "-o", "/manifest.mf", "/testdir"}, fs)
	exitCode := RunWithOptions(opts)
	require.Equal(t, 0, exitCode, "generate should succeed")

	// Read the valid manifest and verify it's approximately 1MB
	validManifest, err := afero.ReadFile(fs, "/manifest.mf")
	require.NoError(t, err)
	require.True(t, len(validManifest) >= 1024*1024, "manifest should be at least 1MB, got %d bytes", len(validManifest))
	t.Logf("manifest size: %d bytes (%d files)", len(validManifest), numFiles)

	// First corruption: truncate the manifest
	require.NoError(t, afero.WriteFile(fs, "/manifest.mf", validManifest[:len(validManifest)/2], 0o644))

	// Check should fail with truncated manifest
	opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/manifest.mf"}, fs)
	exitCode = RunWithOptions(opts)
	assert.Equal(t, 1, exitCode, "check should fail with truncated manifest")

	// Verify check passes with valid manifest
	require.NoError(t, afero.WriteFile(fs, "/manifest.mf", validManifest, 0o644))
	opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/manifest.mf"}, fs)
	exitCode = RunWithOptions(opts)
	require.Equal(t, 0, exitCode, "check should pass with valid manifest")

	// Now do 500 random corruption iterations
	for i := 0; i < 500; i++ {
		// Corrupt: write a random byte at a random offset
		corrupted := make([]byte, len(validManifest))
		copy(corrupted, validManifest)

		offset := rng.Intn(len(corrupted))
		originalByte := corrupted[offset]
		// Make sure we actually change the byte
		newByte := byte(rng.Intn(256))
		for newByte == originalByte {
			newByte = byte(rng.Intn(256))
		}
		corrupted[offset] = newByte

		require.NoError(t, afero.WriteFile(fs, "/manifest.mf", corrupted, 0o644))

		// Check should fail with corrupted manifest
		opts = testOpts([]string{"mfer", "check", "-q", "--base", "/testdir", "/manifest.mf"}, fs)
		exitCode = RunWithOptions(opts)
		assert.Equal(t, 1, exitCode, "iteration %d: check should fail with corrupted manifest (offset %d, 0x%02x -> 0x%02x)",
			i, offset, originalByte, newByte)

		// Restore valid manifest for next iteration
		require.NoError(t, afero.WriteFile(fs, "/manifest.mf", validManifest, 0o644))
	}
}
|
|
||||||
@@ -1,374 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/sha256"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/dustin/go-humanize"
|
|
||||||
"github.com/multiformats/go-multihash"
|
|
||||||
"github.com/urfave/cli/v2"
|
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DownloadProgress reports the progress of a single file download.
// Values are snapshots taken on each write; consumers receive them over a
// channel and may drop updates when busy.
type DownloadProgress struct {
	Path        string        // File path being downloaded
	BytesRead   int64         // Bytes downloaded so far
	TotalBytes  int64         // Total expected bytes (-1 if unknown)
	BytesPerSec float64       // Current download rate
	ETA         time.Duration // Estimated time to completion (0 when unknown)
}
|
|
||||||
|
|
||||||
// fetchManifestOperation implements the "fetch" subcommand: it resolves the
// manifest URL from the first CLI argument, downloads and parses the
// manifest, then downloads and verifies every listed file relative to the
// manifest's directory, logging per-file progress and a final summary.
func (mfa *CLIApp) fetchManifestOperation(ctx *cli.Context) error {
	log.Debug("fetchManifestOperation()")

	if ctx.Args().Len() == 0 {
		return fmt.Errorf("URL argument required")
	}

	inputURL := ctx.Args().Get(0)
	// Directory URLs are resolved to <dir>/index.mf; *.mf URLs pass through.
	manifestURL, err := resolveManifestURL(inputURL)
	if err != nil {
		return fmt.Errorf("invalid URL: %w", err)
	}

	log.Infof("fetching manifest from %s", manifestURL)

	// Fetch manifest
	// NOTE(review): http.Get uses the default client with no timeout; a hung
	// server stalls the fetch indefinitely — confirm whether that's intended.
	resp, err := http.Get(manifestURL)
	if err != nil {
		return fmt.Errorf("failed to fetch manifest: %w", err)
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("failed to fetch manifest: HTTP %d", resp.StatusCode)
	}

	// Parse manifest
	manifest, err := mfer.NewManifestFromReader(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to parse manifest: %w", err)
	}

	files := manifest.Files()
	log.Infof("manifest contains %d files", len(files))

	// Compute base URL (directory containing manifest)
	baseURL, err := url.Parse(manifestURL)
	if err != nil {
		return err
	}
	baseURL.Path = path.Dir(baseURL.Path)
	if !strings.HasSuffix(baseURL.Path, "/") {
		baseURL.Path += "/"
	}

	// Calculate total bytes to download
	var totalBytes int64
	for _, f := range files {
		totalBytes += f.Size
	}

	// Create progress channel
	progress := make(chan DownloadProgress, 10)

	// Start progress reporter goroutine; it exits (closing done) when the
	// progress channel is closed, so every return path below must close
	// progress and then wait on done to avoid leaking the goroutine.
	done := make(chan struct{})
	go func() {
		defer close(done)
		for p := range progress {
			// Rates are reported in bits per second.
			rate := formatBitrate(p.BytesPerSec * 8)
			if p.ETA > 0 {
				log.Infof("%s: %s/%s, %s, ETA %s",
					p.Path, humanize.IBytes(uint64(p.BytesRead)), humanize.IBytes(uint64(p.TotalBytes)),
					rate, p.ETA.Round(time.Second))
			} else {
				log.Infof("%s: %s/%s, %s",
					p.Path, humanize.IBytes(uint64(p.BytesRead)), humanize.IBytes(uint64(p.TotalBytes)), rate)
			}
		}
	}()

	// Track download start time
	startTime := time.Now()

	// Download each file
	for _, f := range files {
		// Sanitize the path to prevent path traversal attacks
		localPath, err := sanitizePath(f.Path)
		if err != nil {
			close(progress)
			<-done
			return fmt.Errorf("invalid path in manifest: %w", err)
		}

		// Percent-encode each segment so special characters survive the URL.
		fileURL := baseURL.String() + encodeFilePath(f.Path)
		log.Infof("fetching %s", f.Path)

		if err := downloadFile(fileURL, localPath, f, progress); err != nil {
			close(progress)
			<-done
			return fmt.Errorf("failed to download %s: %w", f.Path, err)
		}
	}

	close(progress)
	<-done

	// Print summary
	elapsed := time.Since(startTime)
	avgBytesPerSec := float64(totalBytes) / elapsed.Seconds()
	avgRate := formatBitrate(avgBytesPerSec * 8)
	log.Infof("downloaded %d files (%s) in %.1fs (%s avg)",
		len(files),
		humanize.IBytes(uint64(totalBytes)),
		elapsed.Seconds(),
		avgRate)

	return nil
}
|
|
||||||
|
|
||||||
// encodeFilePath percent-encodes every segment of a slash-separated file
// path so it can be appended to a URL, leaving the "/" separators intact.
func encodeFilePath(p string) string {
	parts := strings.Split(p, "/")
	encoded := make([]string, len(parts))
	for i, part := range parts {
		encoded[i] = url.PathEscape(part)
	}
	return strings.Join(encoded, "/")
}
|
|
||||||
|
|
||||||
// sanitizePath validates and sanitizes a file path from the manifest.
// It prevents path traversal attacks and rejects unsafe paths: empty
// paths, absolute paths, paths escaping the current directory, and paths
// that clean to "." (which name no file). On success it returns the
// cleaned relative path to use locally.
func sanitizePath(p string) (string, error) {
	// Reject empty paths
	if p == "" {
		return "", fmt.Errorf("empty path")
	}

	// Reject absolute paths
	if filepath.IsAbs(p) {
		return "", fmt.Errorf("absolute path not allowed: %s", p)
	}

	// Clean the path to resolve . and ..
	cleaned := filepath.Clean(p)

	// Reject paths that escape the current directory
	if strings.HasPrefix(cleaned, ".."+string(filepath.Separator)) || cleaned == ".." {
		return "", fmt.Errorf("path traversal not allowed: %s", p)
	}

	// Reject paths that clean to "." (e.g. "." or "./"): they designate the
	// current directory rather than a file and would produce a bogus
	// download target.
	if cleaned == "." {
		return "", fmt.Errorf("invalid path: %s", p)
	}

	// Also check for absolute paths after cleaning (handles edge cases)
	if filepath.IsAbs(cleaned) {
		return "", fmt.Errorf("absolute path not allowed: %s", p)
	}

	return cleaned, nil
}
|
|
||||||
|
|
||||||
// resolveManifestURL maps a user-supplied URL to a manifest URL. A URL
// whose path already ends in .mf is returned unchanged; anything else is
// treated as a directory and "index.mf" is appended (inserting a "/" when
// the path lacks a trailing one). Query strings and fragments survive.
func resolveManifestURL(inputURL string) (string, error) {
	parsed, err := url.Parse(inputURL)
	if err != nil {
		return "", err
	}

	// Explicit manifest reference: use verbatim.
	if strings.HasSuffix(parsed.Path, ".mf") {
		return inputURL, nil
	}

	// Directory reference: point at index.mf inside it.
	sep := "/"
	if strings.HasSuffix(parsed.Path, "/") {
		sep = ""
	}
	parsed.Path = parsed.Path + sep + "index.mf"

	return parsed.String(), nil
}
|
|
||||||
|
|
||||||
// progressWriter wraps an io.Writer and reports progress to a channel.
// It is used by downloadFile with an io.MultiWriter(file, hash) target so
// one Write both persists and hashes the data while emitting updates.
type progressWriter struct {
	w         io.Writer               // destination writer
	path      string                  // display path used in progress messages
	total     int64                   // expected total bytes for ETA calculation
	written   int64                   // bytes written so far
	startTime time.Time               // start of the transfer, for rate/ETA
	progress  chan<- DownloadProgress // nil disables progress reporting
}
|
|
||||||
|
|
||||||
func (pw *progressWriter) Write(p []byte) (int, error) {
|
|
||||||
n, err := pw.w.Write(p)
|
|
||||||
pw.written += int64(n)
|
|
||||||
if pw.progress != nil {
|
|
||||||
var bytesPerSec float64
|
|
||||||
var eta time.Duration
|
|
||||||
elapsed := time.Since(pw.startTime)
|
|
||||||
if elapsed > 0 && pw.written > 0 {
|
|
||||||
bytesPerSec = float64(pw.written) / elapsed.Seconds()
|
|
||||||
if bytesPerSec > 0 && pw.total > 0 {
|
|
||||||
remainingBytes := pw.total - pw.written
|
|
||||||
eta = time.Duration(float64(remainingBytes)/bytesPerSec) * time.Second
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sendProgress(pw.progress, DownloadProgress{
|
|
||||||
Path: pw.path,
|
|
||||||
BytesRead: pw.written,
|
|
||||||
TotalBytes: pw.total,
|
|
||||||
BytesPerSec: bytesPerSec,
|
|
||||||
ETA: eta,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatBitrate renders a bits-per-second value using the largest decimal
// unit (Gbps, Mbps, Kbps) whose threshold the value meets, falling back to
// plain bps with no decimal place.
func formatBitrate(bps float64) string {
	units := []struct {
		threshold float64
		suffix    string
	}{
		{1e9, "Gbps"},
		{1e6, "Mbps"},
		{1e3, "Kbps"},
	}
	for _, u := range units {
		if bps >= u.threshold {
			return fmt.Sprintf("%.1f %s", bps/u.threshold, u.suffix)
		}
	}
	return fmt.Sprintf("%.0f bps", bps)
}
|
|
||||||
|
|
||||||
// sendProgress sends a progress update without blocking.
// If the channel's buffer is full the update is silently dropped, so a
// slow or absent consumer can never stall the download path.
func sendProgress(ch chan<- DownloadProgress, p DownloadProgress) {
	select {
	case ch <- p:
	default:
		// Drop the update rather than block the writer.
	}
}
|
|
||||||
|
|
||||||
// downloadFile downloads a URL to a local file path with hash verification.
// It downloads to a temporary file, verifies the hash, then renames to the final path.
// Progress is reported via the progress channel.
// The temp file is removed on every failure path so no partial file is
// ever left at (or near) the final path.
// NOTE(review): http.Get uses the default client with no timeout — confirm
// a hung server stalling a download forever is acceptable here.
func downloadFile(fileURL, localPath string, entry *mfer.MFFilePath, progress chan<- DownloadProgress) error {
	// Create parent directories if needed
	dir := filepath.Dir(localPath)
	if dir != "" && dir != "." {
		if err := os.MkdirAll(dir, 0o755); err != nil {
			return err
		}
	}

	// Compute temp file path in the same directory
	// For dotfiles, just append .tmp (they're already hidden)
	// For regular files, prefix with . and append .tmp
	base := filepath.Base(localPath)
	var tmpName string
	if strings.HasPrefix(base, ".") {
		tmpName = base + ".tmp"
	} else {
		tmpName = "." + base + ".tmp"
	}
	tmpPath := filepath.Join(dir, tmpName)
	if dir == "" || dir == "." {
		tmpPath = tmpName
	}

	// Fetch file
	resp, err := http.Get(fileURL)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("HTTP %d", resp.StatusCode)
	}

	// Determine expected size
	// Prefer Content-Length for progress totals; fall back to the manifest
	// size when the server does not send one.
	expectedSize := entry.Size
	totalBytes := resp.ContentLength
	if totalBytes < 0 {
		totalBytes = expectedSize
	}

	// Create temp file
	out, err := os.Create(tmpPath)
	if err != nil {
		return err
	}

	// Set up hash computation
	h := sha256.New()

	// Create progress-reporting writer that also computes hash
	// (a single io.Copy both persists and hashes the stream).
	pw := &progressWriter{
		w:         io.MultiWriter(out, h),
		path:      localPath,
		total:     totalBytes,
		startTime: time.Now(),
		progress:  progress,
	}

	// Copy content while hashing and reporting progress
	written, copyErr := io.Copy(pw, resp.Body)

	// Close file before checking errors (to flush writes)
	closeErr := out.Close()

	// If copy failed, clean up temp file and return error
	if copyErr != nil {
		_ = os.Remove(tmpPath)
		return copyErr
	}
	if closeErr != nil {
		_ = os.Remove(tmpPath)
		return closeErr
	}

	// Verify size
	if written != expectedSize {
		_ = os.Remove(tmpPath)
		return fmt.Errorf("size mismatch: expected %d bytes, got %d", expectedSize, written)
	}

	// Encode computed hash as multihash
	computed, err := multihash.Encode(h.Sum(nil), multihash.SHA2_256)
	if err != nil {
		_ = os.Remove(tmpPath)
		return fmt.Errorf("failed to encode hash: %w", err)
	}

	// Verify hash against manifest (at least one must match)
	// NOTE(review): an entry with zero hashes always fails here as a "hash
	// mismatch" — confirm empty Hashes cannot occur in a valid manifest.
	hashMatch := false
	for _, hash := range entry.Hashes {
		if bytes.Equal(computed, hash.MultiHash) {
			hashMatch = true
			break
		}
	}
	if !hashMatch {
		_ = os.Remove(tmpPath)
		return fmt.Errorf("hash mismatch")
	}

	// Rename temp file to final path
	if err := os.Rename(tmpPath, localPath); err != nil {
		_ = os.Remove(tmpPath)
		return fmt.Errorf("failed to rename temp file: %w", err)
	}

	return nil
}
|
|
||||||
@@ -1,391 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestEncodeFilePath(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
input string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{"file.txt", "file.txt"},
|
|
||||||
{"dir/file.txt", "dir/file.txt"},
|
|
||||||
{"my file.txt", "my%20file.txt"},
|
|
||||||
{"dir/my file.txt", "dir/my%20file.txt"},
|
|
||||||
{"file#1.txt", "file%231.txt"},
|
|
||||||
{"file?v=1.txt", "file%3Fv=1.txt"},
|
|
||||||
{"path/to/file with spaces.txt", "path/to/file%20with%20spaces.txt"},
|
|
||||||
{"100%done.txt", "100%25done.txt"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.input, func(t *testing.T) {
|
|
||||||
result := encodeFilePath(tt.input)
|
|
||||||
assert.Equal(t, tt.expected, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSanitizePath(t *testing.T) {
|
|
||||||
// Valid paths that should be accepted
|
|
||||||
validTests := []struct {
|
|
||||||
input string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{"file.txt", "file.txt"},
|
|
||||||
{"dir/file.txt", "dir/file.txt"},
|
|
||||||
{"dir/subdir/file.txt", "dir/subdir/file.txt"},
|
|
||||||
{"./file.txt", "file.txt"},
|
|
||||||
{"./dir/file.txt", "dir/file.txt"},
|
|
||||||
{"dir/./file.txt", "dir/file.txt"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range validTests {
|
|
||||||
t.Run("valid:"+tt.input, func(t *testing.T) {
|
|
||||||
result, err := sanitizePath(tt.input)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, tt.expected, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Invalid paths that should be rejected
|
|
||||||
invalidTests := []struct {
|
|
||||||
input string
|
|
||||||
desc string
|
|
||||||
}{
|
|
||||||
{"", "empty path"},
|
|
||||||
{"..", "parent directory"},
|
|
||||||
{"../file.txt", "parent traversal"},
|
|
||||||
{"../../file.txt", "double parent traversal"},
|
|
||||||
{"dir/../../../file.txt", "traversal escaping base"},
|
|
||||||
{"/etc/passwd", "absolute path"},
|
|
||||||
{"/file.txt", "absolute path with single component"},
|
|
||||||
{"dir/../../etc/passwd", "traversal to system file"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range invalidTests {
|
|
||||||
t.Run("invalid:"+tt.desc, func(t *testing.T) {
|
|
||||||
_, err := sanitizePath(tt.input)
|
|
||||||
assert.Error(t, err, "expected error for path: %s", tt.input)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestResolveManifestURL(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
input string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
// Already ends with .mf - use as-is
|
|
||||||
{"https://example.com/path/index.mf", "https://example.com/path/index.mf"},
|
|
||||||
{"https://example.com/path/custom.mf", "https://example.com/path/custom.mf"},
|
|
||||||
{"https://example.com/foo.mf", "https://example.com/foo.mf"},
|
|
||||||
|
|
||||||
// Directory with trailing slash - append index.mf
|
|
||||||
{"https://example.com/path/", "https://example.com/path/index.mf"},
|
|
||||||
{"https://example.com/", "https://example.com/index.mf"},
|
|
||||||
|
|
||||||
// Directory without trailing slash - add slash and index.mf
|
|
||||||
{"https://example.com/path", "https://example.com/path/index.mf"},
|
|
||||||
{"https://example.com", "https://example.com/index.mf"},
|
|
||||||
|
|
||||||
// With query strings
|
|
||||||
{"https://example.com/path?foo=bar", "https://example.com/path/index.mf?foo=bar"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.input, func(t *testing.T) {
|
|
||||||
result, err := resolveManifestURL(tt.input)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, tt.expected, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestFetchFromHTTP is an end-to-end download test: it builds a manifest
// over an in-memory tree, serves manifest and files from an httptest
// server, downloads everything with downloadFile into a temp directory,
// and verifies the downloaded bytes match the originals.
// NOTE(review): os.Chdir mutates process-global state, so this test must
// not run with t.Parallel() alongside other cwd-sensitive tests.
func TestFetchFromHTTP(t *testing.T) {
	// Create source filesystem with test files
	sourceFs := afero.NewMemMapFs()

	testFiles := map[string][]byte{
		"file1.txt":         []byte("Hello, World!"),
		"file2.txt":         []byte("This is file 2 with more content."),
		"subdir/file3.txt":  []byte("Nested file content here."),
		"subdir/deep/f.txt": []byte("Deeply nested file."),
	}

	for path, content := range testFiles {
		fullPath := "/" + path // MemMapFs needs absolute paths
		dir := filepath.Dir(fullPath)
		require.NoError(t, sourceFs.MkdirAll(dir, 0o755))
		require.NoError(t, afero.WriteFile(sourceFs, fullPath, content, 0o644))
	}

	// Generate manifest using scanner
	opts := &mfer.ScannerOptions{
		Fs: sourceFs,
	}
	s := mfer.NewScannerWithOptions(opts)
	require.NoError(t, s.EnumerateFS(sourceFs, "/", nil))

	var manifestBuf bytes.Buffer
	require.NoError(t, s.ToManifest(context.Background(), &manifestBuf, nil))
	manifestData := manifestBuf.Bytes()

	// Create HTTP server that serves the source filesystem
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		if path == "/index.mf" {
			w.Header().Set("Content-Type", "application/octet-stream")
			_, _ = w.Write(manifestData)
			return
		}

		// Strip leading slash
		if len(path) > 0 && path[0] == '/' {
			path = path[1:]
		}

		content, exists := testFiles[path]
		if !exists {
			http.NotFound(w, r)
			return
		}

		w.Header().Set("Content-Type", "application/octet-stream")
		_, _ = w.Write(content)
	}))
	defer server.Close()

	// Create destination directory
	destDir, err := os.MkdirTemp("", "mfer-fetch-test-*")
	require.NoError(t, err)
	defer func() { _ = os.RemoveAll(destDir) }()

	// Change to dest directory for the test
	// (downloadFile writes relative to the current working directory).
	origDir, err := os.Getwd()
	require.NoError(t, err)
	require.NoError(t, os.Chdir(destDir))
	defer func() { _ = os.Chdir(origDir) }()

	// Parse the manifest to get file entries
	manifest, err := mfer.NewManifestFromReader(bytes.NewReader(manifestData))
	require.NoError(t, err)

	files := manifest.Files()
	require.Len(t, files, len(testFiles))

	// Download each file using downloadFile
	progress := make(chan DownloadProgress, 10)
	go func() {
		for range progress {
			// Drain progress channel
		}
	}()

	baseURL := server.URL + "/"
	for _, f := range files {
		localPath, err := sanitizePath(f.Path)
		require.NoError(t, err)

		fileURL := baseURL + f.Path
		err = downloadFile(fileURL, localPath, f, progress)
		require.NoError(t, err, "failed to download %s", f.Path)
	}
	close(progress)

	// Verify downloaded files match originals
	for path, expectedContent := range testFiles {
		downloadedPath := filepath.Join(destDir, path)
		downloadedContent, err := os.ReadFile(downloadedPath)
		require.NoError(t, err, "failed to read downloaded file %s", path)
		assert.Equal(t, expectedContent, downloadedContent, "content mismatch for %s", path)
	}
}
|
|
||||||
|
|
||||||
func TestFetchHashMismatch(t *testing.T) {
|
|
||||||
// Create source filesystem with a test file
|
|
||||||
sourceFs := afero.NewMemMapFs()
|
|
||||||
originalContent := []byte("Original content")
|
|
||||||
require.NoError(t, afero.WriteFile(sourceFs, "/file.txt", originalContent, 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := &mfer.ScannerOptions{Fs: sourceFs}
|
|
||||||
s := mfer.NewScannerWithOptions(opts)
|
|
||||||
require.NoError(t, s.EnumerateFS(sourceFs, "/", nil))
|
|
||||||
|
|
||||||
var manifestBuf bytes.Buffer
|
|
||||||
require.NoError(t, s.ToManifest(context.Background(), &manifestBuf, nil))
|
|
||||||
|
|
||||||
// Parse manifest
|
|
||||||
manifest, err := mfer.NewManifestFromReader(bytes.NewReader(manifestBuf.Bytes()))
|
|
||||||
require.NoError(t, err)
|
|
||||||
files := manifest.Files()
|
|
||||||
require.Len(t, files, 1)
|
|
||||||
|
|
||||||
// Create server that serves DIFFERENT content (to trigger hash mismatch)
|
|
||||||
tamperedContent := []byte("Tampered content!")
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().Set("Content-Type", "application/octet-stream")
|
|
||||||
_, _ = w.Write(tamperedContent)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
// Create temp directory
|
|
||||||
destDir, err := os.MkdirTemp("", "mfer-fetch-hash-test-*")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(destDir) }()
|
|
||||||
|
|
||||||
origDir, err := os.Getwd()
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, os.Chdir(destDir))
|
|
||||||
defer func() { _ = os.Chdir(origDir) }()
|
|
||||||
|
|
||||||
// Try to download - should fail with hash mismatch
|
|
||||||
err = downloadFile(server.URL+"/file.txt", "file.txt", files[0], nil)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "mismatch")
|
|
||||||
|
|
||||||
// Verify temp file was cleaned up
|
|
||||||
_, err = os.Stat(".file.txt.tmp")
|
|
||||||
assert.True(t, os.IsNotExist(err), "temp file should be cleaned up on hash mismatch")
|
|
||||||
|
|
||||||
// Verify final file was not created
|
|
||||||
_, err = os.Stat("file.txt")
|
|
||||||
assert.True(t, os.IsNotExist(err), "final file should not exist on hash mismatch")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFetchSizeMismatch(t *testing.T) {
|
|
||||||
// Create source filesystem with a test file
|
|
||||||
sourceFs := afero.NewMemMapFs()
|
|
||||||
originalContent := []byte("Original content with specific size")
|
|
||||||
require.NoError(t, afero.WriteFile(sourceFs, "/file.txt", originalContent, 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := &mfer.ScannerOptions{Fs: sourceFs}
|
|
||||||
s := mfer.NewScannerWithOptions(opts)
|
|
||||||
require.NoError(t, s.EnumerateFS(sourceFs, "/", nil))
|
|
||||||
|
|
||||||
var manifestBuf bytes.Buffer
|
|
||||||
require.NoError(t, s.ToManifest(context.Background(), &manifestBuf, nil))
|
|
||||||
|
|
||||||
// Parse manifest
|
|
||||||
manifest, err := mfer.NewManifestFromReader(bytes.NewReader(manifestBuf.Bytes()))
|
|
||||||
require.NoError(t, err)
|
|
||||||
files := manifest.Files()
|
|
||||||
require.Len(t, files, 1)
|
|
||||||
|
|
||||||
// Create server that serves content with wrong size
|
|
||||||
wrongSizeContent := []byte("Short")
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().Set("Content-Type", "application/octet-stream")
|
|
||||||
_, _ = w.Write(wrongSizeContent)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
// Create temp directory
|
|
||||||
destDir, err := os.MkdirTemp("", "mfer-fetch-size-test-*")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(destDir) }()
|
|
||||||
|
|
||||||
origDir, err := os.Getwd()
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, os.Chdir(destDir))
|
|
||||||
defer func() { _ = os.Chdir(origDir) }()
|
|
||||||
|
|
||||||
// Try to download - should fail with size mismatch
|
|
||||||
err = downloadFile(server.URL+"/file.txt", "file.txt", files[0], nil)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "size mismatch")
|
|
||||||
|
|
||||||
// Verify temp file was cleaned up
|
|
||||||
_, err = os.Stat(".file.txt.tmp")
|
|
||||||
assert.True(t, os.IsNotExist(err), "temp file should be cleaned up on size mismatch")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFetchProgress(t *testing.T) {
|
|
||||||
// Create source filesystem with a larger test file
|
|
||||||
sourceFs := afero.NewMemMapFs()
|
|
||||||
// Create content large enough to trigger multiple progress updates
|
|
||||||
content := bytes.Repeat([]byte("x"), 100*1024) // 100KB
|
|
||||||
require.NoError(t, afero.WriteFile(sourceFs, "/large.txt", content, 0o644))
|
|
||||||
|
|
||||||
// Generate manifest
|
|
||||||
opts := &mfer.ScannerOptions{Fs: sourceFs}
|
|
||||||
s := mfer.NewScannerWithOptions(opts)
|
|
||||||
require.NoError(t, s.EnumerateFS(sourceFs, "/", nil))
|
|
||||||
|
|
||||||
var manifestBuf bytes.Buffer
|
|
||||||
require.NoError(t, s.ToManifest(context.Background(), &manifestBuf, nil))
|
|
||||||
|
|
||||||
// Parse manifest
|
|
||||||
manifest, err := mfer.NewManifestFromReader(bytes.NewReader(manifestBuf.Bytes()))
|
|
||||||
require.NoError(t, err)
|
|
||||||
files := manifest.Files()
|
|
||||||
require.Len(t, files, 1)
|
|
||||||
|
|
||||||
// Create server that serves the content
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().Set("Content-Type", "application/octet-stream")
|
|
||||||
w.Header().Set("Content-Length", "102400")
|
|
||||||
// Write in chunks to allow progress reporting
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
_, _ = io.Copy(w, reader)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
// Create temp directory
|
|
||||||
destDir, err := os.MkdirTemp("", "mfer-fetch-progress-test-*")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(destDir) }()
|
|
||||||
|
|
||||||
origDir, err := os.Getwd()
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, os.Chdir(destDir))
|
|
||||||
defer func() { _ = os.Chdir(origDir) }()
|
|
||||||
|
|
||||||
// Set up progress channel and collect updates
|
|
||||||
progress := make(chan DownloadProgress, 100)
|
|
||||||
var progressUpdates []DownloadProgress
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
for p := range progress {
|
|
||||||
progressUpdates = append(progressUpdates, p)
|
|
||||||
}
|
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Download
|
|
||||||
err = downloadFile(server.URL+"/large.txt", "large.txt", files[0], progress)
|
|
||||||
close(progress)
|
|
||||||
<-done
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify we got progress updates
|
|
||||||
assert.NotEmpty(t, progressUpdates, "should have received progress updates")
|
|
||||||
|
|
||||||
// Verify final progress shows complete
|
|
||||||
if len(progressUpdates) > 0 {
|
|
||||||
last := progressUpdates[len(progressUpdates)-1]
|
|
||||||
assert.Equal(t, int64(len(content)), last.BytesRead, "final progress should show all bytes read")
|
|
||||||
assert.Equal(t, "large.txt", last.Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify file was downloaded correctly
|
|
||||||
downloaded, err := os.ReadFile("large.txt")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, content, downloaded)
|
|
||||||
}
|
|
||||||
@@ -1,386 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/fs"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/dustin/go-humanize"
|
|
||||||
"github.com/multiformats/go-multihash"
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/urfave/cli/v2"
|
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FreshenStatus contains progress information for the freshen operation.
// Each value is a snapshot taken at reporting time; the byte-oriented
// fields are only meaningful during the hash phase.
type FreshenStatus struct {
	Phase        string        // "scan" or "hash"
	TotalFiles   int64         // Total files to process in current phase
	CurrentFiles int64         // Files processed so far
	TotalBytes   int64         // Total bytes to hash (hash phase only)
	CurrentBytes int64         // Bytes hashed so far
	BytesPerSec  float64       // Current throughput rate
	ETA          time.Duration // Estimated time to completion
}
|
|
||||||
|
|
||||||
// freshenEntry tracks a file's status during freshen: one entry per file
// found on disk, recording whether it must be re-hashed or can reuse the
// hash already stored in the loaded manifest.
type freshenEntry struct {
	path      string           // path relative to the scan base
	size      int64            // size in bytes from stat
	mtime     time.Time        // modification time from stat
	needsHash bool             // true if new or changed
	existing  *mfer.MFFilePath // existing manifest entry if unchanged
}
|
|
||||||
|
|
||||||
// freshenManifestOperation implements the "freshen" CLI command: it loads
// an existing manifest, walks the filesystem to classify each file as
// unchanged/changed/added/removed (by size+mtime comparison), re-hashes
// only the changed and added files, and atomically rewrites the manifest.
// If nothing changed, the manifest is left untouched.
func (mfa *CLIApp) freshenManifestOperation(ctx *cli.Context) error {
	log.Debug("freshenManifestOperation()")

	basePath := ctx.String("base")
	showProgress := ctx.Bool("progress")
	includeDotfiles := ctx.Bool("IncludeDotfiles")
	followSymlinks := ctx.Bool("FollowSymLinks")

	// Find manifest file: an explicit path argument wins; a directory
	// argument (or no argument) triggers a search via findManifest.
	var manifestPath string
	var err error

	if ctx.Args().Len() > 0 {
		arg := ctx.Args().Get(0)
		info, statErr := mfa.Fs.Stat(arg)
		if statErr == nil && info.IsDir() {
			manifestPath, err = findManifest(mfa.Fs, arg)
			if err != nil {
				return err
			}
		} else {
			// Non-existent or non-directory argument is treated as a
			// direct manifest path; loading below reports any error.
			manifestPath = arg
		}
	} else {
		manifestPath, err = findManifest(mfa.Fs, ".")
		if err != nil {
			return err
		}
	}

	log.Infof("loading manifest from %s", manifestPath)

	// Load existing manifest
	manifest, err := mfer.NewManifestFromFile(mfa.Fs, manifestPath)
	if err != nil {
		return fmt.Errorf("failed to load manifest: %w", err)
	}

	existingFiles := manifest.Files()
	log.Infof("manifest contains %d files", len(existingFiles))

	// Build map of existing entries by path. Entries are deleted from
	// this map as they are seen on disk; whatever remains afterwards is
	// the set of removed files.
	existingByPath := make(map[string]*mfer.MFFilePath, len(existingFiles))
	for _, f := range existingFiles {
		existingByPath[f.Path] = f
	}

	// Phase 1: Scan filesystem
	log.Infof("scanning filesystem...")
	startScan := time.Now()

	var entries []*freshenEntry
	var scanCount int64
	var removed, changed, added, unchanged int64

	absBase, err := filepath.Abs(basePath)
	if err != nil {
		return err
	}

	err = afero.Walk(mfa.Fs, absBase, func(path string, info fs.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}

		// Get relative path
		relPath, err := filepath.Rel(absBase, path)
		if err != nil {
			return err
		}

		// Skip the manifest file itself (plain and dot-prefixed forms).
		if relPath == filepath.Base(manifestPath) || relPath == "."+filepath.Base(manifestPath) {
			return nil
		}

		// Handle dotfiles: skip hidden files, and prune hidden
		// directories entirely via SkipDir.
		if !includeDotfiles && mfer.IsHiddenPath(filepath.ToSlash(relPath)) {
			if info.IsDir() {
				return filepath.SkipDir
			}
			return nil
		}

		// Skip directories
		if info.IsDir() {
			return nil
		}

		// Handle symlinks: either ignore them, or resolve to the target
		// and use the target's FileInfo (directories and broken links
		// are skipped).
		if info.Mode()&fs.ModeSymlink != 0 {
			if !followSymlinks {
				return nil
			}
			realPath, err := filepath.EvalSymlinks(path)
			if err != nil {
				return nil // Skip broken symlinks
			}
			realInfo, err := mfa.Fs.Stat(realPath)
			if err != nil || realInfo.IsDir() {
				return nil
			}
			info = realInfo
		}

		scanCount++

		// Check against existing manifest
		existing, inManifest := existingByPath[relPath]
		if inManifest {
			// Check if changed (size or mtime)
			existingMtime := time.Unix(existing.Mtime.Seconds, int64(existing.Mtime.Nanos))
			if existing.Size != info.Size() || !existingMtime.Equal(info.ModTime()) {
				changed++
				log.Verbosef("M %s", relPath)
				entries = append(entries, &freshenEntry{
					path:      relPath,
					size:      info.Size(),
					mtime:     info.ModTime(),
					needsHash: true,
				})
			} else {
				unchanged++
				entries = append(entries, &freshenEntry{
					path:      relPath,
					size:      info.Size(),
					mtime:     info.ModTime(),
					needsHash: false,
					existing:  existing,
				})
			}
			// Mark as seen
			delete(existingByPath, relPath)
		} else {
			added++
			log.Verbosef("A %s", relPath)
			entries = append(entries, &freshenEntry{
				path:      relPath,
				size:      info.Size(),
				mtime:     info.ModTime(),
				needsHash: true,
			})
		}

		// Report scan progress
		if showProgress && scanCount%100 == 0 {
			log.Progressf("Scanning: %d files found", scanCount)
		}

		return nil
	})

	if showProgress {
		log.ProgressDone()
	}

	if err != nil {
		return fmt.Errorf("failed to scan filesystem: %w", err)
	}

	// Remaining entries in existingByPath are removed files
	removed = int64(len(existingByPath))
	for path := range existingByPath {
		log.Verbosef("D %s", path)
	}

	scanDuration := time.Since(startScan)
	log.Infof("scan complete in %s: %d unchanged, %d changed, %d added, %d removed",
		scanDuration.Round(time.Millisecond), unchanged, changed, added, removed)

	// Calculate total bytes to hash
	var totalHashBytes int64
	var filesToHash int64
	for _, e := range entries {
		if e.needsHash {
			totalHashBytes += e.size
			filesToHash++
		}
	}

	// Phase 2: Hash changed and new files
	if filesToHash > 0 {
		log.Infof("hashing %d files (%s)...", filesToHash, humanize.IBytes(uint64(totalHashBytes)))
	}

	startHash := time.Now()
	var hashedFiles int64
	var hashedBytes int64

	builder := mfer.NewBuilder()

	// Set up signing options if sign-key is provided
	if signKey := ctx.String("sign-key"); signKey != "" {
		builder.SetSigningOptions(&mfer.SigningOptions{
			KeyID: mfer.GPGKeyID(signKey),
		})
		log.Infof("signing manifest with GPG key: %s", signKey)
	}

	for _, e := range entries {
		// Honor cancellation between files without blocking.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		if e.needsHash {
			// Need to read and hash the file
			absPath := filepath.Join(absBase, e.path)
			f, err := mfa.Fs.Open(absPath)
			if err != nil {
				return fmt.Errorf("failed to open %s: %w", e.path, err)
			}

			// The progress callback computes the aggregate rate/ETA from
			// bytes completed in prior files (hashedBytes) plus bytes
			// read so far in this file (n).
			hash, bytesRead, err := hashFile(f, e.size, func(n int64) {
				if showProgress {
					currentBytes := hashedBytes + n
					elapsed := time.Since(startHash)
					var rate float64
					var eta time.Duration
					if elapsed > 0 && currentBytes > 0 {
						rate = float64(currentBytes) / elapsed.Seconds()
						remaining := totalHashBytes - currentBytes
						if rate > 0 {
							eta = time.Duration(float64(remaining)/rate) * time.Second
						}
					}
					if eta > 0 {
						log.Progressf("Hashing: %d/%d files, %s/s, ETA %s",
							hashedFiles, filesToHash, humanize.IBytes(uint64(rate)), eta.Round(time.Second))
					} else {
						log.Progressf("Hashing: %d/%d files, %s/s",
							hashedFiles, filesToHash, humanize.IBytes(uint64(rate)))
					}
				}
			})
			// Close error intentionally ignored: the file was only read.
			_ = f.Close()

			if err != nil {
				return fmt.Errorf("failed to hash %s: %w", e.path, err)
			}

			hashedBytes += bytesRead
			hashedFiles++

			// Add to builder with computed hash
			if err := addFileToBuilder(builder, e.path, e.size, e.mtime, hash); err != nil {
				return fmt.Errorf("failed to add %s: %w", e.path, err)
			}
		} else {
			// Use existing entry
			if err := addExistingToBuilder(builder, e.existing); err != nil {
				return fmt.Errorf("failed to add %s: %w", e.path, err)
			}
		}
	}

	if showProgress && filesToHash > 0 {
		log.ProgressDone()
	}

	// Print summary
	log.Infof("freshen complete: %d unchanged, %d changed, %d added, %d removed",
		unchanged, changed, added, removed)

	// Skip writing if nothing changed
	if changed == 0 && added == 0 && removed == 0 {
		log.Infof("manifest unchanged, skipping write")
		return nil
	}

	// Write updated manifest atomically (write to temp, then rename)
	tmpPath := manifestPath + ".tmp"
	outFile, err := mfa.Fs.Create(tmpPath)
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}

	err = builder.Build(outFile)
	_ = outFile.Close()
	if err != nil {
		_ = mfa.Fs.Remove(tmpPath)
		return fmt.Errorf("failed to write manifest: %w", err)
	}

	// Rename temp to final
	if err := mfa.Fs.Rename(tmpPath, manifestPath); err != nil {
		_ = mfa.Fs.Remove(tmpPath)
		return fmt.Errorf("failed to rename manifest: %w", err)
	}

	totalDuration := time.Since(mfa.startupTime)
	if hashedBytes > 0 {
		hashDuration := time.Since(startHash)
		hashRate := float64(hashedBytes) / hashDuration.Seconds()
		// NOTE(review): the reported elapsed time is totalDuration (since
		// process startup) while the rate is derived from hashDuration;
		// confirm the mixed durations in this log line are intentional.
		log.Infof("hashed %s in %.1fs (%s/s)",
			humanize.IBytes(uint64(hashedBytes)), totalDuration.Seconds(), humanize.IBytes(uint64(hashRate)))
	}
	log.Infof("wrote %d files to %s", len(entries), manifestPath)

	return nil
}
|
|
||||||
|
|
||||||
// hashFile reads a file and computes its SHA256 multihash.
|
|
||||||
// Progress callback is called with bytes read so far.
|
|
||||||
func hashFile(r io.Reader, size int64, progress func(int64)) ([]byte, int64, error) {
|
|
||||||
h := sha256.New()
|
|
||||||
buf := make([]byte, 64*1024)
|
|
||||||
var total int64
|
|
||||||
|
|
||||||
for {
|
|
||||||
n, err := r.Read(buf)
|
|
||||||
if n > 0 {
|
|
||||||
h.Write(buf[:n])
|
|
||||||
total += int64(n)
|
|
||||||
if progress != nil {
|
|
||||||
progress(total)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, total, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mh, err := multihash.Encode(h.Sum(nil), multihash.SHA2_256)
|
|
||||||
if err != nil {
|
|
||||||
return nil, total, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return mh, total, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// addFileToBuilder adds a new file entry to the builder using a freshly
// computed hash. path is relative to the manifest base; size and mtime
// come from the filesystem stat; hash is the multihash-encoded digest.
func addFileToBuilder(b *mfer.Builder, path string, size int64, mtime time.Time, hash []byte) error {
	return b.AddFileWithHash(mfer.RelFilePath(path), mfer.FileSize(size), mfer.ModTime(mtime), hash)
}
|
|
||||||
|
|
||||||
// addExistingToBuilder carries an unchanged manifest entry over into the
// builder, reusing its previously recorded (first) hash.
//
// NOTE(review): entries with no recorded hashes are silently dropped from
// the rebuilt manifest (nil return, nothing added) — confirm this is the
// intended behavior rather than an error condition.
func addExistingToBuilder(b *mfer.Builder, entry *mfer.MFFilePath) error {
	// Reconstruct the modification time from the stored seconds/nanos pair.
	mtime := time.Unix(entry.Mtime.Seconds, int64(entry.Mtime.Nanos))
	if len(entry.Hashes) == 0 {
		return nil
	}
	return b.AddFileWithHash(mfer.RelFilePath(entry.Path), mfer.FileSize(entry.Size), mfer.ModTime(mtime), entry.Hashes[0].MultiHash)
}
|
|
||||||
@@ -1,82 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFreshenUnchanged(t *testing.T) {
|
|
||||||
// Create filesystem with test files
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("content1"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("content2"), 0o644))
|
|
||||||
|
|
||||||
// Generate initial manifest
|
|
||||||
opts := &mfer.ScannerOptions{Fs: fs}
|
|
||||||
s := mfer.NewScannerWithOptions(opts)
|
|
||||||
require.NoError(t, s.EnumeratePath("/testdir", nil))
|
|
||||||
|
|
||||||
var manifestBuf bytes.Buffer
|
|
||||||
require.NoError(t, s.ToManifest(context.Background(), &manifestBuf, nil))
|
|
||||||
|
|
||||||
// Write manifest to filesystem
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/.index.mf", manifestBuf.Bytes(), 0o644))
|
|
||||||
|
|
||||||
// Parse manifest to verify
|
|
||||||
manifest, err := mfer.NewManifestFromFile(fs, "/testdir/.index.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, manifest.Files(), 2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFreshenWithChanges(t *testing.T) {
|
|
||||||
// Create filesystem with test files
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("content1"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("content2"), 0o644))
|
|
||||||
|
|
||||||
// Generate initial manifest
|
|
||||||
opts := &mfer.ScannerOptions{Fs: fs}
|
|
||||||
s := mfer.NewScannerWithOptions(opts)
|
|
||||||
require.NoError(t, s.EnumeratePath("/testdir", nil))
|
|
||||||
|
|
||||||
var manifestBuf bytes.Buffer
|
|
||||||
require.NoError(t, s.ToManifest(context.Background(), &manifestBuf, nil))
|
|
||||||
|
|
||||||
// Write manifest to filesystem
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/.index.mf", manifestBuf.Bytes(), 0o644))
|
|
||||||
|
|
||||||
// Verify initial manifest has 2 files
|
|
||||||
manifest, err := mfer.NewManifestFromFile(fs, "/testdir/.index.mf")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, manifest.Files(), 2)
|
|
||||||
|
|
||||||
// Add a new file
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file3.txt", []byte("content3"), 0o644))
|
|
||||||
|
|
||||||
// Modify file2 (change content and size)
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("modified content2"), 0o644))
|
|
||||||
|
|
||||||
// Remove file1
|
|
||||||
require.NoError(t, fs.Remove("/testdir/file1.txt"))
|
|
||||||
|
|
||||||
// Note: The freshen operation would need to be run here
|
|
||||||
// For now, we just verify the test setup is correct
|
|
||||||
exists, _ := afero.Exists(fs, "/testdir/file1.txt")
|
|
||||||
assert.False(t, exists)
|
|
||||||
|
|
||||||
exists, _ = afero.Exists(fs, "/testdir/file3.txt")
|
|
||||||
assert.True(t, exists)
|
|
||||||
|
|
||||||
content, _ := afero.ReadFile(fs, "/testdir/file2.txt")
|
|
||||||
assert.Equal(t, "modified content2", string(content))
|
|
||||||
}
|
|
||||||
@@ -1,176 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/dustin/go-humanize"
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/urfave/cli/v2"
|
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
// generateManifestOperation implements the "generate" CLI command: it
// enumerates the requested paths (default: current directory), then
// streams a manifest to the output file via an atomic write (temp file
// plus rename), cleaning up the temp file on error or Ctrl-C.
func (mfa *CLIApp) generateManifestOperation(ctx *cli.Context) error {
	log.Debug("generateManifestOperation()")

	opts := &mfer.ScannerOptions{
		IncludeDotfiles: ctx.Bool("IncludeDotfiles"),
		FollowSymLinks:  ctx.Bool("FollowSymLinks"),
		Fs:              mfa.Fs,
	}

	// Set up signing options if sign-key is provided
	if signKey := ctx.String("sign-key"); signKey != "" {
		opts.SigningOptions = &mfer.SigningOptions{
			KeyID: mfer.GPGKeyID(signKey),
		}
		log.Infof("signing manifest with GPG key: %s", signKey)
	}

	s := mfer.NewScannerWithOptions(opts)

	// Phase 1: Enumeration - collect paths and stat files
	args := ctx.Args()
	showProgress := ctx.Bool("progress")

	// Set up enumeration progress reporting. The drain goroutine runs
	// until the scanner closes enumProgress; enumWg lets us wait for its
	// final ProgressDone before printing the summary line.
	var enumProgress chan mfer.EnumerateStatus
	var enumWg sync.WaitGroup
	if showProgress {
		enumProgress = make(chan mfer.EnumerateStatus, 1)
		enumWg.Add(1)
		go func() {
			defer enumWg.Done()
			for status := range enumProgress {
				log.Progressf("Enumerating: %d files, %s",
					status.FilesFound,
					humanize.IBytes(uint64(status.BytesFound)))
			}
			log.ProgressDone()
		}()
	}

	if args.Len() == 0 {
		// Default to current directory
		if err := s.EnumeratePath(".", enumProgress); err != nil {
			return err
		}
	} else {
		// Collect and validate all paths first
		paths := make([]string, 0, args.Len())
		for i := 0; i < args.Len(); i++ {
			inputPath := args.Get(i)
			ap, err := filepath.Abs(inputPath)
			if err != nil {
				return err
			}
			// Validate path exists before adding to list
			if exists, _ := afero.Exists(mfa.Fs, ap); !exists {
				return fmt.Errorf("path does not exist: %s", inputPath)
			}
			log.Debugf("enumerating path: %s", ap)
			paths = append(paths, ap)
		}
		if err := s.EnumeratePaths(enumProgress, paths...); err != nil {
			return err
		}
	}
	enumWg.Wait()

	log.Infof("enumerated %d files, %s total", s.FileCount(), humanize.IBytes(uint64(s.TotalBytes())))

	// Check if output file exists
	outputPath := ctx.String("output")
	if exists, _ := afero.Exists(mfa.Fs, outputPath); exists {
		if !ctx.Bool("force") {
			return fmt.Errorf("output file %s already exists (use --force to overwrite)", outputPath)
		}
	}

	// Create temp file for atomic write
	tmpPath := outputPath + ".tmp"
	outFile, err := mfa.Fs.Create(tmpPath)
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}

	// Set up signal handler to clean up temp file on Ctrl-C. The deferred
	// close(sigChan) below unblocks this goroutine on normal return, in
	// which case the ok/nil check makes it exit without side effects.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		sig, ok := <-sigChan
		if !ok || sig == nil {
			return // Channel closed normally, not a signal
		}
		_ = outFile.Close()
		_ = mfa.Fs.Remove(tmpPath)
		os.Exit(1)
	}()

	// Clean up temp file on any error or interruption. The second Close
	// here is a no-op after the explicit Close on the success path.
	success := false
	defer func() {
		signal.Stop(sigChan)
		close(sigChan)
		_ = outFile.Close()
		if !success {
			_ = mfa.Fs.Remove(tmpPath)
		}
	}()

	// Phase 2: Scan - read file contents and generate manifest
	var scanProgress chan mfer.ScanStatus
	var scanWg sync.WaitGroup
	if showProgress {
		scanProgress = make(chan mfer.ScanStatus, 1)
		scanWg.Add(1)
		go func() {
			defer scanWg.Done()
			for status := range scanProgress {
				if status.ETA > 0 {
					log.Progressf("Scanning: %d/%d files, %s/s, ETA %s",
						status.ScannedFiles,
						status.TotalFiles,
						humanize.IBytes(uint64(status.BytesPerSec)),
						status.ETA.Round(time.Second))
				} else {
					log.Progressf("Scanning: %d/%d files, %s/s",
						status.ScannedFiles,
						status.TotalFiles,
						humanize.IBytes(uint64(status.BytesPerSec)))
				}
			}
			log.ProgressDone()
		}()
	}

	err = s.ToManifest(ctx.Context, outFile, scanProgress)
	scanWg.Wait()
	if err != nil {
		return fmt.Errorf("failed to generate manifest: %w", err)
	}

	// Close file before rename to ensure all data is flushed
	if err := outFile.Close(); err != nil {
		return fmt.Errorf("failed to close temp file: %w", err)
	}

	// Atomic rename
	if err := mfa.Fs.Rename(tmpPath, outputPath); err != nil {
		return fmt.Errorf("failed to rename temp file: %w", err)
	}

	success = true

	// Elapsed time is measured from process startup, not scan start.
	elapsed := time.Since(mfa.startupTime).Seconds()
	rate := float64(s.TotalBytes()) / elapsed
	log.Infof("wrote %d files (%s) to %s in %.1fs (%s/s)", s.FileCount(), humanize.IBytes(uint64(s.TotalBytes())), outputPath, elapsed, humanize.IBytes(uint64(rate)))

	return nil
}
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
package cli
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/urfave/cli/v2"
|
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (mfa *CLIApp) listManifestOperation(ctx *cli.Context) error {
|
|
||||||
// Default to ErrorLevel for clean output
|
|
||||||
log.SetLevel(log.ErrorLevel)
|
|
||||||
|
|
||||||
longFormat := ctx.Bool("long")
|
|
||||||
print0 := ctx.Bool("print0")
|
|
||||||
|
|
||||||
// Find manifest file
|
|
||||||
var manifestPath string
|
|
||||||
var err error
|
|
||||||
|
|
||||||
if ctx.Args().Len() > 0 {
|
|
||||||
arg := ctx.Args().Get(0)
|
|
||||||
info, statErr := mfa.Fs.Stat(arg)
|
|
||||||
if statErr == nil && info.IsDir() {
|
|
||||||
manifestPath, err = findManifest(mfa.Fs, arg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
manifestPath = arg
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
manifestPath, err = findManifest(mfa.Fs, ".")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load manifest
|
|
||||||
manifest, err := mfer.NewManifestFromFile(mfa.Fs, manifestPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to load manifest: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
files := manifest.Files()
|
|
||||||
|
|
||||||
// Determine line ending
|
|
||||||
lineEnd := "\n"
|
|
||||||
if print0 {
|
|
||||||
lineEnd = "\x00"
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, f := range files {
|
|
||||||
if longFormat {
|
|
||||||
mtime := time.Unix(f.Mtime.Seconds, int64(f.Mtime.Nanos))
|
|
||||||
_, _ = fmt.Fprintf(mfa.Stdout, "%d\t%s\t%s%s", f.Size, mtime.Format(time.RFC3339), f.Path, lineEnd)
|
|
||||||
} else {
|
|
||||||
_, _ = fmt.Fprintf(mfa.Stdout, "%s%s", f.Path, lineEnd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
51
internal/cli/manifest.go
Normal file
51
internal/cli/manifest.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package cli
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.eeqj.de/sneak/mfer/mfer"
|
||||||
|
"github.com/spf13/afero"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Job struct {
|
||||||
|
innerpb *mfer.MFFileInner
|
||||||
|
outerpb *mfer.MFFile
|
||||||
|
fileCount int64
|
||||||
|
totalSize int64
|
||||||
|
afs afero.Fs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Job) scanForFiles() error {
|
||||||
|
|
||||||
|
m.innerpb = &mfer.MFFileInner{}
|
||||||
|
m.innerpb.Version = mfer.MFFileInner_ONE
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
walkErr := filepath.Walk(m.sourcePath, func(itemPath string, info os.FileInfo, err error) error {
|
||||||
|
|
||||||
|
// we do not include the manifest file in the manifest
|
||||||
|
if itemPath == "index.mf" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fpi := mfer.MFFilePath{}
|
||||||
|
fpi.Path = itemPath
|
||||||
|
fpi.Size = info.Size()
|
||||||
|
m.innerpb.Files = append(m.innerpb.Files, &fpi)
|
||||||
|
m.fileCount++
|
||||||
|
m.totalSize += fpi.Size
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if walkErr != nil {
|
||||||
|
log.Fatal(walkErr)
|
||||||
|
return walkErr
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("%#v\n", m.innerpb)
|
||||||
|
fmt.Printf("filecount = %#v\n", m.fileCount)
|
||||||
|
fmt.Printf("totalsize = %#v\n", m.totalSize)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
*/
|
||||||
@@ -2,277 +2,128 @@ package cli
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
"git.eeqj.de/sneak/mfer/mfer"
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/pterm/pterm"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
"sneak.berlin/go/mfer/mfer"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// CLIApp is the main CLI application container. It holds configuration,
|
|
||||||
// I/O streams, and filesystem abstraction to enable testing and flexibility.
|
|
||||||
type CLIApp struct {
|
type CLIApp struct {
|
||||||
appname string
|
appname string
|
||||||
version string
|
version string
|
||||||
gitrev string
|
buildarch string
|
||||||
startupTime time.Time
|
startupTime time.Time
|
||||||
exitCode int
|
exitCode int
|
||||||
|
errorString string
|
||||||
app *cli.App
|
app *cli.App
|
||||||
|
|
||||||
Stdin io.Reader // Standard input stream
|
|
||||||
Stdout io.Writer // Standard output stream for normal output
|
|
||||||
Stderr io.Writer // Standard error stream for diagnostics
|
|
||||||
Fs afero.Fs // Filesystem abstraction for all file operations
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const banner = `
|
|
||||||
___ ___ ___ ___
|
|
||||||
/__/\ / /\ / /\ / /\
|
|
||||||
| |::\ / /:/_ / /:/_ / /::\
|
|
||||||
| |:|:\ / /:/ /\ / /:/ /\ / /:/\:\
|
|
||||||
__|__|:|\:\ / /:/ /:/ / /:/ /:/_ / /:/~/:/
|
|
||||||
/__/::::| \:\ /__/:/ /:/ /__/:/ /:/ /\ /__/:/ /:/___
|
|
||||||
\ \:\~~\__\/ \ \:\/:/ \ \:\/:/ /:/ \ \:\/:::::/
|
|
||||||
\ \:\ \ \::/ \ \::/ /:/ \ \::/~~~~
|
|
||||||
\ \:\ \ \:\ \ \:\/:/ \ \:\
|
|
||||||
\ \:\ \ \:\ \ \::/ \ \:\
|
|
||||||
\__\/ \__\/ \__\/ \__\/`
|
|
||||||
|
|
||||||
func (mfa *CLIApp) printBanner() {
|
func (mfa *CLIApp) printBanner() {
|
||||||
if log.GetLevel() <= log.InfoLevel {
|
s, _ := pterm.DefaultBigText.WithLetters(pterm.NewLettersFromString(mfa.appname)).Srender()
|
||||||
_, _ = fmt.Fprintln(mfa.Stdout, banner)
|
pterm.DefaultCenter.Println(s) // Print BigLetters with the default CenterPrinter
|
||||||
_, _ = fmt.Fprintf(mfa.Stdout, " mfer by @sneak: v%s released %s\n", mfer.Version, mfer.ReleaseDate)
|
|
||||||
_, _ = fmt.Fprintln(mfa.Stdout, " https://sneak.berlin/go/mfer")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// VersionString returns the version and git revision formatted for display.
|
func (mfa *CLIApp) disableStyling() {
|
||||||
func (mfa *CLIApp) VersionString() string {
|
pterm.DisableColor()
|
||||||
if mfa.gitrev != "" {
|
pterm.DisableStyling()
|
||||||
return fmt.Sprintf("%s (%s)", mfer.Version, mfa.gitrev)
|
pterm.Debug.Prefix.Text = ""
|
||||||
}
|
pterm.Info.Prefix.Text = ""
|
||||||
return mfer.Version
|
pterm.Success.Prefix.Text = ""
|
||||||
|
pterm.Warning.Prefix.Text = ""
|
||||||
|
pterm.Error.Prefix.Text = ""
|
||||||
|
pterm.Fatal.Prefix.Text = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mfa *CLIApp) setVerbosity(c *cli.Context) {
|
func (mfa *CLIApp) run() {
|
||||||
_, present := os.LookupEnv("MFER_DEBUG")
|
|
||||||
if present {
|
|
||||||
log.EnableDebugLogging()
|
|
||||||
} else if c.Bool("quiet") {
|
|
||||||
log.SetLevel(log.ErrorLevel)
|
|
||||||
} else {
|
|
||||||
log.SetLevelFromVerbosity(c.Count("verbose"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// commonFlags returns the flags shared by most commands (-v, -q)
|
|
||||||
func commonFlags() []cli.Flag {
|
|
||||||
return []cli.Flag{
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "verbose",
|
|
||||||
Aliases: []string{"v"},
|
|
||||||
Usage: "Increase verbosity (-v for verbose, -vv for debug)",
|
|
||||||
Count: new(int),
|
|
||||||
},
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "quiet",
|
|
||||||
Aliases: []string{"q"},
|
|
||||||
Usage: "Suppress output except errors",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mfa *CLIApp) run(args []string) {
|
|
||||||
mfa.startupTime = time.Now()
|
|
||||||
|
|
||||||
if NO_COLOR {
|
if NO_COLOR {
|
||||||
// shoutout to rob pike who thinks it's juvenile
|
// shoutout to rob pike who thinks it's juvenile
|
||||||
log.DisableStyling()
|
mfa.disableStyling()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure log package to use our I/O streams
|
mfa.printBanner()
|
||||||
log.SetOutput(mfa.Stdout, mfa.Stderr)
|
|
||||||
log.Init()
|
|
||||||
|
|
||||||
mfa.app = &cli.App{
|
mfa.app = &cli.App{
|
||||||
Name: mfa.appname,
|
Name: mfa.appname,
|
||||||
Usage: "Manifest generator",
|
Usage: "Manifest generator",
|
||||||
Version: mfa.VersionString(),
|
Version: mfa.version,
|
||||||
EnableBashCompletion: true,
|
EnableBashCompletion: true,
|
||||||
Writer: mfa.Stdout,
|
|
||||||
ErrWriter: mfa.Stderr,
|
|
||||||
Action: func(c *cli.Context) error {
|
|
||||||
if c.Args().Len() > 0 {
|
|
||||||
return fmt.Errorf("unknown command %q", c.Args().First())
|
|
||||||
}
|
|
||||||
mfa.printBanner()
|
|
||||||
return cli.ShowAppHelp(c)
|
|
||||||
},
|
|
||||||
Commands: []*cli.Command{
|
Commands: []*cli.Command{
|
||||||
{
|
{
|
||||||
Name: "generate",
|
Name: "generate",
|
||||||
Aliases: []string{"gen"},
|
Aliases: []string{"gen"},
|
||||||
Usage: "Generate manifest file",
|
Usage: "Generate manifest file",
|
||||||
Action: func(c *cli.Context) error {
|
Action: func(c *cli.Context) error {
|
||||||
mfa.setVerbosity(c)
|
|
||||||
mfa.printBanner()
|
|
||||||
return mfa.generateManifestOperation(c)
|
return mfa.generateManifestOperation(c)
|
||||||
},
|
},
|
||||||
Flags: append(commonFlags(),
|
Flags: []cli.Flag{
|
||||||
&cli.BoolFlag{
|
&cli.StringFlag{
|
||||||
Name: "FollowSymLinks",
|
Name: "input",
|
||||||
Aliases: []string{"follow-symlinks"},
|
Value: ".",
|
||||||
Usage: "Resolve encountered symlinks",
|
Aliases: []string{"i"},
|
||||||
},
|
Usage: "Specify input directory.",
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "IncludeDotfiles",
|
|
||||||
Aliases: []string{"include-dotfiles"},
|
|
||||||
Usage: "Include dot (hidden) files (excluded by default)",
|
|
||||||
},
|
},
|
||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: "output",
|
Name: "output",
|
||||||
Value: "./.index.mf",
|
Value: "./index.mf",
|
||||||
Aliases: []string{"o"},
|
Aliases: []string{"o"},
|
||||||
Usage: "Specify output filename",
|
Usage: "Specify output filename",
|
||||||
},
|
},
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "force",
|
|
||||||
Aliases: []string{"f"},
|
|
||||||
Usage: "Overwrite output file if it exists",
|
|
||||||
},
|
},
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "progress",
|
|
||||||
Aliases: []string{"P"},
|
|
||||||
Usage: "Show progress during enumeration and scanning",
|
|
||||||
},
|
|
||||||
&cli.StringFlag{
|
|
||||||
Name: "sign-key",
|
|
||||||
Aliases: []string{"s"},
|
|
||||||
Usage: "GPG key ID to sign the manifest with",
|
|
||||||
EnvVars: []string{"MFER_SIGN_KEY"},
|
|
||||||
},
|
|
||||||
),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "check",
|
Name: "check",
|
||||||
Usage: "Validate files using manifest file",
|
Usage: "Validate files using manifest file",
|
||||||
ArgsUsage: "[manifest file]",
|
|
||||||
Action: func(c *cli.Context) error {
|
Action: func(c *cli.Context) error {
|
||||||
mfa.setVerbosity(c)
|
return mfa.validateManifestOperation(c)
|
||||||
mfa.printBanner()
|
|
||||||
return mfa.checkManifestOperation(c)
|
|
||||||
},
|
},
|
||||||
Flags: append(commonFlags(),
|
|
||||||
&cli.StringFlag{
|
|
||||||
Name: "base",
|
|
||||||
Aliases: []string{"b"},
|
|
||||||
Value: ".",
|
|
||||||
Usage: "Base directory for resolving relative paths from manifest",
|
|
||||||
},
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "progress",
|
|
||||||
Aliases: []string{"P"},
|
|
||||||
Usage: "Show progress during checking",
|
|
||||||
},
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "no-extra-files",
|
|
||||||
Usage: "Fail if files exist in base directory that are not in manifest",
|
|
||||||
},
|
|
||||||
&cli.StringFlag{
|
|
||||||
Name: "require-signature",
|
|
||||||
Aliases: []string{"S"},
|
|
||||||
Usage: "Require manifest to be signed by the specified GPG key ID",
|
|
||||||
EnvVars: []string{"MFER_REQUIRE_SIGNATURE"},
|
|
||||||
},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "freshen",
|
|
||||||
Usage: "Update manifest with changed, new, and removed files",
|
|
||||||
ArgsUsage: "[manifest file]",
|
|
||||||
Action: func(c *cli.Context) error {
|
|
||||||
mfa.setVerbosity(c)
|
|
||||||
mfa.printBanner()
|
|
||||||
return mfa.freshenManifestOperation(c)
|
|
||||||
},
|
|
||||||
Flags: append(commonFlags(),
|
|
||||||
&cli.StringFlag{
|
|
||||||
Name: "base",
|
|
||||||
Aliases: []string{"b"},
|
|
||||||
Value: ".",
|
|
||||||
Usage: "Base directory for resolving relative paths",
|
|
||||||
},
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "FollowSymLinks",
|
|
||||||
Aliases: []string{"follow-symlinks"},
|
|
||||||
Usage: "Resolve encountered symlinks",
|
|
||||||
},
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "IncludeDotfiles",
|
|
||||||
Aliases: []string{"include-dotfiles"},
|
|
||||||
Usage: "Include dot (hidden) files (excluded by default)",
|
|
||||||
},
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "progress",
|
|
||||||
Aliases: []string{"P"},
|
|
||||||
Usage: "Show progress during scanning and hashing",
|
|
||||||
},
|
|
||||||
&cli.StringFlag{
|
|
||||||
Name: "sign-key",
|
|
||||||
Aliases: []string{"s"},
|
|
||||||
Usage: "GPG key ID to sign the manifest with",
|
|
||||||
EnvVars: []string{"MFER_SIGN_KEY"},
|
|
||||||
},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "version",
|
|
||||||
Usage: "Show version",
|
|
||||||
Action: func(c *cli.Context) error {
|
|
||||||
_, _ = fmt.Fprintln(mfa.Stdout, mfa.VersionString())
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "list",
|
|
||||||
Aliases: []string{"ls"},
|
|
||||||
Usage: "List files in manifest",
|
|
||||||
ArgsUsage: "[manifest file]",
|
|
||||||
Action: func(c *cli.Context) error {
|
|
||||||
return mfa.listManifestOperation(c)
|
|
||||||
},
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "long",
|
|
||||||
Aliases: []string{"l"},
|
|
||||||
Usage: "Show size and mtime",
|
|
||||||
},
|
|
||||||
&cli.BoolFlag{
|
|
||||||
Name: "print0",
|
|
||||||
Usage: "Separate entries with NUL character (for xargs -0)",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "fetch",
|
|
||||||
Usage: "fetch manifest and referenced files",
|
|
||||||
Action: func(c *cli.Context) error {
|
|
||||||
mfa.setVerbosity(c)
|
|
||||||
mfa.printBanner()
|
|
||||||
return mfa.fetchManifestOperation(c)
|
|
||||||
},
|
|
||||||
Flags: commonFlags(),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
mfa.app.HideVersion = true
|
err := mfa.app.Run(os.Args)
|
||||||
err := mfa.app.Run(args)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
mfa.exitCode = 1
|
mfa.exitCode = 1
|
||||||
log.WithError(err).Debugf("exiting")
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (mfa *CLIApp) validateManifestOperation(c *cli.Context) error {
|
||||||
|
log.Fatal("unimplemented")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mfa *CLIApp) generateManifestOperation(c *cli.Context) error {
|
||||||
|
fmt.Println("generateManifestOperation()")
|
||||||
|
fmt.Printf("called with arg: %s\n", c.String("input"))
|
||||||
|
|
||||||
|
opts := &mfer.ManifestScanOptions{
|
||||||
|
IgnoreDotfiles: true,
|
||||||
|
FollowSymLinks: false,
|
||||||
|
}
|
||||||
|
mf, err := mfer.NewFromPath(c.String("input"), opts)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
spew.Dump(mf)
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
mgj, err := NewMFGenerationJobFromFilesystem(c.String("input"))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
mgj.scanForFiles()
|
||||||
|
//mgj.outputFile = c.String("output")
|
||||||
|
|
||||||
|
*/
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,274 +0,0 @@
|
|||||||
package log
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/apex/log"
|
|
||||||
acli "github.com/apex/log/handlers/cli"
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
"github.com/pterm/pterm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Level represents log severity levels.
|
|
||||||
// Lower values are more verbose.
|
|
||||||
type Level int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DebugLevel is for low-level tracing and structure inspection
|
|
||||||
DebugLevel Level = iota
|
|
||||||
// VerboseLevel is for detailed operational info (file listings, etc)
|
|
||||||
VerboseLevel
|
|
||||||
// InfoLevel is for operational summaries (default)
|
|
||||||
InfoLevel
|
|
||||||
// WarnLevel is for warnings
|
|
||||||
WarnLevel
|
|
||||||
// ErrorLevel is for errors
|
|
||||||
ErrorLevel
|
|
||||||
// FatalLevel is for fatal errors
|
|
||||||
FatalLevel
|
|
||||||
)
|
|
||||||
|
|
||||||
func (l Level) String() string {
|
|
||||||
switch l {
|
|
||||||
case DebugLevel:
|
|
||||||
return "debug"
|
|
||||||
case VerboseLevel:
|
|
||||||
return "verbose"
|
|
||||||
case InfoLevel:
|
|
||||||
return "info"
|
|
||||||
case WarnLevel:
|
|
||||||
return "warn"
|
|
||||||
case ErrorLevel:
|
|
||||||
return "error"
|
|
||||||
case FatalLevel:
|
|
||||||
return "fatal"
|
|
||||||
default:
|
|
||||||
return "unknown"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// mu protects the output writers and level
|
|
||||||
mu sync.RWMutex
|
|
||||||
// stdout is the writer for progress output
|
|
||||||
stdout io.Writer = os.Stdout
|
|
||||||
// stderr is the writer for log output
|
|
||||||
stderr io.Writer = os.Stderr
|
|
||||||
// currentLevel is our log level (includes Verbose)
|
|
||||||
currentLevel Level = InfoLevel
|
|
||||||
)
|
|
||||||
|
|
||||||
// SetOutput configures the output writers for the log package.
|
|
||||||
// stdout is used for progress output, stderr is used for log messages.
|
|
||||||
func SetOutput(out, err io.Writer) {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
stdout = out
|
|
||||||
stderr = err
|
|
||||||
pterm.SetDefaultOutput(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStdout returns the configured stdout writer.
|
|
||||||
func GetStdout() io.Writer {
|
|
||||||
mu.RLock()
|
|
||||||
defer mu.RUnlock()
|
|
||||||
return stdout
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStderr returns the configured stderr writer.
|
|
||||||
func GetStderr() io.Writer {
|
|
||||||
mu.RLock()
|
|
||||||
defer mu.RUnlock()
|
|
||||||
return stderr
|
|
||||||
}
|
|
||||||
|
|
||||||
// DisableStyling turns off colors and styling for terminal output.
|
|
||||||
func DisableStyling() {
|
|
||||||
pterm.DisableColor()
|
|
||||||
pterm.DisableStyling()
|
|
||||||
pterm.Debug.Prefix.Text = ""
|
|
||||||
pterm.Info.Prefix.Text = ""
|
|
||||||
pterm.Success.Prefix.Text = ""
|
|
||||||
pterm.Warning.Prefix.Text = ""
|
|
||||||
pterm.Error.Prefix.Text = ""
|
|
||||||
pterm.Fatal.Prefix.Text = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init initializes the logger with the CLI handler and default log level.
|
|
||||||
func Init() {
|
|
||||||
mu.RLock()
|
|
||||||
w := stderr
|
|
||||||
mu.RUnlock()
|
|
||||||
log.SetHandler(acli.New(w))
|
|
||||||
log.SetLevel(log.DebugLevel) // Let apex/log pass everything; we filter ourselves
|
|
||||||
}
|
|
||||||
|
|
||||||
// isEnabled returns true if messages at the given level should be logged.
|
|
||||||
func isEnabled(l Level) bool {
|
|
||||||
mu.RLock()
|
|
||||||
defer mu.RUnlock()
|
|
||||||
return l >= currentLevel
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalf logs a formatted message at fatal level.
|
|
||||||
func Fatalf(format string, args ...interface{}) {
|
|
||||||
if isEnabled(FatalLevel) {
|
|
||||||
log.Fatalf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatal logs a message at fatal level.
|
|
||||||
func Fatal(arg string) {
|
|
||||||
if isEnabled(FatalLevel) {
|
|
||||||
log.Fatal(arg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Errorf logs a formatted message at error level.
|
|
||||||
func Errorf(format string, args ...interface{}) {
|
|
||||||
if isEnabled(ErrorLevel) {
|
|
||||||
log.Errorf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error logs a message at error level.
|
|
||||||
func Error(arg string) {
|
|
||||||
if isEnabled(ErrorLevel) {
|
|
||||||
log.Error(arg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warnf logs a formatted message at warn level.
|
|
||||||
func Warnf(format string, args ...interface{}) {
|
|
||||||
if isEnabled(WarnLevel) {
|
|
||||||
log.Warnf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warn logs a message at warn level.
|
|
||||||
func Warn(arg string) {
|
|
||||||
if isEnabled(WarnLevel) {
|
|
||||||
log.Warn(arg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Infof logs a formatted message at info level.
|
|
||||||
func Infof(format string, args ...interface{}) {
|
|
||||||
if isEnabled(InfoLevel) {
|
|
||||||
log.Infof(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Info logs a message at info level.
|
|
||||||
func Info(arg string) {
|
|
||||||
if isEnabled(InfoLevel) {
|
|
||||||
log.Info(arg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verbosef logs a formatted message at verbose level.
|
|
||||||
func Verbosef(format string, args ...interface{}) {
|
|
||||||
if isEnabled(VerboseLevel) {
|
|
||||||
log.Infof(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verbose logs a message at verbose level.
|
|
||||||
func Verbose(arg string) {
|
|
||||||
if isEnabled(VerboseLevel) {
|
|
||||||
log.Info(arg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugf logs a formatted message at debug level with caller location.
|
|
||||||
func Debugf(format string, args ...interface{}) {
|
|
||||||
if isEnabled(DebugLevel) {
|
|
||||||
DebugReal(fmt.Sprintf(format, args...), 2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug logs a message at debug level with caller location.
|
|
||||||
func Debug(arg string) {
|
|
||||||
if isEnabled(DebugLevel) {
|
|
||||||
DebugReal(arg, 2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DebugReal logs at debug level with caller info from the specified stack depth.
|
|
||||||
func DebugReal(arg string, cs int) {
|
|
||||||
if !isEnabled(DebugLevel) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, callerFile, callerLine, ok := runtime.Caller(cs)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
tag := fmt.Sprintf("%s:%d: ", filepath.Base(callerFile), callerLine)
|
|
||||||
log.Debug(tag + arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dump logs a spew dump of the arguments at debug level.
|
|
||||||
func Dump(args ...interface{}) {
|
|
||||||
if isEnabled(DebugLevel) {
|
|
||||||
DebugReal(spew.Sdump(args...), 2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnableDebugLogging sets the log level to debug.
|
|
||||||
func EnableDebugLogging() {
|
|
||||||
SetLevel(DebugLevel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerbosityStepsToLogLevel converts a -v count to a log level.
|
|
||||||
// 0 returns InfoLevel, 1 returns VerboseLevel, 2+ returns DebugLevel.
|
|
||||||
func VerbosityStepsToLogLevel(l int) Level {
|
|
||||||
switch l {
|
|
||||||
case 0:
|
|
||||||
return InfoLevel
|
|
||||||
case 1:
|
|
||||||
return VerboseLevel
|
|
||||||
default:
|
|
||||||
return DebugLevel
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLevelFromVerbosity sets the log level based on -v flag count.
|
|
||||||
func SetLevelFromVerbosity(l int) {
|
|
||||||
SetLevel(VerbosityStepsToLogLevel(l))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLevel sets the global log level.
|
|
||||||
func SetLevel(l Level) {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
currentLevel = l
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLevel returns the current log level.
|
|
||||||
func GetLevel() Level {
|
|
||||||
mu.RLock()
|
|
||||||
defer mu.RUnlock()
|
|
||||||
return currentLevel
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithError returns a log entry with the error attached.
|
|
||||||
func WithError(e error) *log.Entry {
|
|
||||||
return log.Log.WithError(e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Progressf prints a progress message that overwrites the current line.
|
|
||||||
// Use ProgressDone() when progress is complete to move to the next line.
|
|
||||||
func Progressf(format string, args ...interface{}) {
|
|
||||||
pterm.Printf("\r"+format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProgressDone clears the progress line when progress is complete.
|
|
||||||
func ProgressDone() {
|
|
||||||
// Clear the line with spaces and return to beginning
|
|
||||||
pterm.Print("\r\033[K")
|
|
||||||
}
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
package log
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBuild(t *testing.T) {
|
|
||||||
Init()
|
|
||||||
assert.True(t, true)
|
|
||||||
}
|
|
||||||
251
mfer/builder.go
251
mfer/builder.go
@@ -1,251 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/multiformats/go-multihash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ValidatePath checks that a file path conforms to manifest path invariants:
|
|
||||||
// - Must be valid UTF-8
|
|
||||||
// - Must use forward slashes only (no backslashes)
|
|
||||||
// - Must be relative (no leading /)
|
|
||||||
// - Must not contain ".." segments
|
|
||||||
// - Must not contain empty segments (no "//")
|
|
||||||
// - Must not be empty
|
|
||||||
func ValidatePath(p string) error {
|
|
||||||
if p == "" {
|
|
||||||
return errors.New("path cannot be empty")
|
|
||||||
}
|
|
||||||
if !utf8.ValidString(p) {
|
|
||||||
return fmt.Errorf("path %q is not valid UTF-8", p)
|
|
||||||
}
|
|
||||||
if strings.ContainsRune(p, '\\') {
|
|
||||||
return fmt.Errorf("path %q contains backslash; use forward slashes only", p)
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(p, "/") {
|
|
||||||
return fmt.Errorf("path %q is absolute; must be relative", p)
|
|
||||||
}
|
|
||||||
for _, seg := range strings.Split(p, "/") {
|
|
||||||
if seg == "" {
|
|
||||||
return fmt.Errorf("path %q contains empty segment", p)
|
|
||||||
}
|
|
||||||
if seg == ".." {
|
|
||||||
return fmt.Errorf("path %q contains '..' segment", p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RelFilePath represents a relative file path within a manifest.
|
|
||||||
type RelFilePath string
|
|
||||||
|
|
||||||
// AbsFilePath represents an absolute file path on the filesystem.
|
|
||||||
type AbsFilePath string
|
|
||||||
|
|
||||||
// FileSize represents the size of a file in bytes.
|
|
||||||
type FileSize int64
|
|
||||||
|
|
||||||
// FileCount represents a count of files.
|
|
||||||
type FileCount int64
|
|
||||||
|
|
||||||
// ModTime represents a file's modification time.
|
|
||||||
type ModTime time.Time
|
|
||||||
|
|
||||||
// UnixSeconds represents seconds since Unix epoch.
|
|
||||||
type UnixSeconds int64
|
|
||||||
|
|
||||||
// UnixNanos represents the nanosecond component of a timestamp (0-999999999).
|
|
||||||
type UnixNanos int32
|
|
||||||
|
|
||||||
// Timestamp converts ModTime to a protobuf Timestamp.
|
|
||||||
func (m ModTime) Timestamp() *Timestamp {
|
|
||||||
t := time.Time(m)
|
|
||||||
return &Timestamp{
|
|
||||||
Seconds: t.Unix(),
|
|
||||||
Nanos: int32(t.Nanosecond()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Multihash represents a multihash-encoded file hash (typically SHA2-256).
|
|
||||||
type Multihash []byte
|
|
||||||
|
|
||||||
// FileHashProgress reports progress during file hashing.
|
|
||||||
type FileHashProgress struct {
|
|
||||||
BytesRead FileSize // Total bytes read so far for the current file
|
|
||||||
}
|
|
||||||
|
|
||||||
// Builder constructs a manifest by adding files one at a time.
|
|
||||||
type Builder struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
files []*MFFilePath
|
|
||||||
createdAt time.Time
|
|
||||||
signingOptions *SigningOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBuilder creates a new Builder.
|
|
||||||
func NewBuilder() *Builder {
|
|
||||||
return &Builder{
|
|
||||||
files: make([]*MFFilePath, 0),
|
|
||||||
createdAt: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddFile reads file content from reader, computes hashes, and adds to manifest.
|
|
||||||
// Progress updates are sent to the progress channel (if non-nil) without blocking.
|
|
||||||
// Returns the number of bytes read.
|
|
||||||
func (b *Builder) AddFile(
|
|
||||||
path RelFilePath,
|
|
||||||
size FileSize,
|
|
||||||
mtime ModTime,
|
|
||||||
reader io.Reader,
|
|
||||||
progress chan<- FileHashProgress,
|
|
||||||
) (FileSize, error) {
|
|
||||||
if err := ValidatePath(string(path)); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create hash writer
|
|
||||||
h := sha256.New()
|
|
||||||
|
|
||||||
// Read file in chunks, updating hash and progress
|
|
||||||
var totalRead FileSize
|
|
||||||
buf := make([]byte, 64*1024) // 64KB chunks
|
|
||||||
|
|
||||||
for {
|
|
||||||
n, err := reader.Read(buf)
|
|
||||||
if n > 0 {
|
|
||||||
h.Write(buf[:n])
|
|
||||||
totalRead += FileSize(n)
|
|
||||||
sendFileHashProgress(progress, FileHashProgress{BytesRead: totalRead})
|
|
||||||
}
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return totalRead, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify actual bytes read matches declared size
|
|
||||||
if totalRead != size {
|
|
||||||
return totalRead, fmt.Errorf("size mismatch for %q: declared %d bytes but read %d bytes", path, size, totalRead)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode hash as multihash (SHA2-256)
|
|
||||||
mh, err := multihash.Encode(h.Sum(nil), multihash.SHA2_256)
|
|
||||||
if err != nil {
|
|
||||||
return totalRead, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create file entry
|
|
||||||
entry := &MFFilePath{
|
|
||||||
Path: string(path),
|
|
||||||
Size: int64(size),
|
|
||||||
Hashes: []*MFFileChecksum{
|
|
||||||
{MultiHash: mh},
|
|
||||||
},
|
|
||||||
Mtime: mtime.Timestamp(),
|
|
||||||
}
|
|
||||||
|
|
||||||
b.mu.Lock()
|
|
||||||
b.files = append(b.files, entry)
|
|
||||||
b.mu.Unlock()
|
|
||||||
|
|
||||||
return totalRead, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendFileHashProgress sends a progress update without blocking.
|
|
||||||
func sendFileHashProgress(ch chan<- FileHashProgress, p FileHashProgress) {
|
|
||||||
if ch == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case ch <- p:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileCount returns the number of files added to the builder.
|
|
||||||
func (b *Builder) FileCount() int {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
return len(b.files)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddFileWithHash adds a file entry with a pre-computed hash.
|
|
||||||
// This is useful when the hash is already known (e.g., from an existing manifest).
|
|
||||||
// Returns an error if path is empty, size is negative, or hash is nil/empty.
|
|
||||||
func (b *Builder) AddFileWithHash(path RelFilePath, size FileSize, mtime ModTime, hash Multihash) error {
|
|
||||||
if err := ValidatePath(string(path)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if size < 0 {
|
|
||||||
return errors.New("size cannot be negative")
|
|
||||||
}
|
|
||||||
if len(hash) == 0 {
|
|
||||||
return errors.New("hash cannot be nil or empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
entry := &MFFilePath{
|
|
||||||
Path: string(path),
|
|
||||||
Size: int64(size),
|
|
||||||
Hashes: []*MFFileChecksum{
|
|
||||||
{MultiHash: hash},
|
|
||||||
},
|
|
||||||
Mtime: mtime.Timestamp(),
|
|
||||||
}
|
|
||||||
|
|
||||||
b.mu.Lock()
|
|
||||||
b.files = append(b.files, entry)
|
|
||||||
b.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSigningOptions sets the GPG signing options for the manifest.
|
|
||||||
// If opts is non-nil, the manifest will be signed when Build() is called.
|
|
||||||
func (b *Builder) SetSigningOptions(opts *SigningOptions) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
b.signingOptions = opts
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build finalizes the manifest and writes it to the writer.
|
|
||||||
func (b *Builder) Build(w io.Writer) error {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
// Create inner manifest
|
|
||||||
inner := &MFFile{
|
|
||||||
Version: MFFile_VERSION_ONE,
|
|
||||||
CreatedAt: newTimestampFromTime(b.createdAt),
|
|
||||||
Files: b.files,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a temporary manifest to use existing serialization
|
|
||||||
m := &manifest{
|
|
||||||
pbInner: inner,
|
|
||||||
signingOptions: b.signingOptions,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate outer wrapper
|
|
||||||
if err := m.generateOuter(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate final output
|
|
||||||
if err := m.generate(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write to output
|
|
||||||
_, err := w.Write(m.output.Bytes())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
@@ -1,127 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNewBuilder(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
assert.NotNil(t, b)
|
|
||||||
assert.Equal(t, 0, b.FileCount())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuilderAddFile(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
content := []byte("test content")
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
|
|
||||||
bytesRead, err := b.AddFile("test.txt", FileSize(len(content)), ModTime(time.Now()), reader, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, FileSize(len(content)), bytesRead)
|
|
||||||
assert.Equal(t, 1, b.FileCount())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuilderAddFileWithHash(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
hash := make([]byte, 34) // SHA256 multihash is 34 bytes
|
|
||||||
|
|
||||||
err := b.AddFileWithHash("test.txt", 100, ModTime(time.Now()), hash)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, b.FileCount())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuilderAddFileWithHashValidation(t *testing.T) {
|
|
||||||
t.Run("empty path", func(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
hash := make([]byte, 34)
|
|
||||||
err := b.AddFileWithHash("", 100, ModTime(time.Now()), hash)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "path")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("negative size", func(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
hash := make([]byte, 34)
|
|
||||||
err := b.AddFileWithHash("test.txt", -1, ModTime(time.Now()), hash)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "size")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("nil hash", func(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
err := b.AddFileWithHash("test.txt", 100, ModTime(time.Now()), nil)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "hash")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("empty hash", func(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
err := b.AddFileWithHash("test.txt", 100, ModTime(time.Now()), []byte{})
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "hash")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("valid inputs", func(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
hash := make([]byte, 34)
|
|
||||||
err := b.AddFileWithHash("test.txt", 100, ModTime(time.Now()), hash)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, b.FileCount())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuilderBuild(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
content := []byte("test content")
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
|
|
||||||
_, err := b.AddFile("test.txt", FileSize(len(content)), ModTime(time.Now()), reader, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = b.Build(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Should have magic bytes
|
|
||||||
assert.True(t, strings.HasPrefix(buf.String(), MAGIC))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewTimestampFromTimeExtremeDate(t *testing.T) {
|
|
||||||
// Regression test: newTimestampFromTime used UnixNano() which panics
|
|
||||||
// for dates outside ~1678-2262. Now uses Nanosecond() which is safe.
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
time time.Time
|
|
||||||
}{
|
|
||||||
{"zero time", time.Time{}},
|
|
||||||
{"year 1000", time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC)},
|
|
||||||
{"year 3000", time.Date(3000, 1, 1, 0, 0, 0, 123456789, time.UTC)},
|
|
||||||
{"unix epoch", time.Unix(0, 0)},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
// Should not panic
|
|
||||||
ts := newTimestampFromTime(tt.time)
|
|
||||||
assert.Equal(t, tt.time.Unix(), ts.Seconds)
|
|
||||||
assert.Equal(t, int32(tt.time.Nanosecond()), ts.Nanos)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuilderBuildEmpty(t *testing.T) {
|
|
||||||
b := NewBuilder()
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err := b.Build(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Should still produce valid manifest with 0 files
|
|
||||||
assert.True(t, strings.HasPrefix(buf.String(), MAGIC))
|
|
||||||
}
|
|
||||||
327
mfer/checker.go
327
mfer/checker.go
@@ -1,327 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/sha256"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/multiformats/go-multihash"
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Result represents the outcome of checking a single file.
// One Result is emitted per manifest entry by Checker.Check, and one
// per unexpected on-disk file by Checker.FindExtraFiles.
type Result struct {
	Path    RelFilePath // Relative path from manifest
	Status  Status      // Verification result status
	Message string      // Human-readable description of the result
}
|
|
||||||
|
|
||||||
// Status represents the verification status of a file.
type Status int

const (
	StatusOK           Status = iota // File matches manifest (size and hash verified)
	StatusMissing                    // File not found on disk
	StatusSizeMismatch               // File size differs from manifest
	StatusHashMismatch               // File hash differs from manifest
	StatusExtra                      // File exists on disk but not in manifest
	StatusError                      // Error occurred during verification
)

// String returns the human-readable name for the status, or "UNKNOWN"
// for values outside the defined range.
func (s Status) String() string {
	names := [...]string{
		StatusOK:           "OK",
		StatusMissing:      "MISSING",
		StatusSizeMismatch: "SIZE_MISMATCH",
		StatusHashMismatch: "HASH_MISMATCH",
		StatusExtra:        "EXTRA",
		StatusError:        "ERROR",
	}
	if s < 0 || int(s) >= len(names) {
		return "UNKNOWN"
	}
	return names[s]
}
|
|
||||||
|
|
||||||
// CheckStatus contains progress information for the check operation.
// Each value is a point-in-time snapshot delivered over the progress
// channel passed to Checker.Check.
type CheckStatus struct {
	TotalFiles   FileCount     // Total number of files in manifest
	CheckedFiles FileCount     // Number of files checked so far
	TotalBytes   FileSize      // Total bytes to verify (sum of all file sizes)
	CheckedBytes FileSize      // Bytes verified so far
	BytesPerSec  float64       // Current throughput rate
	ETA          time.Duration // Estimated time to completion
	Failures     FileCount     // Number of verification failures encountered
}
|
|
||||||
|
|
||||||
// Checker verifies files against a manifest.
// Construct with NewChecker; the zero value is not usable.
type Checker struct {
	basePath AbsFilePath   // directory against which manifest paths are resolved
	files    []*MFFilePath // file entries loaded from the manifest
	fs       afero.Fs      // filesystem abstraction (real or in-memory for tests)
	// manifestPaths is a set of paths in the manifest for quick lookup
	manifestPaths map[RelFilePath]struct{}
	// signature info from the manifest
	signature     []byte
	signer        []byte
	signingPubKey []byte
}
|
|
||||||
|
|
||||||
// NewChecker creates a new Checker for the given manifest, base path, and filesystem.
|
|
||||||
// The basePath is the directory relative to which manifest paths are resolved.
|
|
||||||
// If fs is nil, the real filesystem (OsFs) is used.
|
|
||||||
func NewChecker(manifestPath string, basePath string, fs afero.Fs) (*Checker, error) {
|
|
||||||
if fs == nil {
|
|
||||||
fs = afero.NewOsFs()
|
|
||||||
}
|
|
||||||
|
|
||||||
m, err := NewManifestFromFile(fs, manifestPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
abs, err := filepath.Abs(basePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
files := m.Files()
|
|
||||||
manifestPaths := make(map[RelFilePath]struct{}, len(files))
|
|
||||||
for _, f := range files {
|
|
||||||
manifestPaths[RelFilePath(f.Path)] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Checker{
|
|
||||||
basePath: AbsFilePath(abs),
|
|
||||||
files: files,
|
|
||||||
fs: fs,
|
|
||||||
manifestPaths: manifestPaths,
|
|
||||||
signature: m.pbOuter.Signature,
|
|
||||||
signer: m.pbOuter.Signer,
|
|
||||||
signingPubKey: m.pbOuter.SigningPubKey,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileCount returns the number of files in the manifest.
|
|
||||||
func (c *Checker) FileCount() FileCount {
|
|
||||||
return FileCount(len(c.files))
|
|
||||||
}
|
|
||||||
|
|
||||||
// TotalBytes returns the total size of all files in the manifest.
|
|
||||||
func (c *Checker) TotalBytes() FileSize {
|
|
||||||
var total FileSize
|
|
||||||
for _, f := range c.files {
|
|
||||||
total += FileSize(f.Size)
|
|
||||||
}
|
|
||||||
return total
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsSigned returns true if the manifest has a signature.
|
|
||||||
func (c *Checker) IsSigned() bool {
|
|
||||||
return len(c.signature) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signer returns the signer fingerprint if the manifest is signed, nil otherwise.
|
|
||||||
func (c *Checker) Signer() []byte {
|
|
||||||
return c.signer
|
|
||||||
}
|
|
||||||
|
|
||||||
// SigningPubKey returns the signing public key if the manifest is signed, nil otherwise.
|
|
||||||
func (c *Checker) SigningPubKey() []byte {
|
|
||||||
return c.signingPubKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractEmbeddedSigningKeyFP imports the manifest's embedded public key into a
|
|
||||||
// temporary keyring and extracts its fingerprint. This validates the key and
|
|
||||||
// returns its actual fingerprint from the key material itself.
|
|
||||||
func (c *Checker) ExtractEmbeddedSigningKeyFP() (string, error) {
|
|
||||||
if len(c.signingPubKey) == 0 {
|
|
||||||
return "", errors.New("manifest has no signing public key")
|
|
||||||
}
|
|
||||||
return gpgExtractPubKeyFingerprint(c.signingPubKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check verifies all files against the manifest.
// Results are sent to the results channel as files are checked.
// A progress update is attempted after each file; updates are dropped
// (not blocked on) if the receiver is not ready.
// Both channels are closed when the method returns; either may be nil.
func (c *Checker) Check(ctx context.Context, results chan<- Result, progress chan<- CheckStatus) error {
	// Close output channels on every return path so consumers ranging
	// over them terminate.
	if results != nil {
		defer close(results)
	}
	if progress != nil {
		defer close(progress)
	}

	totalFiles := FileCount(len(c.files))
	totalBytes := c.TotalBytes()

	var checkedFiles FileCount
	var checkedBytes FileSize
	var failures FileCount

	startTime := time.Now()

	for _, entry := range c.files {
		// Poll for cancellation between files (non-blocking).
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		// checkFile accumulates the bytes it accounted for into checkedBytes.
		result := c.checkFile(entry, &checkedBytes)
		if result.Status != StatusOK {
			failures++
		}
		checkedFiles++

		if results != nil {
			results <- result
		}

		// Send progress with rate and ETA calculation
		if progress != nil {
			elapsed := time.Since(startTime)
			var bytesPerSec float64
			var eta time.Duration

			if elapsed > 0 && checkedBytes > 0 {
				bytesPerSec = float64(checkedBytes) / elapsed.Seconds()
				remainingBytes := totalBytes - checkedBytes
				if bytesPerSec > 0 {
					// ETA in whole seconds at the current average rate.
					eta = time.Duration(float64(remainingBytes)/bytesPerSec) * time.Second
				}
			}

			// sendCheckStatus is non-blocking: slow receivers miss updates.
			sendCheckStatus(progress, CheckStatus{
				TotalFiles:   totalFiles,
				CheckedFiles: checkedFiles,
				TotalBytes:   totalBytes,
				CheckedBytes: checkedBytes,
				BytesPerSec:  bytesPerSec,
				ETA:          eta,
				Failures:     failures,
			})
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
func (c *Checker) checkFile(entry *MFFilePath, checkedBytes *FileSize) Result {
|
|
||||||
absPath := filepath.Join(string(c.basePath), entry.Path)
|
|
||||||
relPath := RelFilePath(entry.Path)
|
|
||||||
|
|
||||||
// Check if file exists
|
|
||||||
info, err := c.fs.Stat(absPath)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) || errors.Is(err, afero.ErrFileNotFound) {
|
|
||||||
return Result{Path: relPath, Status: StatusMissing, Message: "file not found"}
|
|
||||||
}
|
|
||||||
return Result{Path: relPath, Status: StatusError, Message: err.Error()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check size
|
|
||||||
if info.Size() != entry.Size {
|
|
||||||
*checkedBytes += FileSize(info.Size())
|
|
||||||
return Result{
|
|
||||||
Path: relPath,
|
|
||||||
Status: StatusSizeMismatch,
|
|
||||||
Message: "size mismatch",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open and hash file
|
|
||||||
f, err := c.fs.Open(absPath)
|
|
||||||
if err != nil {
|
|
||||||
return Result{Path: relPath, Status: StatusError, Message: err.Error()}
|
|
||||||
}
|
|
||||||
defer func() { _ = f.Close() }()
|
|
||||||
|
|
||||||
h := sha256.New()
|
|
||||||
n, err := io.Copy(h, f)
|
|
||||||
if err != nil {
|
|
||||||
return Result{Path: relPath, Status: StatusError, Message: err.Error()}
|
|
||||||
}
|
|
||||||
*checkedBytes += FileSize(n)
|
|
||||||
|
|
||||||
// Encode as multihash and compare
|
|
||||||
computed, err := multihash.Encode(h.Sum(nil), multihash.SHA2_256)
|
|
||||||
if err != nil {
|
|
||||||
return Result{Path: relPath, Status: StatusError, Message: err.Error()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check against all hashes in manifest (at least one must match)
|
|
||||||
for _, hash := range entry.Hashes {
|
|
||||||
if bytes.Equal(computed, hash.MultiHash) {
|
|
||||||
return Result{Path: relPath, Status: StatusOK}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return Result{Path: relPath, Status: StatusHashMismatch, Message: "hash mismatch"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FindExtraFiles walks the filesystem and reports files not in the manifest.
|
|
||||||
// Results are sent to the results channel. The channel is closed when done.
|
|
||||||
func (c *Checker) FindExtraFiles(ctx context.Context, results chan<- Result) error {
|
|
||||||
if results != nil {
|
|
||||||
defer close(results)
|
|
||||||
}
|
|
||||||
|
|
||||||
return afero.Walk(c.fs, string(c.basePath), func(path string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip directories
|
|
||||||
if info.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get relative path
|
|
||||||
rel, err := filepath.Rel(string(c.basePath), path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
relPath := RelFilePath(rel)
|
|
||||||
|
|
||||||
// Check if path is in manifest
|
|
||||||
if _, exists := c.manifestPaths[relPath]; !exists {
|
|
||||||
if results != nil {
|
|
||||||
results <- Result{
|
|
||||||
Path: relPath,
|
|
||||||
Status: StatusExtra,
|
|
||||||
Message: "not in manifest",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendCheckStatus sends a status update without blocking.
|
|
||||||
func sendCheckStatus(ch chan<- CheckStatus, status CheckStatus) {
|
|
||||||
if ch == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case ch <- status:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,437 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestStatusString(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
status Status
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{StatusOK, "OK"},
|
|
||||||
{StatusMissing, "MISSING"},
|
|
||||||
{StatusSizeMismatch, "SIZE_MISMATCH"},
|
|
||||||
{StatusHashMismatch, "HASH_MISMATCH"},
|
|
||||||
{StatusExtra, "EXTRA"},
|
|
||||||
{StatusError, "ERROR"},
|
|
||||||
{Status(99), "UNKNOWN"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.expected, func(t *testing.T) {
|
|
||||||
assert.Equal(t, tt.expected, tt.status.String())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// createTestManifest creates a manifest file in the filesystem with the given files.
|
|
||||||
func createTestManifest(t *testing.T, fs afero.Fs, manifestPath string, files map[string][]byte) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
builder := NewBuilder()
|
|
||||||
for path, content := range files {
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
_, err := builder.AddFile(RelFilePath(path), FileSize(len(content)), ModTime(time.Now()), reader, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
require.NoError(t, builder.Build(&buf))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, manifestPath, buf.Bytes(), 0o644))
|
|
||||||
}
|
|
||||||
|
|
||||||
// createFilesOnDisk creates the given files on the filesystem.
|
|
||||||
func createFilesOnDisk(t *testing.T, fs afero.Fs, basePath string, files map[string][]byte) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
for path, content := range files {
|
|
||||||
fullPath := basePath + "/" + path
|
|
||||||
require.NoError(t, fs.MkdirAll(basePath, 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, fullPath, content, 0o644))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewChecker(t *testing.T) {
|
|
||||||
t.Run("valid manifest", func(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"file1.txt": []byte("hello"),
|
|
||||||
"file2.txt": []byte("world"),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotNil(t, chk)
|
|
||||||
assert.Equal(t, FileCount(2), chk.FileCount())
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("missing manifest", func(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
_, err := NewChecker("/nonexistent.mf", "/", fs)
|
|
||||||
assert.Error(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("invalid manifest", func(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/bad.mf", []byte("not a manifest"), 0o644))
|
|
||||||
_, err := NewChecker("/bad.mf", "/", fs)
|
|
||||||
assert.Error(t, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckerFileCountAndTotalBytes(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"small.txt": []byte("hi"),
|
|
||||||
"medium.txt": []byte("hello world"),
|
|
||||||
"large.txt": bytes.Repeat([]byte("x"), 1000),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, FileCount(3), chk.FileCount())
|
|
||||||
assert.Equal(t, FileSize(2+11+1000), chk.TotalBytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckAllFilesOK(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"file1.txt": []byte("content one"),
|
|
||||||
"file2.txt": []byte("content two"),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
createFilesOnDisk(t, fs, "/data", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.Check(context.Background(), results, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var resultList []Result
|
|
||||||
for r := range results {
|
|
||||||
resultList = append(resultList, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Len(t, resultList, 2)
|
|
||||||
for _, r := range resultList {
|
|
||||||
assert.Equal(t, StatusOK, r.Status, "file %s should be OK", r.Path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckMissingFile(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"exists.txt": []byte("I exist"),
|
|
||||||
"missing.txt": []byte("I don't exist on disk"),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
// Only create one file
|
|
||||||
createFilesOnDisk(t, fs, "/data", map[string][]byte{
|
|
||||||
"exists.txt": []byte("I exist"),
|
|
||||||
})
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.Check(context.Background(), results, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var okCount, missingCount int
|
|
||||||
for r := range results {
|
|
||||||
switch r.Status {
|
|
||||||
case StatusOK:
|
|
||||||
okCount++
|
|
||||||
case StatusMissing:
|
|
||||||
missingCount++
|
|
||||||
assert.Equal(t, RelFilePath("missing.txt"), r.Path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, 1, okCount)
|
|
||||||
assert.Equal(t, 1, missingCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckSizeMismatch(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"file.txt": []byte("original content"),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
// Create file with different size
|
|
||||||
createFilesOnDisk(t, fs, "/data", map[string][]byte{
|
|
||||||
"file.txt": []byte("short"),
|
|
||||||
})
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.Check(context.Background(), results, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
r := <-results
|
|
||||||
assert.Equal(t, StatusSizeMismatch, r.Status)
|
|
||||||
assert.Equal(t, RelFilePath("file.txt"), r.Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckHashMismatch(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
originalContent := []byte("original content")
|
|
||||||
files := map[string][]byte{
|
|
||||||
"file.txt": originalContent,
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
// Create file with same size but different content
|
|
||||||
differentContent := []byte("different contnt") // same length (16 bytes) but different
|
|
||||||
require.Equal(t, len(originalContent), len(differentContent), "test requires same length")
|
|
||||||
createFilesOnDisk(t, fs, "/data", map[string][]byte{
|
|
||||||
"file.txt": differentContent,
|
|
||||||
})
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.Check(context.Background(), results, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
r := <-results
|
|
||||||
assert.Equal(t, StatusHashMismatch, r.Status)
|
|
||||||
assert.Equal(t, RelFilePath("file.txt"), r.Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckWithProgress(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"file1.txt": bytes.Repeat([]byte("a"), 100),
|
|
||||||
"file2.txt": bytes.Repeat([]byte("b"), 200),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
createFilesOnDisk(t, fs, "/data", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
progress := make(chan CheckStatus, 10)
|
|
||||||
|
|
||||||
err = chk.Check(context.Background(), results, progress)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Drain results
|
|
||||||
for range results {
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check progress was sent
|
|
||||||
var progressUpdates []CheckStatus
|
|
||||||
for p := range progress {
|
|
||||||
progressUpdates = append(progressUpdates, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.NotEmpty(t, progressUpdates)
|
|
||||||
// Final progress should show all files checked
|
|
||||||
final := progressUpdates[len(progressUpdates)-1]
|
|
||||||
assert.Equal(t, FileCount(2), final.TotalFiles)
|
|
||||||
assert.Equal(t, FileCount(2), final.CheckedFiles)
|
|
||||||
assert.Equal(t, FileSize(300), final.TotalBytes)
|
|
||||||
assert.Equal(t, FileSize(300), final.CheckedBytes)
|
|
||||||
assert.Equal(t, FileCount(0), final.Failures)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckContextCancellation(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
// Create many files to ensure we have time to cancel
|
|
||||||
files := make(map[string][]byte)
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
files[string(rune('a'+i%26))+".txt"] = bytes.Repeat([]byte("x"), 1000)
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
createFilesOnDisk(t, fs, "/data", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
cancel() // Cancel immediately
|
|
||||||
|
|
||||||
results := make(chan Result, 200)
|
|
||||||
err = chk.Check(ctx, results, nil)
|
|
||||||
assert.ErrorIs(t, err, context.Canceled)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFindExtraFiles(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
// Manifest only contains file1
|
|
||||||
manifestFiles := map[string][]byte{
|
|
||||||
"file1.txt": []byte("in manifest"),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", manifestFiles)
|
|
||||||
|
|
||||||
// Disk has file1 and file2
|
|
||||||
createFilesOnDisk(t, fs, "/data", map[string][]byte{
|
|
||||||
"file1.txt": []byte("in manifest"),
|
|
||||||
"file2.txt": []byte("extra file"),
|
|
||||||
})
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.FindExtraFiles(context.Background(), results)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var extras []Result
|
|
||||||
for r := range results {
|
|
||||||
extras = append(extras, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Len(t, extras, 1)
|
|
||||||
assert.Equal(t, RelFilePath("file2.txt"), extras[0].Path)
|
|
||||||
assert.Equal(t, StatusExtra, extras[0].Status)
|
|
||||||
assert.Equal(t, "not in manifest", extras[0].Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFindExtraFilesContextCancellation(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{"file.txt": []byte("data")}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
createFilesOnDisk(t, fs, "/data", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
cancel() // Cancel immediately
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.FindExtraFiles(ctx, results)
|
|
||||||
assert.ErrorIs(t, err, context.Canceled)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckNilChannels(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{"file.txt": []byte("data")}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
createFilesOnDisk(t, fs, "/data", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Should not panic with nil channels
|
|
||||||
err = chk.Check(context.Background(), nil, nil)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFindExtraFilesNilChannel(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{"file.txt": []byte("data")}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
createFilesOnDisk(t, fs, "/data", files)
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Should not panic with nil channel
|
|
||||||
err = chk.FindExtraFiles(context.Background(), nil)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckSubdirectories(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"dir1/file1.txt": []byte("content1"),
|
|
||||||
"dir1/dir2/file2.txt": []byte("content2"),
|
|
||||||
"dir1/dir2/dir3/deep.txt": []byte("deep content"),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
|
|
||||||
// Create files with full directory structure
|
|
||||||
for path, content := range files {
|
|
||||||
fullPath := "/data/" + path
|
|
||||||
require.NoError(t, fs.MkdirAll("/data/dir1/dir2/dir3", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, fullPath, content, 0o644))
|
|
||||||
}
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.Check(context.Background(), results, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var okCount int
|
|
||||||
for r := range results {
|
|
||||||
assert.Equal(t, StatusOK, r.Status, "file %s should be OK", r.Path)
|
|
||||||
okCount++
|
|
||||||
}
|
|
||||||
assert.Equal(t, 3, okCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckMissingFileDetectedWithoutFallback(t *testing.T) {
|
|
||||||
// Regression test: errors.Is(err, errors.New("...")) never matches because
|
|
||||||
// errors.New creates a new value each time. The fix uses os.ErrNotExist instead.
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
files := map[string][]byte{
|
|
||||||
"exists.txt": []byte("here"),
|
|
||||||
"missing.txt": []byte("not on disk"),
|
|
||||||
}
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", files)
|
|
||||||
// Only create one file on disk
|
|
||||||
createFilesOnDisk(t, fs, "/data", map[string][]byte{
|
|
||||||
"exists.txt": []byte("here"),
|
|
||||||
})
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.Check(context.Background(), results, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
statusCounts := map[Status]int{}
|
|
||||||
for r := range results {
|
|
||||||
statusCounts[r.Status]++
|
|
||||||
if r.Status == StatusMissing {
|
|
||||||
assert.Equal(t, RelFilePath("missing.txt"), r.Path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.Equal(t, 1, statusCounts[StatusOK], "one file should be OK")
|
|
||||||
assert.Equal(t, 1, statusCounts[StatusMissing], "one file should be MISSING")
|
|
||||||
assert.Equal(t, 0, statusCounts[StatusError], "no files should be ERROR")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCheckEmptyManifest(t *testing.T) {
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
// Create manifest with no files
|
|
||||||
createTestManifest(t, fs, "/manifest.mf", map[string][]byte{})
|
|
||||||
|
|
||||||
chk, err := NewChecker("/manifest.mf", "/data", fs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, FileCount(0), chk.FileCount())
|
|
||||||
assert.Equal(t, FileSize(0), chk.TotalBytes())
|
|
||||||
|
|
||||||
results := make(chan Result, 10)
|
|
||||||
err = chk.Check(context.Background(), results, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var count int
|
|
||||||
for range results {
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
assert.Equal(t, 0, count)
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
const (
	// Version is the current mfer release version.
	Version = "0.1.0"
	// ReleaseDate is the date on which Version was cut.
	ReleaseDate = "2025-12-17"

	// MaxDecompressedSize caps decompressed manifest data at 256 MB so a
	// crafted manifest cannot act as a decompression bomb and exhaust memory.
	MaxDecompressedSize int64 = 256 * 1024 * 1024
)
|
|
||||||
@@ -1,172 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/sha256"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/klauspost/compress/zstd"
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
"sneak.berlin/go/mfer/internal/bork"
|
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// validateUUID checks that the byte slice is a valid UUID (16 bytes, parseable).
|
|
||||||
func validateUUID(data []byte) error {
|
|
||||||
if len(data) != 16 {
|
|
||||||
return errors.New("invalid UUID length")
|
|
||||||
}
|
|
||||||
// Try to parse as UUID to validate format
|
|
||||||
_, err := uuid.FromBytes(data)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New("invalid UUID format")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// deserializeInner validates the outer envelope (m.pbOuter) and decodes the
// compressed inner protobuf into m.pbInner. Validation order is deliberate:
// cheap header checks, then the SHA-256 of the compressed payload, then the
// optional GPG signature — all BEFORE any decompression, so untrusted input
// is never fed to zstd until its integrity is established.
func (m *manifest) deserializeInner() error {
	if m.pbOuter.Version != MFFileOuter_VERSION_ONE {
		return errors.New("unknown version")
	}
	if m.pbOuter.CompressionType != MFFileOuter_COMPRESSION_ZSTD {
		return errors.New("unknown compression type")
	}

	// Validate outer UUID before any decompression
	if err := validateUUID(m.pbOuter.Uuid); err != nil {
		return errors.New("outer UUID invalid: " + err.Error())
	}

	// Verify hash of compressed data before decompression
	h := sha256.New()
	if _, err := h.Write(m.pbOuter.InnerMessage); err != nil {
		return err
	}
	sha256Hash := h.Sum(nil)
	if !bytes.Equal(sha256Hash, m.pbOuter.Sha256) {
		return errors.New("compressed data hash mismatch")
	}

	// Verify signature if present. A signature without its public key is an
	// error; an absent signature is allowed (unsigned manifest).
	if len(m.pbOuter.Signature) > 0 {
		if len(m.pbOuter.SigningPubKey) == 0 {
			return errors.New("signature present but no public key")
		}

		// The signature covers the canonical string, not the raw bytes.
		sigString, err := m.signatureString()
		if err != nil {
			return fmt.Errorf("failed to generate signature string for verification: %w", err)
		}

		if err := gpgVerify([]byte(sigString), m.pbOuter.Signature, m.pbOuter.SigningPubKey); err != nil {
			return fmt.Errorf("signature verification failed: %w", err)
		}
		log.Infof("signature verified successfully")
	}

	bb := bytes.NewBuffer(m.pbOuter.InnerMessage)

	zr, err := zstd.NewReader(bb)
	if err != nil {
		return err
	}
	defer zr.Close()

	// Limit decompressed size to prevent decompression bombs.
	// Use declared size + 1 byte to detect overflow, capped at MaxDecompressedSize.
	maxSize := MaxDecompressedSize
	if m.pbOuter.Size > 0 && m.pbOuter.Size < int64(maxSize) {
		maxSize = int64(m.pbOuter.Size) + 1
	}
	limitedReader := io.LimitReader(zr, maxSize)
	dat, err := io.ReadAll(limitedReader)
	if err != nil {
		return err
	}
	// Hitting the global cap exactly means the stream was truncated by the
	// limiter, i.e. the real payload is at least this large.
	if int64(len(dat)) >= MaxDecompressedSize {
		return fmt.Errorf("decompressed data exceeds maximum allowed size of %d bytes", MaxDecompressedSize)
	}

	// Decompressed length must match the declared size exactly.
	isize := len(dat)
	if int64(isize) != m.pbOuter.Size {
		log.Debugf("truncated data, got %d expected %d", isize, m.pbOuter.Size)
		return bork.ErrFileTruncated
	}

	// Deserialize inner message
	m.pbInner = new(MFFile)
	if err := proto.Unmarshal(dat, m.pbInner); err != nil {
		return err
	}

	// Validate inner UUID
	if err := validateUUID(m.pbInner.Uuid); err != nil {
		return errors.New("inner UUID invalid: " + err.Error())
	}

	// Verify UUIDs match — binds the inner payload to its outer envelope.
	if !bytes.Equal(m.pbOuter.Uuid, m.pbInner.Uuid) {
		return errors.New("outer and inner UUID mismatch")
	}

	log.Infof("loaded manifest with %d files", len(m.pbInner.Files))
	return nil
}
|
|
||||||
|
|
||||||
func validateMagic(dat []byte) bool {
|
|
||||||
ml := len([]byte(MAGIC))
|
|
||||||
if len(dat) < ml {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
got := dat[0:ml]
|
|
||||||
expected := []byte(MAGIC)
|
|
||||||
return bytes.Equal(got, expected)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewManifestFromReader reads a manifest from an io.Reader.
|
|
||||||
func NewManifestFromReader(input io.Reader) (*manifest, error) {
|
|
||||||
m := &manifest{}
|
|
||||||
dat, err := io.ReadAll(input)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !validateMagic(dat) {
|
|
||||||
return nil, errors.New("invalid file format")
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove magic bytes prefix:
|
|
||||||
ml := len([]byte(MAGIC))
|
|
||||||
bb := bytes.NewBuffer(dat[ml:])
|
|
||||||
dat = bb.Bytes()
|
|
||||||
|
|
||||||
// deserialize outer:
|
|
||||||
m.pbOuter = new(MFFileOuter)
|
|
||||||
if err := proto.Unmarshal(dat, m.pbOuter); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// deserialize inner:
|
|
||||||
if err := m.deserializeInner(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewManifestFromFile reads a manifest from a file path using the given filesystem.
|
|
||||||
// If fs is nil, the real filesystem (OsFs) is used.
|
|
||||||
func NewManifestFromFile(fs afero.Fs, path string) (*manifest, error) {
|
|
||||||
if fs == nil {
|
|
||||||
fs = afero.NewOsFs()
|
|
||||||
}
|
|
||||||
f, err := fs.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer func() { _ = f.Close() }()
|
|
||||||
return NewManifestFromReader(f)
|
|
||||||
}
|
|
||||||
3
mfer/gen.go
Normal file
3
mfer/gen.go
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
package mfer
|
||||||
|
|
||||||
|
//go:generate protoc --go_out=. --go_opt=paths=source_relative mf.proto
|
||||||
212
mfer/gpg.go
212
mfer/gpg.go
@@ -1,212 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)
|
|
||||||
|
|
||||||
// GPGKeyID represents a GPG key identifier (fingerprint or key ID).
|
|
||||||
type GPGKeyID string
|
|
||||||
|
|
||||||
// SigningOptions contains options for GPG signing.
|
|
||||||
type SigningOptions struct {
|
|
||||||
KeyID GPGKeyID
|
|
||||||
}
|
|
||||||
|
|
||||||
// gpgSign creates a detached signature of the data using the specified key.
|
|
||||||
// Returns the armored detached signature.
|
|
||||||
func gpgSign(data []byte, keyID GPGKeyID) ([]byte, error) {
|
|
||||||
cmd := exec.Command("gpg",
|
|
||||||
"--detach-sign",
|
|
||||||
"--armor",
|
|
||||||
"--local-user", string(keyID),
|
|
||||||
)
|
|
||||||
|
|
||||||
cmd.Stdin = bytes.NewReader(data)
|
|
||||||
|
|
||||||
var stdout, stderr bytes.Buffer
|
|
||||||
cmd.Stdout = &stdout
|
|
||||||
cmd.Stderr = &stderr
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, fmt.Errorf("gpg sign failed: %w: %s", err, stderr.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
return stdout.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// gpgExportPublicKey exports the public key for the specified key ID.
|
|
||||||
// Returns the armored public key.
|
|
||||||
func gpgExportPublicKey(keyID GPGKeyID) ([]byte, error) {
|
|
||||||
cmd := exec.Command("gpg",
|
|
||||||
"--export",
|
|
||||||
"--armor",
|
|
||||||
string(keyID),
|
|
||||||
)
|
|
||||||
|
|
||||||
var stdout, stderr bytes.Buffer
|
|
||||||
cmd.Stdout = &stdout
|
|
||||||
cmd.Stderr = &stderr
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, fmt.Errorf("gpg export failed: %w: %s", err, stderr.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
if stdout.Len() == 0 {
|
|
||||||
return nil, fmt.Errorf("gpg key not found: %s", keyID)
|
|
||||||
}
|
|
||||||
|
|
||||||
return stdout.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// gpgGetKeyFingerprint gets the full fingerprint for a key ID.
|
|
||||||
func gpgGetKeyFingerprint(keyID GPGKeyID) ([]byte, error) {
|
|
||||||
cmd := exec.Command("gpg",
|
|
||||||
"--with-colons",
|
|
||||||
"--fingerprint",
|
|
||||||
string(keyID),
|
|
||||||
)
|
|
||||||
|
|
||||||
var stdout, stderr bytes.Buffer
|
|
||||||
cmd.Stdout = &stdout
|
|
||||||
cmd.Stderr = &stderr
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, fmt.Errorf("gpg fingerprint lookup failed: %w: %s", err, stderr.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the colon-delimited output to find the fingerprint
|
|
||||||
lines := strings.Split(stdout.String(), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
fields := strings.Split(line, ":")
|
|
||||||
if len(fields) >= 10 && fields[0] == "fpr" {
|
|
||||||
return []byte(fields[9]), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("fingerprint not found for key: %s", keyID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// gpgExtractPubKeyFingerprint imports a public key into a temporary keyring
// and extracts its fingerprint. This verifies the key is valid and returns
// the actual fingerprint from the key material rather than trusting any
// caller-supplied value.
func gpgExtractPubKeyFingerprint(pubKey []byte) (string, error) {
	// Create temporary directory to act as an isolated GNUPGHOME.
	tmpDir, err := os.MkdirTemp("", "mfer-gpg-fingerprint-*")
	if err != nil {
		return "", fmt.Errorf("failed to create temp dir: %w", err)
	}
	defer func() { _ = os.RemoveAll(tmpDir) }()

	// Set restrictive permissions; gpg refuses group/world-accessible homedirs.
	if err := os.Chmod(tmpDir, 0o700); err != nil {
		return "", fmt.Errorf("failed to set temp dir permissions: %w", err)
	}

	// Write public key to temp file
	pubKeyFile := filepath.Join(tmpDir, "pubkey.asc")
	if err := os.WriteFile(pubKeyFile, pubKey, 0o600); err != nil {
		return "", fmt.Errorf("failed to write public key: %w", err)
	}

	// Import the public key into the temporary keyring
	importCmd := exec.Command("gpg",
		"--homedir", tmpDir,
		"--import",
		pubKeyFile,
	)
	var importStderr bytes.Buffer
	importCmd.Stderr = &importStderr
	if err := importCmd.Run(); err != nil {
		return "", fmt.Errorf("failed to import public key: %w: %s", err, importStderr.String())
	}

	// List keys to get fingerprint
	listCmd := exec.Command("gpg",
		"--homedir", tmpDir,
		"--with-colons",
		"--fingerprint",
	)
	var listStdout, listStderr bytes.Buffer
	listCmd.Stdout = &listStdout
	listCmd.Stderr = &listStderr
	if err := listCmd.Run(); err != nil {
		return "", fmt.Errorf("failed to list keys: %w: %s", err, listStderr.String())
	}

	// "fpr" records carry the fingerprint in field 10 of the colon output.
	lines := strings.Split(listStdout.String(), "\n")
	for _, line := range lines {
		fields := strings.Split(line, ":")
		if len(fields) >= 10 && fields[0] == "fpr" {
			return fields[9], nil
		}
	}

	// errors.New instead of fmt.Errorf: no format directives (staticcheck S1039).
	return "", errors.New("fingerprint not found in imported key")
}
|
|
||||||
|
|
||||||
// gpgVerify verifies a detached signature against data using the provided public key.
// It creates a temporary keyring to import the public key for verification,
// so the caller's own keyring is never consulted or modified. All inputs are
// written to files because gpg's --verify interface is file-based.
func gpgVerify(data, signature, pubKey []byte) error {
	// Create temporary directory for GPG operations (isolated GNUPGHOME)
	tmpDir, err := os.MkdirTemp("", "mfer-gpg-verify-*")
	if err != nil {
		return fmt.Errorf("failed to create temp dir: %w", err)
	}
	defer func() { _ = os.RemoveAll(tmpDir) }()

	// Set restrictive permissions; gpg refuses group/world-accessible homedirs
	if err := os.Chmod(tmpDir, 0o700); err != nil {
		return fmt.Errorf("failed to set temp dir permissions: %w", err)
	}

	// Write public key to temp file
	pubKeyFile := filepath.Join(tmpDir, "pubkey.asc")
	if err := os.WriteFile(pubKeyFile, pubKey, 0o600); err != nil {
		return fmt.Errorf("failed to write public key: %w", err)
	}

	// Write signature to temp file
	sigFile := filepath.Join(tmpDir, "signature.asc")
	if err := os.WriteFile(sigFile, signature, 0o600); err != nil {
		return fmt.Errorf("failed to write signature: %w", err)
	}

	// Write data to temp file
	dataFile := filepath.Join(tmpDir, "data")
	if err := os.WriteFile(dataFile, data, 0o600); err != nil {
		return fmt.Errorf("failed to write data: %w", err)
	}

	// Import the public key into the temporary keyring
	importCmd := exec.Command("gpg",
		"--homedir", tmpDir,
		"--import",
		pubKeyFile,
	)
	var importStderr bytes.Buffer
	importCmd.Stderr = &importStderr
	if err := importCmd.Run(); err != nil {
		return fmt.Errorf("failed to import public key: %w: %s", err, importStderr.String())
	}

	// Verify the signature (argument order: gpg --verify SIGFILE DATAFILE)
	verifyCmd := exec.Command("gpg",
		"--homedir", tmpDir,
		"--verify",
		sigFile,
		dataFile,
	)
	var verifyStderr bytes.Buffer
	verifyCmd.Stderr = &verifyStderr
	if err := verifyCmd.Run(); err != nil {
		return fmt.Errorf("signature verification failed: %w: %s", err, verifyStderr.String())
	}

	return nil
}
|
|
||||||
347
mfer/gpg_test.go
347
mfer/gpg_test.go
@@ -1,347 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// testGPGEnv sets up a temporary GPG home directory with a test key.
// Returns the key ID (full fingerprint) and a cleanup function that
// restores GNUPGHOME and removes the temp directory.
//
// NOTE(review): this helper mutates the process-wide GNUPGHOME environment
// variable, so tests using it must not run in parallel. The generated key
// is a passphrase-less RSA-2048 key; the test is skipped when gpg is not
// installed or key generation fails (e.g. restricted CI sandbox).
func testGPGEnv(t *testing.T) (GPGKeyID, func()) {
	t.Helper()

	// Check if gpg is installed
	if _, err := exec.LookPath("gpg"); err != nil {
		t.Skip("gpg not installed, skipping signing test")
		return "", func() {}
	}

	// Create temporary GPG home directory
	gpgHome, err := os.MkdirTemp("", "mfer-gpg-test-*")
	require.NoError(t, err)

	// Set restrictive permissions on GPG home; gpg refuses homedirs that
	// are group/world accessible.
	require.NoError(t, os.Chmod(gpgHome, 0o700))

	// Save original GNUPGHOME and set new one
	origGPGHome := os.Getenv("GNUPGHOME")
	require.NoError(t, os.Setenv("GNUPGHOME", gpgHome))

	cleanup := func() {
		if origGPGHome == "" {
			_ = os.Unsetenv("GNUPGHOME")
		} else {
			_ = os.Setenv("GNUPGHOME", origGPGHome)
		}
		_ = os.RemoveAll(gpgHome)
	}

	// Generate a test key with no passphrase (gpg batch-mode parameters)
	keyParams := `%no-protection
Key-Type: RSA
Key-Length: 2048
Name-Real: MFER Test Key
Name-Email: test@mfer.test
Expire-Date: 0
%commit
`
	paramsFile := filepath.Join(gpgHome, "key-params")
	require.NoError(t, os.WriteFile(paramsFile, []byte(keyParams), 0o600))

	cmd := exec.Command("gpg", "--batch", "--gen-key", paramsFile)
	cmd.Env = append(os.Environ(), "GNUPGHOME="+gpgHome)
	output, err := cmd.CombinedOutput()
	if err != nil {
		cleanup()
		// Skip rather than fail: key generation can break for environmental
		// reasons (missing entropy, sandboxed filesystem).
		t.Skipf("failed to generate test GPG key: %v: %s", err, output)
		return "", func() {}
	}

	// Get the key fingerprint
	cmd = exec.Command("gpg", "--list-keys", "--with-colons", "test@mfer.test")
	cmd.Env = append(os.Environ(), "GNUPGHOME="+gpgHome)
	output, err = cmd.Output()
	if err != nil {
		cleanup()
		t.Fatalf("failed to list test key: %v", err)
	}

	// Parse fingerprint from output ("fpr" records, field 10)
	var keyID string
	for _, line := range strings.Split(string(output), "\n") {
		fields := strings.Split(line, ":")
		if len(fields) >= 10 && fields[0] == "fpr" {
			keyID = fields[9]
			break
		}
	}

	if keyID == "" {
		cleanup()
		t.Fatal("failed to find test key fingerprint")
	}

	return GPGKeyID(keyID), cleanup
}
|
|
||||||
|
|
||||||
func TestGPGSign(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
data := []byte("test data to sign")
|
|
||||||
sig, err := gpgSign(data, keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotEmpty(t, sig)
|
|
||||||
assert.Contains(t, string(sig), "-----BEGIN PGP SIGNATURE-----")
|
|
||||||
assert.Contains(t, string(sig), "-----END PGP SIGNATURE-----")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGExportPublicKey(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
pubKey, err := gpgExportPublicKey(keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotEmpty(t, pubKey)
|
|
||||||
assert.Contains(t, string(pubKey), "-----BEGIN PGP PUBLIC KEY BLOCK-----")
|
|
||||||
assert.Contains(t, string(pubKey), "-----END PGP PUBLIC KEY BLOCK-----")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGGetKeyFingerprint(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
fingerprint, err := gpgGetKeyFingerprint(keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotEmpty(t, fingerprint)
|
|
||||||
// The fingerprint should be 40 hex chars
|
|
||||||
assert.Len(t, fingerprint, 40, "fingerprint should be 40 hex chars")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGSignInvalidKey(t *testing.T) {
|
|
||||||
// Set up test environment (we need GNUPGHOME set)
|
|
||||||
_, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
data := []byte("test data")
|
|
||||||
_, err := gpgSign(data, GPGKeyID("NONEXISTENT_KEY_ID_12345"))
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuilderWithSigning(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
// Create a builder with signing options
|
|
||||||
b := NewBuilder()
|
|
||||||
b.SetSigningOptions(&SigningOptions{
|
|
||||||
KeyID: keyID,
|
|
||||||
})
|
|
||||||
|
|
||||||
// Add a test file
|
|
||||||
content := []byte("test file content")
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
_, err := b.AddFile("test.txt", FileSize(len(content)), ModTime{}, reader, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Build the manifest
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = b.Build(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Parse the manifest and verify signature fields are populated
|
|
||||||
manifest, err := NewManifestFromReader(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, manifest.pbOuter)
|
|
||||||
|
|
||||||
assert.NotEmpty(t, manifest.pbOuter.Signature, "signature should be populated")
|
|
||||||
assert.NotEmpty(t, manifest.pbOuter.Signer, "signer should be populated")
|
|
||||||
assert.NotEmpty(t, manifest.pbOuter.SigningPubKey, "signing public key should be populated")
|
|
||||||
|
|
||||||
// Verify signature is a valid PGP signature
|
|
||||||
assert.Contains(t, string(manifest.pbOuter.Signature), "-----BEGIN PGP SIGNATURE-----")
|
|
||||||
|
|
||||||
// Verify public key is a valid PGP public key block
|
|
||||||
assert.Contains(t, string(manifest.pbOuter.SigningPubKey), "-----BEGIN PGP PUBLIC KEY BLOCK-----")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestScannerWithSigning(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
// Create in-memory filesystem with test files
|
|
||||||
fs := afero.NewMemMapFs()
|
|
||||||
require.NoError(t, fs.MkdirAll("/testdir", 0o755))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("content1"), 0o644))
|
|
||||||
require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("content2"), 0o644))
|
|
||||||
|
|
||||||
// Create scanner with signing options
|
|
||||||
opts := &ScannerOptions{
|
|
||||||
Fs: fs,
|
|
||||||
SigningOptions: &SigningOptions{
|
|
||||||
KeyID: keyID,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
s := NewScannerWithOptions(opts)
|
|
||||||
|
|
||||||
// Enumerate files
|
|
||||||
require.NoError(t, s.EnumeratePath("/testdir", nil))
|
|
||||||
assert.Equal(t, FileCount(2), s.FileCount())
|
|
||||||
|
|
||||||
// Generate signed manifest
|
|
||||||
var buf bytes.Buffer
|
|
||||||
require.NoError(t, s.ToManifest(context.Background(), &buf, nil))
|
|
||||||
|
|
||||||
// Parse and verify
|
|
||||||
manifest, err := NewManifestFromReader(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.NotEmpty(t, manifest.pbOuter.Signature)
|
|
||||||
assert.NotEmpty(t, manifest.pbOuter.Signer)
|
|
||||||
assert.NotEmpty(t, manifest.pbOuter.SigningPubKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGVerify(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
data := []byte("test data to sign and verify")
|
|
||||||
sig, err := gpgSign(data, keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
pubKey, err := gpgExportPublicKey(keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify the signature
|
|
||||||
err = gpgVerify(data, sig, pubKey)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGVerifyInvalidSignature(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
data := []byte("test data to sign")
|
|
||||||
sig, err := gpgSign(data, keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
pubKey, err := gpgExportPublicKey(keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Try to verify with different data - should fail
|
|
||||||
wrongData := []byte("different data")
|
|
||||||
err = gpgVerify(wrongData, sig, pubKey)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGVerifyBadPublicKey(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
data := []byte("test data")
|
|
||||||
sig, err := gpgSign(data, keyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Try to verify with invalid public key - should fail
|
|
||||||
badPubKey := []byte("not a valid public key")
|
|
||||||
err = gpgVerify(data, sig, badPubKey)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestManifestSignatureVerification(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
// Create a builder with signing options
|
|
||||||
b := NewBuilder()
|
|
||||||
b.SetSigningOptions(&SigningOptions{
|
|
||||||
KeyID: keyID,
|
|
||||||
})
|
|
||||||
|
|
||||||
// Add a test file
|
|
||||||
content := []byte("test file content for verification")
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
_, err := b.AddFile("test.txt", FileSize(len(content)), ModTime{}, reader, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Build the manifest
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = b.Build(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Parse the manifest - signature should be verified during load
|
|
||||||
manifest, err := NewManifestFromReader(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, manifest)
|
|
||||||
|
|
||||||
// Signature should be present and valid
|
|
||||||
assert.NotEmpty(t, manifest.pbOuter.Signature)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestManifestTamperedSignatureFails(t *testing.T) {
|
|
||||||
keyID, cleanup := testGPGEnv(t)
|
|
||||||
defer cleanup()
|
|
||||||
|
|
||||||
// Create a signed manifest
|
|
||||||
b := NewBuilder()
|
|
||||||
b.SetSigningOptions(&SigningOptions{
|
|
||||||
KeyID: keyID,
|
|
||||||
})
|
|
||||||
|
|
||||||
content := []byte("test file content")
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
_, err := b.AddFile("test.txt", FileSize(len(content)), ModTime{}, reader, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = b.Build(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Tamper with the signature by replacing some bytes
|
|
||||||
data := buf.Bytes()
|
|
||||||
// Find and modify a byte in the signature portion
|
|
||||||
for i := range data {
|
|
||||||
if i > 100 && data[i] == 'A' {
|
|
||||||
data[i] = 'B'
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to load the tampered manifest - should fail
|
|
||||||
_, err = NewManifestFromReader(bytes.NewReader(data))
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuilderWithoutSigning(t *testing.T) {
|
|
||||||
// Create a builder without signing options
|
|
||||||
b := NewBuilder()
|
|
||||||
|
|
||||||
// Add a test file
|
|
||||||
content := []byte("test file content")
|
|
||||||
reader := bytes.NewReader(content)
|
|
||||||
_, err := b.AddFile("test.txt", FileSize(len(content)), ModTime{}, reader, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Build the manifest
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = b.Build(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Parse the manifest and verify signature fields are empty
|
|
||||||
manifest, err := NewManifestFromReader(&buf)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, manifest.pbOuter)
|
|
||||||
|
|
||||||
assert.Empty(t, manifest.pbOuter.Signature, "signature should be empty when not signing")
|
|
||||||
assert.Empty(t, manifest.pbOuter.Signer, "signer should be empty when not signing")
|
|
||||||
assert.Empty(t, manifest.pbOuter.SigningPubKey, "signing public key should be empty when not signing")
|
|
||||||
}
|
|
||||||
@@ -1,59 +1,66 @@
|
|||||||
package mfer
|
package mfer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
|
||||||
"github.com/multiformats/go-multihash"
|
"github.com/spf13/afero"
|
||||||
)
|
)
|
||||||
|
|
||||||
// manifest holds the internal representation of a manifest file.
|
type ManifestFile struct {
|
||||||
// Use NewManifestFromFile or NewManifestFromReader to load an existing manifest,
|
Path string
|
||||||
// or use Builder to create a new one.
|
FileInfo fs.FileInfo
|
||||||
type manifest struct {
|
|
||||||
pbInner *MFFile
|
|
||||||
pbOuter *MFFileOuter
|
|
||||||
output *bytes.Buffer
|
|
||||||
signingOptions *SigningOptions
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *manifest) String() string {
|
type Manifest struct {
|
||||||
count := 0
|
SourceFS afero.Fs
|
||||||
if m.pbInner != nil {
|
Files []*ManifestFile
|
||||||
count = len(m.pbInner.Files)
|
TotalFileSize int64
|
||||||
}
|
|
||||||
return fmt.Sprintf("<Manifest count=%d>", count)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Files returns all file entries from a loaded manifest.
|
type ManifestScanOptions struct {
|
||||||
func (m *manifest) Files() []*MFFilePath {
|
IgnoreDotfiles bool
|
||||||
if m.pbInner == nil {
|
FollowSymLinks bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFromPath(inputPath string, options *ManifestScanOptions) (*Manifest, error) {
|
||||||
|
afs := afero.NewBasePathFs(afero.NewOsFs(), inputPath)
|
||||||
|
return NewFromFilesystem(afs, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFromFilesystem(fs afero.Fs, options *ManifestScanOptions) (*Manifest, error) {
|
||||||
|
m := &Manifest{
|
||||||
|
SourceFS: fs,
|
||||||
|
}
|
||||||
|
m.Scan()
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Manifest) Scan() {
|
||||||
|
afero.Walk(m.SourceFS, "", func(path string, info fs.FileInfo, err error) error {
|
||||||
|
|
||||||
|
if info != nil && info.IsDir() {
|
||||||
|
// manifests contain only files, directories are implied.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return m.pbInner.Files
|
|
||||||
|
fmt.Printf("path = %s\n", path)
|
||||||
|
fileinfo, staterr := m.SourceFS.Stat(path)
|
||||||
|
if staterr != nil {
|
||||||
|
panic(staterr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// signatureString generates the canonical string used for signing/verification.
|
nf := &ManifestFile{
|
||||||
// Format: MAGIC-UUID-MULTIHASH where UUID and multihash are hex-encoded.
|
Path: path,
|
||||||
// Requires pbOuter to be set with Uuid and Sha256 fields.
|
FileInfo: fileinfo,
|
||||||
func (m *manifest) signatureString() (string, error) {
|
|
||||||
if m.pbOuter == nil {
|
|
||||||
return "", errors.New("pbOuter not set")
|
|
||||||
}
|
}
|
||||||
if len(m.pbOuter.Uuid) == 0 {
|
m.Files = append(m.Files, nf)
|
||||||
return "", errors.New("UUID not set")
|
m.TotalFileSize = m.TotalFileSize + info.Size()
|
||||||
}
|
return nil
|
||||||
if len(m.pbOuter.Sha256) == 0 {
|
})
|
||||||
return "", errors.New("SHA256 hash not set")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
mh, err := multihash.Encode(m.pbOuter.Sha256, multihash.SHA2_256)
|
func (m *Manifest) Write(output io.Writer) error {
|
||||||
if err != nil {
|
return nil
|
||||||
return "", fmt.Errorf("failed to encode multihash: %w", err)
|
|
||||||
}
|
|
||||||
uuidStr := hex.EncodeToString(m.pbOuter.Uuid)
|
|
||||||
mhStr := hex.EncodeToString(mh)
|
|
||||||
return fmt.Sprintf("%s-%s-%s", MAGIC, uuidStr, mhStr), nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,3 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
//go:generate protoc ./mf.proto --go_out=paths=source_relative:.
|
|
||||||
667
mfer/mf.pb.go
667
mfer/mf.pb.go
@@ -1,667 +0,0 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// versions:
|
|
||||||
// protoc-gen-go v1.36.11
|
|
||||||
// protoc v6.33.0
|
|
||||||
// source: mf.proto
|
|
||||||
|
|
||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
reflect "reflect"
|
|
||||||
sync "sync"
|
|
||||||
unsafe "unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Verify that this generated code is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
|
||||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
|
||||||
)
|
|
||||||
|
|
||||||
type MFFileOuter_Version int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
MFFileOuter_VERSION_NONE MFFileOuter_Version = 0
|
|
||||||
MFFileOuter_VERSION_ONE MFFileOuter_Version = 1 // only one for now
|
|
||||||
)
|
|
||||||
|
|
||||||
// Enum value maps for MFFileOuter_Version.
|
|
||||||
var (
|
|
||||||
MFFileOuter_Version_name = map[int32]string{
|
|
||||||
0: "VERSION_NONE",
|
|
||||||
1: "VERSION_ONE",
|
|
||||||
}
|
|
||||||
MFFileOuter_Version_value = map[string]int32{
|
|
||||||
"VERSION_NONE": 0,
|
|
||||||
"VERSION_ONE": 1,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (x MFFileOuter_Version) Enum() *MFFileOuter_Version {
|
|
||||||
p := new(MFFileOuter_Version)
|
|
||||||
*p = x
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x MFFileOuter_Version) String() string {
|
|
||||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (MFFileOuter_Version) Descriptor() protoreflect.EnumDescriptor {
|
|
||||||
return file_mf_proto_enumTypes[0].Descriptor()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (MFFileOuter_Version) Type() protoreflect.EnumType {
|
|
||||||
return &file_mf_proto_enumTypes[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x MFFileOuter_Version) Number() protoreflect.EnumNumber {
|
|
||||||
return protoreflect.EnumNumber(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use MFFileOuter_Version.Descriptor instead.
|
|
||||||
func (MFFileOuter_Version) EnumDescriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{1, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
type MFFileOuter_CompressionType int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
MFFileOuter_COMPRESSION_NONE MFFileOuter_CompressionType = 0
|
|
||||||
MFFileOuter_COMPRESSION_ZSTD MFFileOuter_CompressionType = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
// Enum value maps for MFFileOuter_CompressionType.
|
|
||||||
var (
|
|
||||||
MFFileOuter_CompressionType_name = map[int32]string{
|
|
||||||
0: "COMPRESSION_NONE",
|
|
||||||
1: "COMPRESSION_ZSTD",
|
|
||||||
}
|
|
||||||
MFFileOuter_CompressionType_value = map[string]int32{
|
|
||||||
"COMPRESSION_NONE": 0,
|
|
||||||
"COMPRESSION_ZSTD": 1,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (x MFFileOuter_CompressionType) Enum() *MFFileOuter_CompressionType {
|
|
||||||
p := new(MFFileOuter_CompressionType)
|
|
||||||
*p = x
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x MFFileOuter_CompressionType) String() string {
|
|
||||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (MFFileOuter_CompressionType) Descriptor() protoreflect.EnumDescriptor {
|
|
||||||
return file_mf_proto_enumTypes[1].Descriptor()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (MFFileOuter_CompressionType) Type() protoreflect.EnumType {
|
|
||||||
return &file_mf_proto_enumTypes[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x MFFileOuter_CompressionType) Number() protoreflect.EnumNumber {
|
|
||||||
return protoreflect.EnumNumber(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use MFFileOuter_CompressionType.Descriptor instead.
|
|
||||||
func (MFFileOuter_CompressionType) EnumDescriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{1, 1}
|
|
||||||
}
|
|
||||||
|
|
||||||
type MFFile_Version int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
MFFile_VERSION_NONE MFFile_Version = 0
|
|
||||||
MFFile_VERSION_ONE MFFile_Version = 1 // only one for now
|
|
||||||
)
|
|
||||||
|
|
||||||
// Enum value maps for MFFile_Version.
|
|
||||||
var (
|
|
||||||
MFFile_Version_name = map[int32]string{
|
|
||||||
0: "VERSION_NONE",
|
|
||||||
1: "VERSION_ONE",
|
|
||||||
}
|
|
||||||
MFFile_Version_value = map[string]int32{
|
|
||||||
"VERSION_NONE": 0,
|
|
||||||
"VERSION_ONE": 1,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (x MFFile_Version) Enum() *MFFile_Version {
|
|
||||||
p := new(MFFile_Version)
|
|
||||||
*p = x
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x MFFile_Version) String() string {
|
|
||||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (MFFile_Version) Descriptor() protoreflect.EnumDescriptor {
|
|
||||||
return file_mf_proto_enumTypes[2].Descriptor()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (MFFile_Version) Type() protoreflect.EnumType {
|
|
||||||
return &file_mf_proto_enumTypes[2]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x MFFile_Version) Number() protoreflect.EnumNumber {
|
|
||||||
return protoreflect.EnumNumber(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use MFFile_Version.Descriptor instead.
|
|
||||||
func (MFFile_Version) EnumDescriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{4, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Timestamp struct {
|
|
||||||
state protoimpl.MessageState `protogen:"open.v1"`
|
|
||||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *Timestamp) Reset() {
|
|
||||||
*x = Timestamp{}
|
|
||||||
mi := &file_mf_proto_msgTypes[0]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *Timestamp) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Timestamp) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *Timestamp) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_mf_proto_msgTypes[0]
|
|
||||||
if x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
|
|
||||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *Timestamp) GetSeconds() int64 {
|
|
||||||
if x != nil {
|
|
||||||
return x.Seconds
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *Timestamp) GetNanos() int32 {
|
|
||||||
if x != nil {
|
|
||||||
return x.Nanos
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type MFFileOuter struct {
|
|
||||||
state protoimpl.MessageState `protogen:"open.v1"`
|
|
||||||
// required mffile root attributes 1xx
|
|
||||||
Version MFFileOuter_Version `protobuf:"varint,101,opt,name=version,proto3,enum=MFFileOuter_Version" json:"version,omitempty"`
|
|
||||||
CompressionType MFFileOuter_CompressionType `protobuf:"varint,102,opt,name=compressionType,proto3,enum=MFFileOuter_CompressionType" json:"compressionType,omitempty"`
|
|
||||||
// these are used solely to detect corruption/truncation
|
|
||||||
// and not for cryptographic integrity.
|
|
||||||
Size int64 `protobuf:"varint,103,opt,name=size,proto3" json:"size,omitempty"`
|
|
||||||
Sha256 []byte `protobuf:"bytes,104,opt,name=sha256,proto3" json:"sha256,omitempty"`
|
|
||||||
// uuid must match the uuid in the inner message
|
|
||||||
Uuid []byte `protobuf:"bytes,105,opt,name=uuid,proto3" json:"uuid,omitempty"`
|
|
||||||
InnerMessage []byte `protobuf:"bytes,199,opt,name=innerMessage,proto3" json:"innerMessage,omitempty"`
|
|
||||||
// detached signature, ascii or binary
|
|
||||||
Signature []byte `protobuf:"bytes,201,opt,name=signature,proto3,oneof" json:"signature,omitempty"`
|
|
||||||
// full GPG key id
|
|
||||||
Signer []byte `protobuf:"bytes,202,opt,name=signer,proto3,oneof" json:"signer,omitempty"`
|
|
||||||
// full GPG signing public key, ascii or binary
|
|
||||||
SigningPubKey []byte `protobuf:"bytes,203,opt,name=signingPubKey,proto3,oneof" json:"signingPubKey,omitempty"`
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) Reset() {
|
|
||||||
*x = MFFileOuter{}
|
|
||||||
mi := &file_mf_proto_msgTypes[1]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*MFFileOuter) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_mf_proto_msgTypes[1]
|
|
||||||
if x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use MFFileOuter.ProtoReflect.Descriptor instead.
|
|
||||||
func (*MFFileOuter) Descriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetVersion() MFFileOuter_Version {
|
|
||||||
if x != nil {
|
|
||||||
return x.Version
|
|
||||||
}
|
|
||||||
return MFFileOuter_VERSION_NONE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetCompressionType() MFFileOuter_CompressionType {
|
|
||||||
if x != nil {
|
|
||||||
return x.CompressionType
|
|
||||||
}
|
|
||||||
return MFFileOuter_COMPRESSION_NONE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetSize() int64 {
|
|
||||||
if x != nil {
|
|
||||||
return x.Size
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetSha256() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.Sha256
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetUuid() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.Uuid
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetInnerMessage() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.InnerMessage
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetSignature() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.Signature
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetSigner() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.Signer
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileOuter) GetSigningPubKey() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.SigningPubKey
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type MFFilePath struct {
|
|
||||||
state protoimpl.MessageState `protogen:"open.v1"`
|
|
||||||
// required attributes:
|
|
||||||
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
|
|
||||||
Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
|
|
||||||
// gotta have at least one:
|
|
||||||
Hashes []*MFFileChecksum `protobuf:"bytes,3,rep,name=hashes,proto3" json:"hashes,omitempty"`
|
|
||||||
// optional per-file metadata
|
|
||||||
MimeType *string `protobuf:"bytes,301,opt,name=mimeType,proto3,oneof" json:"mimeType,omitempty"`
|
|
||||||
Mtime *Timestamp `protobuf:"bytes,302,opt,name=mtime,proto3,oneof" json:"mtime,omitempty"`
|
|
||||||
Ctime *Timestamp `protobuf:"bytes,303,opt,name=ctime,proto3,oneof" json:"ctime,omitempty"`
|
|
||||||
Atime *Timestamp `protobuf:"bytes,304,opt,name=atime,proto3,oneof" json:"atime,omitempty"`
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) Reset() {
|
|
||||||
*x = MFFilePath{}
|
|
||||||
mi := &file_mf_proto_msgTypes[2]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*MFFilePath) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *MFFilePath) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_mf_proto_msgTypes[2]
|
|
||||||
if x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use MFFilePath.ProtoReflect.Descriptor instead.
|
|
||||||
func (*MFFilePath) Descriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) GetPath() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Path
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) GetSize() int64 {
|
|
||||||
if x != nil {
|
|
||||||
return x.Size
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) GetHashes() []*MFFileChecksum {
|
|
||||||
if x != nil {
|
|
||||||
return x.Hashes
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) GetMimeType() string {
|
|
||||||
if x != nil && x.MimeType != nil {
|
|
||||||
return *x.MimeType
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) GetMtime() *Timestamp {
|
|
||||||
if x != nil {
|
|
||||||
return x.Mtime
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) GetCtime() *Timestamp {
|
|
||||||
if x != nil {
|
|
||||||
return x.Ctime
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFilePath) GetAtime() *Timestamp {
|
|
||||||
if x != nil {
|
|
||||||
return x.Atime
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type MFFileChecksum struct {
|
|
||||||
state protoimpl.MessageState `protogen:"open.v1"`
|
|
||||||
// 1.0 golang implementation must write a multihash here
|
|
||||||
// it's ok to only ever use/verify sha256 multihash
|
|
||||||
MultiHash []byte `protobuf:"bytes,1,opt,name=multiHash,proto3" json:"multiHash,omitempty"`
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileChecksum) Reset() {
|
|
||||||
*x = MFFileChecksum{}
|
|
||||||
mi := &file_mf_proto_msgTypes[3]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileChecksum) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*MFFileChecksum) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *MFFileChecksum) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_mf_proto_msgTypes[3]
|
|
||||||
if x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use MFFileChecksum.ProtoReflect.Descriptor instead.
|
|
||||||
func (*MFFileChecksum) Descriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFileChecksum) GetMultiHash() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.MultiHash
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type MFFile struct {
|
|
||||||
state protoimpl.MessageState `protogen:"open.v1"`
|
|
||||||
Version MFFile_Version `protobuf:"varint,100,opt,name=version,proto3,enum=MFFile_Version" json:"version,omitempty"`
|
|
||||||
// required manifest attributes:
|
|
||||||
Files []*MFFilePath `protobuf:"bytes,101,rep,name=files,proto3" json:"files,omitempty"`
|
|
||||||
// uuid is a random v4 UUID generated when creating the manifest
|
|
||||||
// used as part of the signature to prevent replay attacks
|
|
||||||
Uuid []byte `protobuf:"bytes,102,opt,name=uuid,proto3" json:"uuid,omitempty"`
|
|
||||||
// optional manifest attributes 2xx:
|
|
||||||
CreatedAt *Timestamp `protobuf:"bytes,201,opt,name=createdAt,proto3,oneof" json:"createdAt,omitempty"`
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFile) Reset() {
|
|
||||||
*x = MFFile{}
|
|
||||||
mi := &file_mf_proto_msgTypes[4]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFile) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*MFFile) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *MFFile) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_mf_proto_msgTypes[4]
|
|
||||||
if x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use MFFile.ProtoReflect.Descriptor instead.
|
|
||||||
func (*MFFile) Descriptor() ([]byte, []int) {
|
|
||||||
return file_mf_proto_rawDescGZIP(), []int{4}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFile) GetVersion() MFFile_Version {
|
|
||||||
if x != nil {
|
|
||||||
return x.Version
|
|
||||||
}
|
|
||||||
return MFFile_VERSION_NONE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFile) GetFiles() []*MFFilePath {
|
|
||||||
if x != nil {
|
|
||||||
return x.Files
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFile) GetUuid() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.Uuid
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MFFile) GetCreatedAt() *Timestamp {
|
|
||||||
if x != nil {
|
|
||||||
return x.CreatedAt
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var File_mf_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
const file_mf_proto_rawDesc = "" +
|
|
||||||
"\n" +
|
|
||||||
"\bmf.proto\";\n" +
|
|
||||||
"\tTimestamp\x12\x18\n" +
|
|
||||||
"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
|
|
||||||
"\x05nanos\x18\x02 \x01(\x05R\x05nanos\"\xf0\x03\n" +
|
|
||||||
"\vMFFileOuter\x12.\n" +
|
|
||||||
"\aversion\x18e \x01(\x0e2\x14.MFFileOuter.VersionR\aversion\x12F\n" +
|
|
||||||
"\x0fcompressionType\x18f \x01(\x0e2\x1c.MFFileOuter.CompressionTypeR\x0fcompressionType\x12\x12\n" +
|
|
||||||
"\x04size\x18g \x01(\x03R\x04size\x12\x16\n" +
|
|
||||||
"\x06sha256\x18h \x01(\fR\x06sha256\x12\x12\n" +
|
|
||||||
"\x04uuid\x18i \x01(\fR\x04uuid\x12#\n" +
|
|
||||||
"\finnerMessage\x18\xc7\x01 \x01(\fR\finnerMessage\x12\"\n" +
|
|
||||||
"\tsignature\x18\xc9\x01 \x01(\fH\x00R\tsignature\x88\x01\x01\x12\x1c\n" +
|
|
||||||
"\x06signer\x18\xca\x01 \x01(\fH\x01R\x06signer\x88\x01\x01\x12*\n" +
|
|
||||||
"\rsigningPubKey\x18\xcb\x01 \x01(\fH\x02R\rsigningPubKey\x88\x01\x01\",\n" +
|
|
||||||
"\aVersion\x12\x10\n" +
|
|
||||||
"\fVERSION_NONE\x10\x00\x12\x0f\n" +
|
|
||||||
"\vVERSION_ONE\x10\x01\"=\n" +
|
|
||||||
"\x0fCompressionType\x12\x14\n" +
|
|
||||||
"\x10COMPRESSION_NONE\x10\x00\x12\x14\n" +
|
|
||||||
"\x10COMPRESSION_ZSTD\x10\x01B\f\n" +
|
|
||||||
"\n" +
|
|
||||||
"_signatureB\t\n" +
|
|
||||||
"\a_signerB\x10\n" +
|
|
||||||
"\x0e_signingPubKey\"\xa2\x02\n" +
|
|
||||||
"\n" +
|
|
||||||
"MFFilePath\x12\x12\n" +
|
|
||||||
"\x04path\x18\x01 \x01(\tR\x04path\x12\x12\n" +
|
|
||||||
"\x04size\x18\x02 \x01(\x03R\x04size\x12'\n" +
|
|
||||||
"\x06hashes\x18\x03 \x03(\v2\x0f.MFFileChecksumR\x06hashes\x12 \n" +
|
|
||||||
"\bmimeType\x18\xad\x02 \x01(\tH\x00R\bmimeType\x88\x01\x01\x12&\n" +
|
|
||||||
"\x05mtime\x18\xae\x02 \x01(\v2\n" +
|
|
||||||
".TimestampH\x01R\x05mtime\x88\x01\x01\x12&\n" +
|
|
||||||
"\x05ctime\x18\xaf\x02 \x01(\v2\n" +
|
|
||||||
".TimestampH\x02R\x05ctime\x88\x01\x01\x12&\n" +
|
|
||||||
"\x05atime\x18\xb0\x02 \x01(\v2\n" +
|
|
||||||
".TimestampH\x03R\x05atime\x88\x01\x01B\v\n" +
|
|
||||||
"\t_mimeTypeB\b\n" +
|
|
||||||
"\x06_mtimeB\b\n" +
|
|
||||||
"\x06_ctimeB\b\n" +
|
|
||||||
"\x06_atime\".\n" +
|
|
||||||
"\x0eMFFileChecksum\x12\x1c\n" +
|
|
||||||
"\tmultiHash\x18\x01 \x01(\fR\tmultiHash\"\xd6\x01\n" +
|
|
||||||
"\x06MFFile\x12)\n" +
|
|
||||||
"\aversion\x18d \x01(\x0e2\x0f.MFFile.VersionR\aversion\x12!\n" +
|
|
||||||
"\x05files\x18e \x03(\v2\v.MFFilePathR\x05files\x12\x12\n" +
|
|
||||||
"\x04uuid\x18f \x01(\fR\x04uuid\x12.\n" +
|
|
||||||
"\tcreatedAt\x18\xc9\x01 \x01(\v2\n" +
|
|
||||||
".TimestampH\x00R\tcreatedAt\x88\x01\x01\",\n" +
|
|
||||||
"\aVersion\x12\x10\n" +
|
|
||||||
"\fVERSION_NONE\x10\x00\x12\x0f\n" +
|
|
||||||
"\vVERSION_ONE\x10\x01B\f\n" +
|
|
||||||
"\n" +
|
|
||||||
"_createdAtB\x1dZ\x1bgit.eeqj.de/sneak/mfer/mferb\x06proto3"
|
|
||||||
|
|
||||||
var (
|
|
||||||
file_mf_proto_rawDescOnce sync.Once
|
|
||||||
file_mf_proto_rawDescData []byte
|
|
||||||
)
|
|
||||||
|
|
||||||
func file_mf_proto_rawDescGZIP() []byte {
|
|
||||||
file_mf_proto_rawDescOnce.Do(func() {
|
|
||||||
file_mf_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mf_proto_rawDesc), len(file_mf_proto_rawDesc)))
|
|
||||||
})
|
|
||||||
return file_mf_proto_rawDescData
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_mf_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
|
|
||||||
var file_mf_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
|
||||||
var file_mf_proto_goTypes = []any{
|
|
||||||
(MFFileOuter_Version)(0), // 0: MFFileOuter.Version
|
|
||||||
(MFFileOuter_CompressionType)(0), // 1: MFFileOuter.CompressionType
|
|
||||||
(MFFile_Version)(0), // 2: MFFile.Version
|
|
||||||
(*Timestamp)(nil), // 3: Timestamp
|
|
||||||
(*MFFileOuter)(nil), // 4: MFFileOuter
|
|
||||||
(*MFFilePath)(nil), // 5: MFFilePath
|
|
||||||
(*MFFileChecksum)(nil), // 6: MFFileChecksum
|
|
||||||
(*MFFile)(nil), // 7: MFFile
|
|
||||||
}
|
|
||||||
var file_mf_proto_depIdxs = []int32{
|
|
||||||
0, // 0: MFFileOuter.version:type_name -> MFFileOuter.Version
|
|
||||||
1, // 1: MFFileOuter.compressionType:type_name -> MFFileOuter.CompressionType
|
|
||||||
6, // 2: MFFilePath.hashes:type_name -> MFFileChecksum
|
|
||||||
3, // 3: MFFilePath.mtime:type_name -> Timestamp
|
|
||||||
3, // 4: MFFilePath.ctime:type_name -> Timestamp
|
|
||||||
3, // 5: MFFilePath.atime:type_name -> Timestamp
|
|
||||||
2, // 6: MFFile.version:type_name -> MFFile.Version
|
|
||||||
5, // 7: MFFile.files:type_name -> MFFilePath
|
|
||||||
3, // 8: MFFile.createdAt:type_name -> Timestamp
|
|
||||||
9, // [9:9] is the sub-list for method output_type
|
|
||||||
9, // [9:9] is the sub-list for method input_type
|
|
||||||
9, // [9:9] is the sub-list for extension type_name
|
|
||||||
9, // [9:9] is the sub-list for extension extendee
|
|
||||||
0, // [0:9] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_mf_proto_init() }
|
|
||||||
func file_mf_proto_init() {
|
|
||||||
if File_mf_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
file_mf_proto_msgTypes[1].OneofWrappers = []any{}
|
|
||||||
file_mf_proto_msgTypes[2].OneofWrappers = []any{}
|
|
||||||
file_mf_proto_msgTypes[4].OneofWrappers = []any{}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_mf_proto_rawDesc), len(file_mf_proto_rawDesc)),
|
|
||||||
NumEnums: 3,
|
|
||||||
NumMessages: 5,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 0,
|
|
||||||
},
|
|
||||||
GoTypes: file_mf_proto_goTypes,
|
|
||||||
DependencyIndexes: file_mf_proto_depIdxs,
|
|
||||||
EnumInfos: file_mf_proto_enumTypes,
|
|
||||||
MessageInfos: file_mf_proto_msgTypes,
|
|
||||||
}.Build()
|
|
||||||
File_mf_proto = out.File
|
|
||||||
file_mf_proto_goTypes = nil
|
|
||||||
file_mf_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
@@ -1,37 +1,26 @@
|
|||||||
syntax = "proto3";
|
syntax = "proto3";
|
||||||
|
|
||||||
option go_package = "git.eeqj.de/sneak/mfer/mfer";
|
option go_package = "git.eeqj.de/sneak/mfer";
|
||||||
|
|
||||||
message Timestamp {
|
message Timestamp {
|
||||||
int64 seconds = 1;
|
int64 seconds = 1;
|
||||||
int32 nanos = 2;
|
int32 nanos = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message MFFileOuter {
|
message MFFile {
|
||||||
enum Version {
|
enum Version {
|
||||||
VERSION_NONE = 0;
|
NONE = 0;
|
||||||
VERSION_ONE = 1; // only one for now
|
ONE = 1; // only one for now
|
||||||
}
|
}
|
||||||
|
|
||||||
// required mffile root attributes 1xx
|
// required mffile root attributes 1xx
|
||||||
Version version = 101;
|
Version version = 1;
|
||||||
|
bytes innerMessage = 2;
|
||||||
enum CompressionType {
|
|
||||||
COMPRESSION_NONE = 0;
|
|
||||||
COMPRESSION_ZSTD = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
CompressionType compressionType = 102;
|
|
||||||
|
|
||||||
// these are used solely to detect corruption/truncation
|
// these are used solely to detect corruption/truncation
|
||||||
// and not for cryptographic integrity.
|
// and not for cryptographic integrity.
|
||||||
int64 size = 103;
|
int64 size = 3;
|
||||||
bytes sha256 = 104;
|
bytes sha256 = 4;
|
||||||
|
|
||||||
// uuid must match the uuid in the inner message
|
|
||||||
bytes uuid = 105;
|
|
||||||
|
|
||||||
bytes innerMessage = 199;
|
|
||||||
// 2xx for optional manifest root attributes
|
// 2xx for optional manifest root attributes
|
||||||
// think we might use gosignify instead of gpg:
|
// think we might use gosignify instead of gpg:
|
||||||
// github.com/frankbraun/gosignify
|
// github.com/frankbraun/gosignify
|
||||||
@@ -46,9 +35,6 @@ message MFFileOuter {
|
|||||||
|
|
||||||
message MFFilePath {
|
message MFFilePath {
|
||||||
// required attributes:
|
// required attributes:
|
||||||
// Path invariants: must be valid UTF-8, use forward slashes only,
|
|
||||||
// be relative (no leading /), contain no ".." segments, and no
|
|
||||||
// empty segments (no "//").
|
|
||||||
string path = 1;
|
string path = 1;
|
||||||
int64 size = 2;
|
int64 size = 2;
|
||||||
|
|
||||||
@@ -68,19 +54,15 @@ message MFFileChecksum {
|
|||||||
bytes multiHash = 1;
|
bytes multiHash = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message MFFile {
|
message MFFileInner {
|
||||||
enum Version {
|
enum Version {
|
||||||
VERSION_NONE = 0;
|
NONE = 0;
|
||||||
VERSION_ONE = 1; // only one for now
|
ONE = 1; // only one for now
|
||||||
}
|
}
|
||||||
Version version = 100;
|
Version version = 1;
|
||||||
|
|
||||||
// required manifest attributes:
|
// required manifest attributes:
|
||||||
repeated MFFilePath files = 101;
|
repeated MFFilePath files = 2;
|
||||||
|
|
||||||
// uuid is a random v4 UUID generated when creating the manifest
|
|
||||||
// used as part of the signature to prevent replay attacks
|
|
||||||
bytes uuid = 102;
|
|
||||||
|
|
||||||
// optional manifest attributes 2xx:
|
// optional manifest attributes 2xx:
|
||||||
optional Timestamp createdAt = 201;
|
optional Timestamp createdAt = 201;
|
||||||
|
|||||||
13
mfer/mfer_test.go
Normal file
13
mfer/mfer_test.go
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
package mfer
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// Add those variables as well
|
||||||
|
var (
|
||||||
|
existingFolder = "./testdata/a-folder-that-exists"
|
||||||
|
nonExistingFolder = "./testdata/a-folder-that-does-not-exists"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestManifestGeneration(t *testing.T) {
|
||||||
|
|
||||||
|
}
|
||||||
430
mfer/scanner.go
430
mfer/scanner.go
@@ -1,430 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"io/fs"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/dustin/go-humanize"
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"sneak.berlin/go/mfer/internal/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Phase 1: Enumeration
|
|
||||||
// ---------------------
|
|
||||||
// Walking directories and calling stat() on files to collect metadata.
|
|
||||||
// Builds the list of files to be scanned. Relatively fast (metadata only).
|
|
||||||
|
|
||||||
// EnumerateStatus contains progress information for the enumeration phase.
|
|
||||||
type EnumerateStatus struct {
|
|
||||||
FilesFound FileCount // Number of files discovered so far
|
|
||||||
BytesFound FileSize // Total size of discovered files (from stat)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 2: Scan (ToManifest)
|
|
||||||
// --------------------------
|
|
||||||
// Reading file contents and computing hashes for manifest generation.
|
|
||||||
// This is the expensive phase that reads all file data.
|
|
||||||
|
|
||||||
// ScanStatus contains progress information for the scan phase, during which
// file contents are read and hashed for manifest generation.
type ScanStatus struct {
	TotalFiles   FileCount     // Total number of files to scan
	ScannedFiles FileCount     // Number of files fully scanned so far
	TotalBytes   FileSize      // Total bytes to read (sum of all file sizes)
	ScannedBytes FileSize      // Bytes read so far
	BytesPerSec  float64       // Current throughput rate (bytes/second since scan start)
	ETA          time.Duration // Estimated time to completion (0 when complete)
}
|
|
||||||
|
|
||||||
// ScannerOptions configures scanner behavior. The zero value is usable:
// dotfiles are excluded, symlinks are skipped, the OS filesystem is used,
// and no signing is performed.
type ScannerOptions struct {
	IncludeDotfiles bool            // Include files and directories starting with a dot (default: exclude)
	FollowSymLinks  bool            // Resolve symlinks instead of skipping them
	Fs              afero.Fs        // Filesystem to use, defaults to OsFs if nil
	SigningOptions  *SigningOptions // GPG signing options (nil = no signing)
}
|
|
||||||
|
|
||||||
// FileEntry represents a file that has been enumerated (stat metadata only;
// contents are not read until the scan phase).
type FileEntry struct {
	Path    RelFilePath // Relative path (used in manifest)
	AbsPath AbsFilePath // Absolute path (used for reading file content)
	Size    FileSize    // File size in bytes
	Mtime   ModTime     // Last modification time
	Ctime   time.Time   // Creation time (platform-dependent; not populated by enumeration yet)
}
|
|
||||||
|
|
||||||
// Scanner accumulates files and generates manifests from them.
// It is safe for concurrent use: the mutex guards files and totalBytes.
// Contains a sync.RWMutex, so values must not be copied; use *Scanner.
type Scanner struct {
	mu         sync.RWMutex
	files      []*FileEntry
	totalBytes FileSize // cached sum of all file sizes, maintained on append
	options    *ScannerOptions
	fs         afero.Fs // resolved filesystem (options.Fs or OsFs)
}
|
|
||||||
|
|
||||||
// NewScanner creates a new Scanner with default options.
|
|
||||||
func NewScanner() *Scanner {
|
|
||||||
return NewScannerWithOptions(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewScannerWithOptions creates a new Scanner with the given options.
|
|
||||||
func NewScannerWithOptions(opts *ScannerOptions) *Scanner {
|
|
||||||
if opts == nil {
|
|
||||||
opts = &ScannerOptions{}
|
|
||||||
}
|
|
||||||
fs := opts.Fs
|
|
||||||
if fs == nil {
|
|
||||||
fs = afero.NewOsFs()
|
|
||||||
}
|
|
||||||
return &Scanner{
|
|
||||||
files: make([]*FileEntry, 0),
|
|
||||||
options: opts,
|
|
||||||
fs: fs,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumerateFile adds a single file to the scanner, calling stat() to get metadata.
|
|
||||||
func (s *Scanner) EnumerateFile(filePath string) error {
|
|
||||||
abs, err := filepath.Abs(filePath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
info, err := s.fs.Stat(abs)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// For single files, use the filename as the relative path
|
|
||||||
basePath := filepath.Dir(abs)
|
|
||||||
return s.enumerateFileWithInfo(filepath.Base(abs), basePath, info, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumeratePath walks a directory path and adds all files to the scanner.
|
|
||||||
// If progress is non-nil, status updates are sent as files are discovered.
|
|
||||||
// The progress channel is closed when the method returns.
|
|
||||||
func (s *Scanner) EnumeratePath(inputPath string, progress chan<- EnumerateStatus) error {
|
|
||||||
if progress != nil {
|
|
||||||
defer close(progress)
|
|
||||||
}
|
|
||||||
abs, err := filepath.Abs(inputPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
afs := afero.NewReadOnlyFs(afero.NewBasePathFs(s.fs, abs))
|
|
||||||
return s.enumerateFS(afs, abs, progress)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumeratePaths walks multiple directory paths and adds all files to the scanner.
|
|
||||||
// If progress is non-nil, status updates are sent as files are discovered.
|
|
||||||
// The progress channel is closed when the method returns.
|
|
||||||
func (s *Scanner) EnumeratePaths(progress chan<- EnumerateStatus, inputPaths ...string) error {
|
|
||||||
if progress != nil {
|
|
||||||
defer close(progress)
|
|
||||||
}
|
|
||||||
for _, p := range inputPaths {
|
|
||||||
abs, err := filepath.Abs(p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
afs := afero.NewReadOnlyFs(afero.NewBasePathFs(s.fs, abs))
|
|
||||||
if err := s.enumerateFS(afs, abs, progress); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumerateFS walks an afero filesystem and adds all files to the scanner.
|
|
||||||
// If progress is non-nil, status updates are sent as files are discovered.
|
|
||||||
// The progress channel is closed when the method returns.
|
|
||||||
// basePath is used to compute absolute paths for file reading.
|
|
||||||
func (s *Scanner) EnumerateFS(afs afero.Fs, basePath string, progress chan<- EnumerateStatus) error {
|
|
||||||
if progress != nil {
|
|
||||||
defer close(progress)
|
|
||||||
}
|
|
||||||
return s.enumerateFS(afs, basePath, progress)
|
|
||||||
}
|
|
||||||
|
|
||||||
// enumerateFS is the internal implementation that doesn't close the progress channel.
|
|
||||||
func (s *Scanner) enumerateFS(afs afero.Fs, basePath string, progress chan<- EnumerateStatus) error {
|
|
||||||
return afero.Walk(afs, "/", func(p string, info fs.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !s.options.IncludeDotfiles && IsHiddenPath(p) {
|
|
||||||
if info.IsDir() {
|
|
||||||
return filepath.SkipDir
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return s.enumerateFileWithInfo(p, basePath, info, progress)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// enumerateFileWithInfo adds a file with pre-existing fs.FileInfo.
//
// filePath is the walk-relative path (may carry a leading slash from
// afero.Walk); basePath is joined with it to form the absolute path used
// later to open the file. Directories are ignored, symlinks are skipped or
// resolved per options.FollowSymLinks, and a non-nil progress channel
// receives a best-effort status update after each append.
func (s *Scanner) enumerateFileWithInfo(filePath string, basePath string, info fs.FileInfo, progress chan<- EnumerateStatus) error {
	if info.IsDir() {
		// Manifests contain only files, directories are implied
		return nil
	}

	// Clean the path - remove leading slash if present (afero.Walk paths
	// are rooted at "/" within the rebased filesystem).
	cleanPath := filePath
	if len(cleanPath) > 0 && cleanPath[0] == '/' {
		cleanPath = cleanPath[1:]
	}

	// Compute absolute path for file reading
	absPath := filepath.Join(basePath, cleanPath)

	// Handle symlinks
	if info.Mode()&fs.ModeSymlink != 0 {
		if !s.options.FollowSymLinks {
			// Skip symlinks when not following them
			return nil
		}
		// Resolve symlink to get real file info
		realPath, err := filepath.EvalSymlinks(absPath)
		if err != nil {
			// Skip broken symlinks (deliberate best-effort: a dangling
			// link does not abort the whole enumeration)
			return nil
		}
		realInfo, err := s.fs.Stat(realPath)
		if err != nil {
			return nil
		}
		// Skip if symlink points to a directory
		if realInfo.IsDir() {
			return nil
		}
		// Use resolved path for reading, but keep original path in manifest
		absPath = realPath
		info = realInfo
	}

	entry := &FileEntry{
		Path:    RelFilePath(cleanPath),
		AbsPath: AbsFilePath(absPath),
		Size:    FileSize(info.Size()),
		Mtime:   ModTime(info.ModTime()),
		// Note: Ctime not available from fs.FileInfo on all platforms
		// Will need platform-specific code to extract it
	}

	// Append under the write lock and snapshot the counters so the status
	// update below needs no lock.
	s.mu.Lock()
	s.files = append(s.files, entry)
	s.totalBytes += entry.Size
	filesFound := FileCount(len(s.files))
	bytesFound := s.totalBytes
	s.mu.Unlock()

	// Non-blocking: dropped if the receiver is not keeping up.
	sendEnumerateStatus(progress, EnumerateStatus{
		FilesFound: filesFound,
		BytesFound: bytesFound,
	})

	return nil
}
|
|
||||||
|
|
||||||
// Files returns a copy of all files added to the scanner.
|
|
||||||
func (s *Scanner) Files() []*FileEntry {
|
|
||||||
s.mu.RLock()
|
|
||||||
defer s.mu.RUnlock()
|
|
||||||
out := make([]*FileEntry, len(s.files))
|
|
||||||
copy(out, s.files)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileCount returns the number of files in the scanner.
|
|
||||||
func (s *Scanner) FileCount() FileCount {
|
|
||||||
s.mu.RLock()
|
|
||||||
defer s.mu.RUnlock()
|
|
||||||
return FileCount(len(s.files))
|
|
||||||
}
|
|
||||||
|
|
||||||
// TotalBytes returns the total size of all files in the scanner.
|
|
||||||
func (s *Scanner) TotalBytes() FileSize {
|
|
||||||
s.mu.RLock()
|
|
||||||
defer s.mu.RUnlock()
|
|
||||||
return s.totalBytes
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToManifest reads all file contents, computes hashes, and generates a manifest.
// If progress is non-nil, status updates are sent approximately once per second
// and the channel is closed when the method returns.
// The manifest is written to the provided io.Writer.
//
// Cancellation is checked between files (not mid-file). Per-file hashing
// progress is relayed by a short-lived goroutine per file; each goroutine is
// joined (wg.Wait) before the loop's counters are updated, so scannedFiles /
// scannedBytes / lastProgressTime are never accessed concurrently.
func (s *Scanner) ToManifest(ctx context.Context, w io.Writer, progress chan<- ScanStatus) error {
	if progress != nil {
		defer close(progress)
	}

	// Snapshot the file list and totals under the read lock so later
	// enumeration cannot race with this scan.
	s.mu.RLock()
	files := make([]*FileEntry, len(s.files))
	copy(files, s.files)
	totalFiles := FileCount(len(files))
	var totalBytes FileSize
	for _, f := range files {
		totalBytes += f.Size
	}
	s.mu.RUnlock()

	builder := NewBuilder()
	if s.options.SigningOptions != nil {
		builder.SetSigningOptions(s.options.SigningOptions)
	}

	var scannedFiles FileCount
	var scannedBytes FileSize
	lastProgressTime := time.Now()
	startTime := time.Now()

	for _, entry := range files {
		// Check for cancellation between files.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		// Open file
		f, err := s.fs.Open(string(entry.AbsPath))
		if err != nil {
			return err
		}

		// Create progress channel for this file; the goroutine below
		// translates per-file hash progress into overall ScanStatus.
		var fileProgress chan FileHashProgress
		var wg sync.WaitGroup
		if progress != nil {
			fileProgress = make(chan FileHashProgress, 1)
			wg.Add(1)
			go func(baseScannedBytes FileSize) {
				defer wg.Done()
				for p := range fileProgress {
					// Send progress at most once per second
					now := time.Now()
					if now.Sub(lastProgressTime) >= time.Second {
						elapsed := now.Sub(startTime).Seconds()
						currentBytes := baseScannedBytes + p.BytesRead
						var rate float64
						var eta time.Duration
						if elapsed > 0 && currentBytes > 0 {
							rate = float64(currentBytes) / elapsed
							remainingBytes := totalBytes - currentBytes
							if rate > 0 {
								eta = time.Duration(float64(remainingBytes)/rate) * time.Second
							}
						}
						sendScanStatus(progress, ScanStatus{
							TotalFiles:   totalFiles,
							ScannedFiles: scannedFiles,
							TotalBytes:   totalBytes,
							ScannedBytes: currentBytes,
							BytesPerSec:  rate,
							ETA:          eta,
						})
						lastProgressTime = now
					}
				}
			}(scannedBytes)
		}

		// Add to manifest with progress channel
		bytesRead, err := builder.AddFile(
			entry.Path,
			entry.Size,
			entry.Mtime,
			f,
			fileProgress,
		)
		// Read-only handle; close error intentionally ignored.
		_ = f.Close()

		// Close channel and wait for goroutine to finish before touching
		// the shared counters again.
		if fileProgress != nil {
			close(fileProgress)
			wg.Wait()
		}

		if err != nil {
			return err
		}

		log.Verbosef("+ %s (%s)", entry.Path, humanize.IBytes(uint64(bytesRead)))

		scannedFiles++
		scannedBytes += bytesRead
	}

	// Send final progress (ETA is 0 at completion)
	if progress != nil {
		elapsed := time.Since(startTime).Seconds()
		var rate float64
		if elapsed > 0 {
			rate = float64(scannedBytes) / elapsed
		}
		sendScanStatus(progress, ScanStatus{
			TotalFiles:   totalFiles,
			ScannedFiles: scannedFiles,
			TotalBytes:   totalBytes,
			ScannedBytes: scannedBytes,
			BytesPerSec:  rate,
			ETA:          0,
		})
	}

	// Build and write manifest
	return builder.Build(w)
}
|
|
||||||
|
|
||||||
// IsHiddenPath reports whether the path or any of its path components is
// hidden, i.e. starts with a dot. Paths must use forward slashes. The
// special names "." (current directory) and "/" (root) are not hidden.
func IsHiddenPath(p string) bool {
	cur := path.Clean(p)
	switch cur {
	case ".", "/":
		return false
	}
	if strings.HasPrefix(cur, ".") {
		return true
	}
	// Walk up the path one component at a time, checking each base name.
	for cur != "" {
		dir, base := path.Split(cur)
		if strings.HasPrefix(base, ".") {
			return true
		}
		if dir == "" {
			return false
		}
		cur = strings.TrimSuffix(dir, "/")
	}
	return false
}
|
|
||||||
|
|
||||||
// sendEnumerateStatus sends a status update without blocking.
|
|
||||||
// If the channel is full, the update is dropped.
|
|
||||||
func sendEnumerateStatus(ch chan<- EnumerateStatus, status EnumerateStatus) {
|
|
||||||
if ch == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case ch <- status:
|
|
||||||
default:
|
|
||||||
// Channel full, drop this update
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendScanStatus sends a status update without blocking.
|
|
||||||
// If the channel is full, the update is dropped.
|
|
||||||
func sendScanStatus(ch chan<- ScanStatus, status ScanStatus) {
|
|
||||||
if ch == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case ch <- status:
|
|
||||||
default:
|
|
||||||
// Channel full, drop this update
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,364 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/spf13/afero"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestNewScanner verifies a freshly constructed scanner starts empty.
func TestNewScanner(t *testing.T) {
	s := NewScanner()
	assert.NotNil(t, s)
	assert.Equal(t, FileCount(0), s.FileCount())
	assert.Equal(t, FileSize(0), s.TotalBytes())
}
|
|
||||||
|
|
||||||
// TestNewScannerWithOptions covers both the nil-options default path and
// explicit options with an in-memory filesystem.
func TestNewScannerWithOptions(t *testing.T) {
	t.Run("nil options", func(t *testing.T) {
		s := NewScannerWithOptions(nil)
		assert.NotNil(t, s)
	})

	t.Run("with options", func(t *testing.T) {
		fs := afero.NewMemMapFs()
		opts := &ScannerOptions{
			IncludeDotfiles: true,
			FollowSymLinks:  true,
			Fs:              fs,
		}
		s := NewScannerWithOptions(opts)
		assert.NotNil(t, s)
	})
}
|
|
||||||
|
|
||||||
// TestScannerEnumerateFile checks that a single file is recorded with its
// base name as the relative path and the correct size.
func TestScannerEnumerateFile(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, afero.WriteFile(fs, "/test.txt", []byte("hello world"), 0o644))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumerateFile("/test.txt")
	require.NoError(t, err)

	assert.Equal(t, FileCount(1), s.FileCount())
	assert.Equal(t, FileSize(11), s.TotalBytes())

	files := s.Files()
	require.Len(t, files, 1)
	assert.Equal(t, RelFilePath("test.txt"), files[0].Path)
	assert.Equal(t, FileSize(11), files[0].Size)
}
|
|
||||||
|
|
||||||
// TestScannerEnumerateFileMissing ensures stat failure on a missing file
// surfaces as an error.
func TestScannerEnumerateFileMissing(t *testing.T) {
	fs := afero.NewMemMapFs()
	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumerateFile("/nonexistent.txt")
	assert.Error(t, err)
}
|
|
||||||
|
|
||||||
// TestScannerEnumeratePath verifies a recursive walk picks up nested files
// and accumulates their sizes.
func TestScannerEnumeratePath(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir/subdir", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("one"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("two"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/testdir/subdir/file3.txt", []byte("three"), 0o644))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumeratePath("/testdir", nil)
	require.NoError(t, err)

	assert.Equal(t, FileCount(3), s.FileCount())
	assert.Equal(t, FileSize(3+3+5), s.TotalBytes())
}
|
|
||||||
|
|
||||||
// TestScannerEnumeratePathWithProgress checks that progress updates arrive
// on the channel, that the channel is closed on return, and that the final
// update reflects all discovered files.
func TestScannerEnumeratePathWithProgress(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("one"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("two"), 0o644))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	progress := make(chan EnumerateStatus, 10)

	err := s.EnumeratePath("/testdir", progress)
	require.NoError(t, err)

	// Ranging terminates only because EnumeratePath closed the channel.
	var updates []EnumerateStatus
	for p := range progress {
		updates = append(updates, p)
	}

	assert.NotEmpty(t, updates)
	// Final update should show all files
	final := updates[len(updates)-1]
	assert.Equal(t, FileCount(2), final.FilesFound)
	assert.Equal(t, FileSize(6), final.BytesFound)
}
|
|
||||||
|
|
||||||
// TestScannerEnumeratePaths verifies multiple roots accumulate into one scanner.
func TestScannerEnumeratePaths(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/dir1", 0o755))
	require.NoError(t, fs.MkdirAll("/dir2", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/dir1/a.txt", []byte("aaa"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/dir2/b.txt", []byte("bbb"), 0o644))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumeratePaths(nil, "/dir1", "/dir2")
	require.NoError(t, err)

	assert.Equal(t, FileCount(2), s.FileCount())
}
|
|
||||||
|
|
||||||
// TestScannerExcludeDotfiles verifies hidden files and hidden directory
// subtrees are excluded by default and included when IncludeDotfiles is set.
func TestScannerExcludeDotfiles(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir/.hidden", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/testdir/visible.txt", []byte("visible"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/testdir/.hidden.txt", []byte("hidden"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/testdir/.hidden/inside.txt", []byte("inside"), 0o644))

	t.Run("exclude by default", func(t *testing.T) {
		s := NewScannerWithOptions(&ScannerOptions{Fs: fs, IncludeDotfiles: false})
		err := s.EnumeratePath("/testdir", nil)
		require.NoError(t, err)

		assert.Equal(t, FileCount(1), s.FileCount())
		files := s.Files()
		assert.Equal(t, RelFilePath("visible.txt"), files[0].Path)
	})

	t.Run("include when enabled", func(t *testing.T) {
		s := NewScannerWithOptions(&ScannerOptions{Fs: fs, IncludeDotfiles: true})
		err := s.EnumeratePath("/testdir", nil)
		require.NoError(t, err)

		assert.Equal(t, FileCount(3), s.FileCount())
	})
}
|
|
||||||
|
|
||||||
// TestScannerToManifest generates a manifest from two files and checks the
// output begins with the manifest magic bytes.
func TestScannerToManifest(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file1.txt", []byte("content one"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file2.txt", []byte("content two"), 0o644))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumeratePath("/testdir", nil)
	require.NoError(t, err)

	var buf bytes.Buffer
	err = s.ToManifest(context.Background(), &buf, nil)
	require.NoError(t, err)

	// Manifest should have magic bytes
	assert.True(t, buf.Len() > 0)
	assert.Equal(t, MAGIC, string(buf.Bytes()[:8]))
}
|
|
||||||
|
|
||||||
// TestScannerToManifestWithProgress verifies scan progress updates are sent,
// the channel is closed, and the final update reports full completion.
func TestScannerToManifestWithProgress(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file.txt", bytes.Repeat([]byte("x"), 1000), 0o644))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumeratePath("/testdir", nil)
	require.NoError(t, err)

	var buf bytes.Buffer
	progress := make(chan ScanStatus, 10)

	err = s.ToManifest(context.Background(), &buf, progress)
	require.NoError(t, err)

	var updates []ScanStatus
	for p := range progress {
		updates = append(updates, p)
	}

	assert.NotEmpty(t, updates)
	// Final update should show completion
	final := updates[len(updates)-1]
	assert.Equal(t, FileCount(1), final.TotalFiles)
	assert.Equal(t, FileCount(1), final.ScannedFiles)
	assert.Equal(t, FileSize(1000), final.TotalBytes)
	assert.Equal(t, FileSize(1000), final.ScannedBytes)
}
|
|
||||||
|
|
||||||
// TestScannerToManifestContextCancellation cancels before scanning starts
// and expects context.Canceled from ToManifest's per-file cancellation check.
func TestScannerToManifestContextCancellation(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir", 0o755))
	// Create many files to ensure we have time to cancel
	for i := 0; i < 100; i++ {
		name := string(rune('a'+i%26)) + string(rune('0'+i/26)) + ".txt"
		require.NoError(t, afero.WriteFile(fs, "/testdir/"+name, bytes.Repeat([]byte("x"), 100), 0o644))
	}

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumeratePath("/testdir", nil)
	require.NoError(t, err)

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // Cancel immediately

	var buf bytes.Buffer
	err = s.ToManifest(ctx, &buf, nil)
	assert.ErrorIs(t, err, context.Canceled)
}
|
|
||||||
|
|
||||||
// TestScannerToManifestEmptyScanner confirms a scanner with no files still
// produces a valid manifest header.
func TestScannerToManifestEmptyScanner(t *testing.T) {
	fs := afero.NewMemMapFs()
	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})

	var buf bytes.Buffer
	err := s.ToManifest(context.Background(), &buf, nil)
	require.NoError(t, err)

	// Should still produce a valid manifest
	assert.True(t, buf.Len() > 0)
	assert.Equal(t, MAGIC, string(buf.Bytes()[:8]))
}
|
|
||||||
|
|
||||||
// TestScannerFilesCopiesSlice ensures Files() returns a fresh slice each call
// rather than exposing the scanner's internal storage.
func TestScannerFilesCopiesSlice(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, afero.WriteFile(fs, "/test.txt", []byte("hello"), 0o644))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	require.NoError(t, s.EnumerateFile("/test.txt"))

	files1 := s.Files()
	files2 := s.Files()

	// Should be different slices
	assert.NotSame(t, &files1[0], &files2[0])
}
|
|
||||||
|
|
||||||
// TestScannerEnumerateFS walks a caller-supplied (rebased) afero filesystem.
func TestScannerEnumerateFS(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir/sub", 0o755))
	require.NoError(t, afero.WriteFile(fs, "/testdir/file.txt", []byte("hello"), 0o644))
	require.NoError(t, afero.WriteFile(fs, "/testdir/sub/nested.txt", []byte("world"), 0o644))

	// Create a basepath filesystem
	baseFs := afero.NewBasePathFs(fs, "/testdir")

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	err := s.EnumerateFS(baseFs, "/testdir", nil)
	require.NoError(t, err)

	assert.Equal(t, FileCount(2), s.FileCount())
}
|
|
||||||
|
|
||||||
// TestSendEnumerateStatusNonBlocking proves the helper returns promptly even
// when no receiver is ready on an unbuffered channel.
func TestSendEnumerateStatusNonBlocking(t *testing.T) {
	// Channel with no buffer - send should not block
	ch := make(chan EnumerateStatus)

	// This should not block
	done := make(chan bool)
	go func() {
		sendEnumerateStatus(ch, EnumerateStatus{FilesFound: 1})
		done <- true
	}()

	select {
	case <-done:
		// Success - did not block
	case <-time.After(100 * time.Millisecond):
		t.Fatal("sendEnumerateStatus blocked on full channel")
	}
}
|
|
||||||
|
|
||||||
// TestSendScanStatusNonBlocking proves the helper returns promptly even when
// no receiver is ready on an unbuffered channel.
func TestSendScanStatusNonBlocking(t *testing.T) {
	// Channel with no buffer - send should not block
	ch := make(chan ScanStatus)

	done := make(chan bool)
	go func() {
		sendScanStatus(ch, ScanStatus{ScannedFiles: 1})
		done <- true
	}()

	select {
	case <-done:
		// Success - did not block
	case <-time.After(100 * time.Millisecond):
		t.Fatal("sendScanStatus blocked on full channel")
	}
}
|
|
||||||
|
|
||||||
// TestSendStatusNilChannel ensures both send helpers are nil-safe no-ops.
func TestSendStatusNilChannel(t *testing.T) {
	// Should not panic with nil channel
	sendEnumerateStatus(nil, EnumerateStatus{})
	sendScanStatus(nil, ScanStatus{})
}
|
|
||||||
|
|
||||||
// TestScannerFileEntryFields spot-checks every populated FileEntry field for
// a single enumerated file (Path, AbsPath, Size, Mtime).
func TestScannerFileEntryFields(t *testing.T) {
	fs := afero.NewMemMapFs()
	now := time.Now().Truncate(time.Second)
	require.NoError(t, afero.WriteFile(fs, "/test.txt", []byte("content"), 0o644))
	require.NoError(t, fs.Chtimes("/test.txt", now, now))

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	require.NoError(t, s.EnumerateFile("/test.txt"))

	files := s.Files()
	require.Len(t, files, 1)

	entry := files[0]
	assert.Equal(t, RelFilePath("test.txt"), entry.Path)
	assert.Contains(t, string(entry.AbsPath), "test.txt")
	assert.Equal(t, FileSize(7), entry.Size)
	// Mtime should be set (within a second of now)
	assert.WithinDuration(t, now, time.Time(entry.Mtime), 2*time.Second)
}
|
|
||||||
|
|
||||||
// TestScannerLargeFileEnumeration enumerates 100 small files with a progress
// channel and checks the aggregate counts.
func TestScannerLargeFileEnumeration(t *testing.T) {
	fs := afero.NewMemMapFs()
	require.NoError(t, fs.MkdirAll("/testdir", 0o755))

	// Create 100 files
	for i := 0; i < 100; i++ {
		name := "/testdir/" + string(rune('a'+i%26)) + string(rune('0'+i/26%10)) + ".txt"
		require.NoError(t, afero.WriteFile(fs, name, []byte("data"), 0o644))
	}

	s := NewScannerWithOptions(&ScannerOptions{Fs: fs})
	progress := make(chan EnumerateStatus, 200)

	err := s.EnumeratePath("/testdir", progress)
	require.NoError(t, err)

	// Drain channel
	for range progress {
	}

	assert.Equal(t, FileCount(100), s.FileCount())
	assert.Equal(t, FileSize(400), s.TotalBytes()) // 100 * 4 bytes
}
|
|
||||||
|
|
||||||
// TestIsHiddenPath is a table-driven test covering hidden files, hidden
// parent directories, absolute paths, and the "." / "/" special cases.
func TestIsHiddenPath(t *testing.T) {
	tests := []struct {
		path   string
		hidden bool
	}{
		{"file.txt", false},
		{".hidden", true},
		{"dir/file.txt", false},
		{"dir/.hidden", true},
		{".dir/file.txt", true},
		{"/absolute/path", false},
		{"/absolute/.hidden", true},
		{"./relative", false}, // path.Clean removes leading ./
		{"a/b/c/.d/e", true},
		{".", false}, // current directory is not hidden
		{"/", false}, // root is not hidden
	}

	for _, tt := range tests {
		t.Run(tt.path, func(t *testing.T) {
			assert.Equal(t, tt.hidden, IsHiddenPath(tt.path), "IsHiddenPath(%q)", tt.path)
		})
	}
}
|
|
||||||
@@ -1,118 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/sha256"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/klauspost/compress/zstd"
|
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MAGIC is the file format magic bytes prefix (rot13 of "MANIFEST").
// It is written verbatim at the start of every manifest and checked by readers.
const MAGIC string = "ZNAVSRFG"
|
|
||||||
|
|
||||||
func newTimestampFromTime(t time.Time) *Timestamp {
|
|
||||||
return &Timestamp{
|
|
||||||
Seconds: t.Unix(),
|
|
||||||
Nanos: int32(t.Nanosecond()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// generate serializes the manifest to m.output: MAGIC bytes followed by the
// deterministically-marshaled outer protobuf message. Lazily builds the
// outer message (via generateOuter) on first call.
func (m *manifest) generate() error {
	if m.pbInner == nil {
		return errors.New("internal error: pbInner not set")
	}
	if m.pbOuter == nil {
		e := m.generateOuter()
		if e != nil {
			return e
		}
	}
	// Deterministic marshaling keeps output stable for identical input.
	dat, err := proto.MarshalOptions{Deterministic: true}.Marshal(m.pbOuter)
	if err != nil {
		return err
	}
	m.output = bytes.NewBuffer([]byte(MAGIC))
	_, err = m.output.Write(dat)
	if err != nil {
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
func (m *manifest) generateOuter() error {
|
|
||||||
if m.pbInner == nil {
|
|
||||||
return errors.New("internal error")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate UUID and set on inner message
|
|
||||||
manifestUUID := uuid.New()
|
|
||||||
m.pbInner.Uuid = manifestUUID[:]
|
|
||||||
|
|
||||||
innerData, err := proto.MarshalOptions{Deterministic: true}.Marshal(m.pbInner)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compress the inner data
|
|
||||||
idc := new(bytes.Buffer)
|
|
||||||
zw, err := zstd.NewWriter(idc, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = zw.Write(innerData)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_ = zw.Close()
|
|
||||||
|
|
||||||
compressedData := idc.Bytes()
|
|
||||||
|
|
||||||
// Hash the compressed data for integrity verification before decompression
|
|
||||||
h := sha256.New()
|
|
||||||
if _, err := h.Write(compressedData); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sha256Hash := h.Sum(nil)
|
|
||||||
|
|
||||||
m.pbOuter = &MFFileOuter{
|
|
||||||
InnerMessage: compressedData,
|
|
||||||
Size: int64(len(innerData)),
|
|
||||||
Sha256: sha256Hash,
|
|
||||||
Uuid: manifestUUID[:],
|
|
||||||
Version: MFFileOuter_VERSION_ONE,
|
|
||||||
CompressionType: MFFileOuter_COMPRESSION_ZSTD,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign the manifest if signing options are provided
|
|
||||||
if m.signingOptions != nil && m.signingOptions.KeyID != "" {
|
|
||||||
sigString, err := m.signatureString()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to generate signature string: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
sig, err := gpgSign([]byte(sigString), m.signingOptions.KeyID)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to sign manifest: %w", err)
|
|
||||||
}
|
|
||||||
m.pbOuter.Signature = sig
|
|
||||||
|
|
||||||
fingerprint, err := gpgGetKeyFingerprint(m.signingOptions.KeyID)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to get key fingerprint: %w", err)
|
|
||||||
}
|
|
||||||
m.pbOuter.Signer = fingerprint
|
|
||||||
|
|
||||||
pubKey, err := gpgExportPublicKey(m.signingOptions.KeyID)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to export public key: %w", err)
|
|
||||||
}
|
|
||||||
m.pbOuter.SigningPubKey = pubKey
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
53
mfer/url.go
53
mfer/url.go
@@ -1,53 +0,0 @@
|
|||||||
package mfer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ManifestURL represents a URL pointing to a manifest file.
type ManifestURL string

// FileURL represents a URL pointing to a file to be fetched.
type FileURL string

// BaseURL represents a base URL against which relative file paths are
// resolved to produce FileURL values (see BaseURL.JoinPath).
type BaseURL string
|
|
||||||
|
|
||||||
// JoinPath safely joins a relative file path to a base URL.
|
|
||||||
// The path is properly URL-encoded to prevent path traversal.
|
|
||||||
func (b BaseURL) JoinPath(path RelFilePath) (FileURL, error) {
|
|
||||||
base, err := url.Parse(string(b))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure base path ends with /
|
|
||||||
if !strings.HasSuffix(base.Path, "/") {
|
|
||||||
base.Path += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse and encode the relative path
|
|
||||||
ref, err := url.Parse(url.PathEscape(string(path)))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
resolved := base.ResolveReference(ref)
|
|
||||||
return FileURL(resolved.String()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the base URL as a plain string.
func (b BaseURL) String() string {
	return string(b)
}
|
|
||||||
|
|
||||||
// String returns the file URL as a plain string.
func (f FileURL) String() string {
	return string(f)
}
|
|
||||||
|
|
||||||
// String returns the manifest URL as a plain string.
func (m ManifestURL) String() string {
	return string(m)
}
|
|
||||||
BIN
modcache.tzst
BIN
modcache.tzst
Binary file not shown.
BIN
vendor.tzst
BIN
vendor.tzst
Binary file not shown.
Reference in New Issue
Block a user