vendor updates
This commit is contained in:
parent 6ba6eb876b
commit 4ac80cfcec
vendor/github.com/spf13/afero/.gitignore (2 lines, generated, vendored, normal file)
@@ -0,0 +1,2 @@
sftpfs/file1
sftpfs/test/
vendor/github.com/spf13/afero/.travis.yml (26 lines, generated, vendored, normal file)
@@ -0,0 +1,26 @@
sudo: false
language: go
arch:
- amd64
- ppc64e

go:
- "1.14"
- "1.15"
- "1.16"
- tip

os:
- linux
- osx

matrix:
  allow_failures:
    - go: tip
  fast_finish: true

script:
- go build -v ./...
- go test -count=1 -cover -race -v ./...
- go vet ./...
- FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi
vendor/github.com/spf13/afero/LICENSE.txt (174 lines, generated, vendored, normal file)
@@ -0,0 +1,174 @@
[Standard text of the Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/ : terms and conditions sections 1 through 9 (Definitions; Grant of Copyright License; Grant of Patent License; Redistribution; Submission of Contributions; Trademarks; Disclaimer of Warranty; Limitation of Liability; Accepting Warranty or Additional Liability).]
vendor/github.com/spf13/afero/README.md (442 lines, generated, vendored, normal file)
@@ -0,0 +1,442 @@
![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)

A FileSystem Abstraction System for Go

[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

# Overview

Afero is a filesystem framework providing a simple, uniform and universal API
for interacting with any filesystem, as an abstraction layer providing interfaces,
types and methods. Afero has an exceptionally clean interface and simple design
without needless constructors or initialization methods.

Afero is also a library providing a base set of interoperable backend
filesystems that make it easy to work with afero while retaining all the power
and benefit of the os and ioutil packages.

Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.

It is suitable for use in any situation where you would consider using the OS
package as it provides an additional abstraction that makes it easy to use a
memory backed file system during testing. It also adds support for the http
filesystem for full interoperability.

## Afero Features

* A single consistent API for accessing a variety of filesystems
* Interoperation between a variety of file system types
* A set of interfaces to encourage and enforce interoperability between backends
* An atomic cross platform memory backed file system
* Support for compositional (union) file systems by combining multiple file systems acting as one
* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
* A set of utility functions ported from io, ioutil & hugo to be afero aware
* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`

# Using Afero

Afero is easy to use and easier to adopt.

A few different ways you could use Afero:

* Use the interfaces alone to define your own file system.
* Wrapper for the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing

## Step 1: Install Afero

First use go get to install the latest version of the library.

    $ go get github.com/spf13/afero

Next include Afero in your application.
```go
import "github.com/spf13/afero"
```

## Step 2: Declare a backend

First define a package variable and set it to a pointer to a filesystem.
```go
var AppFs = afero.NewMemMapFs()

or

var AppFs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
OsFs it will still use the same underlying filesystem but will reduce
the ability to drop in other filesystems as desired.

## Step 3: Use it like you would the OS package

Throughout your application use any function and method like you normally
would.

So if my application before had:
```go
os.Open("/tmp/foo")
```
We would replace it with:
```go
AppFs.Open("/tmp/foo")
```

`AppFs` being the variable we defined above.

## List of all available functions

File System Methods Available:
```go
Chmod(name string, mode os.FileMode) : error
Chown(name string, uid, gid int) : error
Chtimes(name string, atime time.Time, mtime time.Time) : error
Create(name string) : File, error
Mkdir(name string, perm os.FileMode) : error
MkdirAll(path string, perm os.FileMode) : error
Name() : string
Open(name string) : File, error
OpenFile(name string, flag int, perm os.FileMode) : File, error
Remove(name string) : error
RemoveAll(path string) : error
Rename(oldname, newname string) : error
Stat(name string) : os.FileInfo, error
```
File Interfaces and Methods Available:
```go
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt

Name() : string
Readdir(count int) : []os.FileInfo, error
Readdirnames(n int) : []string, error
Stat() : os.FileInfo, error
Sync() : error
Truncate(size int64) : error
WriteString(s string) : ret int, err error
```
In some applications it may make sense to define a new package that
simply exports the file system variable for easy access from anywhere.

## Using Afero's utility functions

Afero provides a set of functions to make it easier to use the underlying file systems.
These functions have been primarily ported from io & ioutil with some developed for Hugo.

The afero utilities support all afero compatible backends.

The list of utilities includes:

```go
DirExists(path string) (bool, error)
Exists(path string) (bool, error)
FileContainsBytes(filename string, subslice []byte) (bool, error)
GetTempDir(subPath string) string
IsDir(path string) (bool, error)
IsEmpty(path string) (bool, error)
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
SafeWriteReader(path string, r io.Reader) (err error)
TempDir(dir, prefix string) (name string, err error)
TempFile(dir, prefix string) (f File, err error)
Walk(root string, walkFn filepath.WalkFunc) error
WriteFile(filename string, data []byte, perm os.FileMode) error
WriteReader(path string, r io.Reader) (err error)
```
For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)

They are available under two different approaches to use. You can either call
them directly where the first parameter of each function will be the file
system, or you can declare a new `Afero`, a custom type used to bind these
functions as methods to a given filesystem.

### Calling utilities directly

```go
fs := new(afero.MemMapFs)
f, err := afero.TempFile(fs, "", "ioutil-test")
```

### Calling via Afero

```go
fs := afero.NewMemMapFs()
afs := &afero.Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```

## Using Afero for Testing

There is a large benefit to using a mock filesystem for testing. It has a
completely blank state every time it is initialized and can be easily
reproducible regardless of OS. You could create files to your heart’s content
and the file access would be fast while also saving you from all the annoying
issues with deleting temporary files, Windows file locking, etc. The MemMapFs
backend is perfect for testing.

* Much faster than performing I/O operations on disk
* Avoid security issues and permissions
* Far more control. 'rm -rf /' with confidence
* Test setup is far easier to do
* No test cleanup needed

One way to accomplish this is to define a variable as mentioned above.
In your application this will be set to afero.NewOsFs(); during testing you
can set it to afero.NewMemMapFs().

It wouldn't be uncommon to have each test initialize a blank slate memory
backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
appropriate in my application code. This approach ensures that tests are order
independent, with no test relying on the state left by an earlier test.

Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
	appFS := afero.NewMemMapFs()
	// create test files and directories
	appFS.MkdirAll("src/a", 0755)
	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
	name := "src/c"
	_, err := appFS.Stat(name)
	if os.IsNotExist(err) {
		t.Errorf("file \"%s\" does not exist.\n", name)
	}
}
```

# Available Backends

## Operating System Native

### OsFs

The first is simply a wrapper around the native OS calls. This makes it
very easy to use as all of the calls are the same as the existing OS
calls. It also makes it trivial to have your code use the OS during
operation and a mock filesystem during testing or as needed.

```go
appfs := afero.NewOsFs()
appfs.MkdirAll("src/a", 0755)
```

## Memory Backed Storage

### MemMapFs

Afero also provides a fully atomic memory backed filesystem perfect for use in
mocking and to speed up unnecessary disk io when persistence isn’t
necessary. It is fully concurrent and will work within go routines
safely.

```go
mm := afero.NewMemMapFs()
mm.MkdirAll("src/a", 0755)
```

#### InMemoryFile

As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
backed file implementation. This can be used in other memory backed file
systems with ease. Plans are to add a radix tree memory stored file
system using InMemoryFile.

## Network Interfaces

### SftpFs

Afero has experimental support for secure file transfer protocol (sftp), which can
be used to perform file operations over an encrypted channel.

### GCSFs

Afero has experimental support for Google Cloud Storage (GCS). You can either set the
`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
`NewGcsFS` to configure access to your GCS bucket.

Some known limitations of the existing implementation:
* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version.
* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that's left for another version.
* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.

## Filtering Backends

### BasePathFs

The BasePathFs restricts all operations to a given path within an Fs.
The given file name to the operations on this Fs will be prepended with
the base path before calling the source Fs.

```go
bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
```

### ReadOnlyFs

A thin wrapper around the source Fs providing a read only view.

```go
fs := afero.NewReadOnlyFs(afero.NewOsFs())
_, err := fs.Create("/file.txt")
// err = syscall.EPERM
```

### RegexpFs

A filtered view on file names, any file NOT matching
the passed regexp will be treated as non-existing.
Files not matching the regexp provided will not be created.
Directories are not filtered.

```go
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
// err = syscall.ENOENT
```

### HttpFs

Afero provides an http compatible backend which can wrap any of the existing
backends.

The Http package requires a slightly specific version of Open which
returns an http.File type.

Afero provides an httpFs file system which satisfies this requirement.
Any Afero FileSystem can be used as an httpFs.

```go
httpFs := afero.NewHttpFs(<ExistingFS>)
fileserver := http.FileServer(httpFs.Dir(<PATH>))
http.Handle("/", fileserver)
```

## Composite Backends

Afero provides the ability to have two filesystems (or more) act as a single
file system.

### CacheOnReadFs

The CacheOnReadFs will lazily make copies of any accessed files from the base
layer into the overlay. Subsequent reads will be pulled from the overlay
directly, provided the request is within the cache duration of when it was
created in the overlay.

If the base filesystem is writeable, any changes to files will be
done first to the base, then to the overlay layer. Write calls to open file
handles like `Write()` or `Truncate()` go to the overlay first.

To write files to the overlay only, you can use the overlay Fs directly (not
via the union Fs).

Cache files in the layer for the given time.Duration; a cache duration of 0
means "forever", meaning the file will not be re-requested from the base ever.

A read-only base will make the overlay also read-only but still copy files
from the base to the overlay when they're not present (or outdated) in the
caching layer.

```go
base := afero.NewOsFs()
layer := afero.NewMemMapFs()
ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
```

### CopyOnWriteFs()

The CopyOnWriteFs is a read only base file system with a potentially
writeable layer on top.

Read operations will first look in the overlay and if not found there, will
serve the file from the base.

Changes to the file system will only be made in the overlay.

Any attempt to modify a file found only in the base will copy the file to the
overlay layer before modification (including opening a file with a writable
handle).

Removing and Renaming files present only in the base layer is not currently
permitted. If a file is present in the base layer and the overlay, only the
overlay will be removed/renamed.

```go
base := afero.NewOsFs()
roBase := afero.NewReadOnlyFs(base)
ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())

fh, _ := ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
```

In this example all write operations will only occur in memory (MemMapFs),
leaving the base filesystem (OsFs) untouched.

## Desired/possible backends

The following is a short list of possible backends we hope someone will
implement:

* SSH
* S3

# About the project

## What's in the name

Afero comes from the latin roots Ad-Facere.

**"Ad"** is a prefix meaning "to".

**"Facere"** is a form of the root "faciō" making "make or do".

The literal meaning of afero is "to make" or "to do", which seems very fitting
for a library that allows one to make files and directories and do things with them.

The English word that shares the same roots as Afero is "affair". Affair shares
the same concept but as a noun it means "something that is made or done" or "an
object of a particular type".

It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
Googles very well.

## Release Notes

See the [Releases Page](https://github.com/spf13/afero/releases).

## Contributing

1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request

## Contributors

Names in no particular order:

* [spf13](https://github.com/spf13)
* [jaqx0r](https://github.com/jaqx0r)
* [mbertschler](https://github.com/mbertschler)
* [xor-gate](https://github.com/xor-gate)

## License

Afero is released under the Apache 2.0 license. See
[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
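Not part of the vendored files: a minimal sketch of the swap-a-backend pattern the README above describes, using only API shown there (NewOsFs, NewMemMapFs, ReadFile). The package name `config` and the `LoadGreeting` helper are hypothetical names for illustration.

```go
// Production code talks to the disk only through the AppFs seam.
package config

import "github.com/spf13/afero"

// AppFs is the single point where a real or mock filesystem is chosen.
var AppFs afero.Fs = afero.NewOsFs()

// LoadGreeting reads a file through whatever backend AppFs currently holds.
func LoadGreeting(path string) (string, error) {
	b, err := afero.ReadFile(AppFs, path) // afero utility: first argument is the Fs
	if err != nil {
		return "", err
	}
	return string(b), nil
}
```

A test would then assign `AppFs = afero.NewMemMapFs()` and seed it with `afero.WriteFile(AppFs, "greeting.txt", []byte("hi"), 0644)` before calling `LoadGreeting`, much like the `TestExist` example in the README.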
vendor/github.com/spf13/afero/afero.go (111 lines, generated, vendored, normal file)
@@ -0,0 +1,111 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package afero provides types and methods for interacting with the filesystem,
// as an abstraction layer.

// Afero also provides a few implementations that are mostly interoperable. One that
// uses the operating system filesystem, one that uses memory to store files
// (cross platform) and an interface that should be implemented if you want to
// provide your own filesystem.

package afero

import (
	"errors"
	"io"
	"os"
	"time"
)

type Afero struct {
	Fs
}

// File represents a file in the filesystem.
type File interface {
	io.Closer
	io.Reader
	io.ReaderAt
	io.Seeker
	io.Writer
	io.WriterAt

	Name() string
	Readdir(count int) ([]os.FileInfo, error)
	Readdirnames(n int) ([]string, error)
	Stat() (os.FileInfo, error)
	Sync() error
	Truncate(size int64) error
	WriteString(s string) (ret int, err error)
}

// Fs is the filesystem interface.
//
// Any simulated or real filesystem should implement this interface.
type Fs interface {
	// Create creates a file in the filesystem, returning the file and an
	// error, if any happens.
	Create(name string) (File, error)

	// Mkdir creates a directory in the filesystem, return an error if any
	// happens.
	Mkdir(name string, perm os.FileMode) error

	// MkdirAll creates a directory path and all parents that does not exist
	// yet.
	MkdirAll(path string, perm os.FileMode) error

	// Open opens a file, returning it or an error, if any happens.
	Open(name string) (File, error)

	// OpenFile opens a file using the given flags and the given mode.
	OpenFile(name string, flag int, perm os.FileMode) (File, error)

	// Remove removes a file identified by name, returning an error, if any
	// happens.
	Remove(name string) error

	// RemoveAll removes a directory path and any children it contains. It
	// does not fail if the path does not exist (return nil).
	RemoveAll(path string) error

	// Rename renames a file.
	Rename(oldname, newname string) error

	// Stat returns a FileInfo describing the named file, or an error, if any
	// happens.
	Stat(name string) (os.FileInfo, error)

	// The name of this FileSystem
	Name() string

	// Chmod changes the mode of the named file to mode.
	Chmod(name string, mode os.FileMode) error

	// Chown changes the uid and gid of the named file.
	Chown(name string, uid, gid int) error

	// Chtimes changes the access and modification times of the named file
	Chtimes(name string, atime time.Time, mtime time.Time) error
}

var (
	ErrFileClosed        = errors.New("File is closed")
	ErrOutOfRange        = errors.New("Out of range")
	ErrTooLarge          = errors.New("Too large")
	ErrFileNotFound      = os.ErrNotExist
	ErrFileExists        = os.ErrExist
	ErrDestinationExists = os.ErrExist
)
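Not part of the vendored files: a small sketch of the `Afero` wrapper declared in afero.go above. It embeds an `Fs`, so the utility functions can be called as methods rather than with the Fs as first argument; the method forms of `ReadFile`/`WriteFile` are assumed from the same pattern as the README's `afs.TempFile` example, and `notes.txt` is a made-up name.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// Wrap an in-memory backend so the ioutil-style helpers become methods.
	afs := &afero.Afero{Fs: afero.NewMemMapFs()}

	if err := afs.WriteFile("notes.txt", []byte("hello"), 0644); err != nil {
		panic(err)
	}
	data, err := afs.ReadFile("notes.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // prints "hello"
}
```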
vendor/github.com/spf13/afero/appveyor.yml (15 lines, generated, vendored, normal file)
@@ -0,0 +1,15 @@
version: '{build}'
clone_folder: C:\gopath\src\github.com\spf13\afero
environment:
  GOPATH: C:\gopath
build_script:
- cmd: >-
    go version

    go env

    go get -v github.com/spf13/afero/...

    go build -v github.com/spf13/afero/...
test_script:
- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/...
vendor/github.com/spf13/afero/basepath.go (211 lines, generated, vendored, normal file)
@@ -0,0 +1,211 @@
package afero

import (
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"
)

var _ Lstater = (*BasePathFs)(nil)

// The BasePathFs restricts all operations to a given path within an Fs.
// The given file name to the operations on this Fs will be prepended with
// the base path before calling the base Fs.
// Any file name (after filepath.Clean()) outside this base path will be
// treated as non existing file.
//
// Note that it does not clean the error messages on return, so you may
// reveal the real path on errors.
type BasePathFs struct {
	source Fs
	path   string
}

type BasePathFile struct {
	File
	path string
}

func (f *BasePathFile) Name() string {
	sourcename := f.File.Name()
	return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
}

func NewBasePathFs(source Fs, path string) Fs {
	return &BasePathFs{source: source, path: path}
}

// on a file outside the base path it returns the given file name and an error,
// else the given file with the base path prepended
func (b *BasePathFs) RealPath(name string) (path string, err error) {
	if err := validateBasePathName(name); err != nil {
		return name, err
	}

	bpath := filepath.Clean(b.path)
	path = filepath.Clean(filepath.Join(bpath, name))
	if !strings.HasPrefix(path, bpath) {
		return name, os.ErrNotExist
	}

	return path, nil
}

func validateBasePathName(name string) error {
	if runtime.GOOS != "windows" {
		// Not much to do here;
		// the virtual file paths all look absolute on *nix.
		return nil
	}

	// On Windows a common mistake would be to provide an absolute OS path
	// We could strip out the base part, but that would not be very portable.
	if filepath.IsAbs(name) {
		return os.ErrNotExist
	}

	return nil
}

func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "chtimes", Path: name, Err: err}
	}
	return b.source.Chtimes(name, atime, mtime)
}

func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "chmod", Path: name, Err: err}
	}
	return b.source.Chmod(name, mode)
}

func (b *BasePathFs) Chown(name string, uid, gid int) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "chown", Path: name, Err: err}
	}
	return b.source.Chown(name, uid, gid)
}

func (b *BasePathFs) Name() string {
	return "BasePathFs"
}

func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "stat", Path: name, Err: err}
	}
	return b.source.Stat(name)
}

func (b *BasePathFs) Rename(oldname, newname string) (err error) {
	if oldname, err = b.RealPath(oldname); err != nil {
		return &os.PathError{Op: "rename", Path: oldname, Err: err}
	}
	if newname, err = b.RealPath(newname); err != nil {
		return &os.PathError{Op: "rename", Path: newname, Err: err}
	}
	return b.source.Rename(oldname, newname)
}

func (b *BasePathFs) RemoveAll(name string) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "remove_all", Path: name, Err: err}
	}
	return b.source.RemoveAll(name)
}

func (b *BasePathFs) Remove(name string) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "remove", Path: name, Err: err}
	}
	return b.source.Remove(name)
}

func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
	}
	sourcef, err := b.source.OpenFile(name, flag, mode)
	if err != nil {
		return nil, err
	}
	return &BasePathFile{sourcef, b.path}, nil
}

func (b *BasePathFs) Open(name string) (f File, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "open", Path: name, Err: err}
	}
	sourcef, err := b.source.Open(name)
	if err != nil {
		return nil, err
	}
	return &BasePathFile{File: sourcef, path: b.path}, nil
}

func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}
	return b.source.Mkdir(name, mode)
}

func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}
	return b.source.MkdirAll(name, mode)
}

func (b *BasePathFs) Create(name string) (f File, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "create", Path: name, Err: err}
	}
	sourcef, err := b.source.Create(name)
	if err != nil {
		return nil, err
	}
	return &BasePathFile{File: sourcef, path: b.path}, nil
}

func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
	name, err := b.RealPath(name)
	if err != nil {
		return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
	}
	if lstater, ok := b.source.(Lstater); ok {
		return lstater.LstatIfPossible(name)
	}
	fi, err := b.source.Stat(name)
	return fi, false, err
}

func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error {
	oldname, err := b.RealPath(oldname)
	if err != nil {
		return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
	}
	newname, err = b.RealPath(newname)
	if err != nil {
		return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
	}
	if linker, ok := b.source.(Linker); ok {
		return linker.SymlinkIfPossible(oldname, newname)
	}
	return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}

func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) {
	name, err := b.RealPath(name)
	if err != nil {
		return "", &os.PathError{Op: "readlink", Path: name, Err: err}
	}
	if reader, ok := b.source.(LinkReader); ok {
		return reader.ReadlinkIfPossible(name)
	}
	return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}
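Not part of the vendored files: a sketch of how `BasePathFs` from basepath.go above confines a source Fs to a sub-tree. `RealPath` joins the base path onto every name, and any name that cleans to a path outside the base is reported as non-existing. The paths used here are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	mem := afero.NewMemMapFs()
	mem.MkdirAll("/data/app", 0755)

	// Every name passed to jail is joined onto /data/app before reaching mem.
	jail := afero.NewBasePathFs(mem, "/data/app")

	f, _ := jail.Create("/settings.json") // stored as /data/app/settings.json in mem
	fmt.Println(f.Name())                 // name is reported with the base path trimmed

	// A cleaned path that escapes the base ("/data/secret") is treated as non-existing.
	_, err := jail.Open("/../secret")
	fmt.Println(err)
}
```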
vendor/github.com/spf13/afero/cacheOnReadFs.go (315 lines, generated, vendored, normal file)
@@ -0,0 +1,315 @@
package afero

import (
	"os"
	"syscall"
	"time"
)

// If the cache duration is 0, cache time will be unlimited, i.e. once
// a file is in the layer, the base will never be read again for this file.
//
// For cache times greater than 0, the modification time of a file is
// checked. Note that a lot of file system implementations only allow a
// resolution of a second for timestamps... or as the godoc for os.Chtimes()
// states: "The underlying filesystem may truncate or round the values to a
// less precise time unit."
//
// This caching union will forward all write calls also to the base file
// system first. To prevent writing to the base Fs, wrap it in a read-only
// filter - Note: this will also make the overlay read-only, for writing files
// in the overlay, use the overlay Fs directly, not via the union Fs.
type CacheOnReadFs struct {
	base      Fs
	layer     Fs
	cacheTime time.Duration
}

func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
	return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
}

type cacheState int

const (
	// not present in the overlay, unknown if it exists in the base:
	cacheMiss cacheState = iota
	// present in the overlay and in base, base file is newer:
	cacheStale
	// present in the overlay - with cache time == 0 it may exist in the base,
	// with cacheTime > 0 it exists in the base and is same age or newer in the
	// overlay
	cacheHit
	// happens if someone writes directly to the overlay without
	// going through this union
	cacheLocal
)

func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
	var lfi, bfi os.FileInfo
	lfi, err = u.layer.Stat(name)
	if err == nil {
		if u.cacheTime == 0 {
			return cacheHit, lfi, nil
		}
		if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
			bfi, err = u.base.Stat(name)
			if err != nil {
				return cacheLocal, lfi, nil
			}
			if bfi.ModTime().After(lfi.ModTime()) {
				return cacheStale, bfi, nil
			}
		}
		return cacheHit, lfi, nil
	}

	if err == syscall.ENOENT || os.IsNotExist(err) {
		return cacheMiss, nil, nil
	}

	return cacheMiss, nil, err
}

func (u *CacheOnReadFs) copyToLayer(name string) error {
	return copyToLayer(u.base, u.layer, name)
}

func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error {
	return copyFileToLayer(u.base, u.layer, name, flag, perm)
}

func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit:
		err = u.base.Chtimes(name, atime, mtime)
	case cacheStale, cacheMiss:
		if err := u.copyToLayer(name); err != nil {
			return err
		}
		err = u.base.Chtimes(name, atime, mtime)
	}
	if err != nil {
		return err
	}
	return u.layer.Chtimes(name, atime, mtime)
}

func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit:
		err = u.base.Chmod(name, mode)
	case cacheStale, cacheMiss:
		if err := u.copyToLayer(name); err != nil {
			return err
		}
		err = u.base.Chmod(name, mode)
	}
	if err != nil {
		return err
	}
	return u.layer.Chmod(name, mode)
}

func (u *CacheOnReadFs) Chown(name string, uid, gid int) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit:
		err = u.base.Chown(name, uid, gid)
	case cacheStale, cacheMiss:
		if err := u.copyToLayer(name); err != nil {
			return err
		}
		err = u.base.Chown(name, uid, gid)
	}
	if err != nil {
		return err
	}
	return u.layer.Chown(name, uid, gid)
}

func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
	st, fi, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}
	switch st {
	case cacheMiss:
		return u.base.Stat(name)
	default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
		return fi, nil
	}
}

func (u *CacheOnReadFs) Rename(oldname, newname string) error {
	st, _, err := u.cacheStatus(oldname)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit:
		err = u.base.Rename(oldname, newname)
	case cacheStale, cacheMiss:
		if err := u.copyToLayer(oldname); err != nil {
			return err
		}
		err = u.base.Rename(oldname, newname)
	}
	if err != nil {
		return err
	}
	return u.layer.Rename(oldname, newname)
}

func (u *CacheOnReadFs) Remove(name string) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit, cacheStale, cacheMiss:
		err = u.base.Remove(name)
	}
	if err != nil {
		return err
	}
	return u.layer.Remove(name)
}

func (u *CacheOnReadFs) RemoveAll(name string) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit, cacheStale, cacheMiss:
		err = u.base.RemoveAll(name)
	}
	if err != nil {
		return err
	}
	return u.layer.RemoveAll(name)
}

func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}
	switch st {
	case cacheLocal, cacheHit:
	default:
		if err := u.copyFileToLayer(name, flag, perm); err != nil {
			return nil, err
		}
	}
	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
		bfi, err := u.base.OpenFile(name, flag, perm)
		if err != nil {
			return nil, err
		}
		lfi, err := u.layer.OpenFile(name, flag, perm)
		if err != nil {
			bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
			return nil, err
		}
		return &UnionFile{Base: bfi, Layer: lfi}, nil
	}
	return u.layer.OpenFile(name, flag, perm)
}

func (u *CacheOnReadFs) Open(name string) (File, error) {
	st, fi, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}

	switch st {
	case cacheLocal:
		return u.layer.Open(name)

	case cacheMiss:
		bfi, err := u.base.Stat(name)
		if err != nil {
			return nil, err
		}
		if bfi.IsDir() {
			return u.base.Open(name)
		}
		if err := u.copyToLayer(name); err != nil {
			return nil, err
		}
		return u.layer.Open(name)

	case cacheStale:
		if !fi.IsDir() {
			if err := u.copyToLayer(name); err != nil {
				return nil, err
			}
			return u.layer.Open(name)
		}
	case cacheHit:
		if !fi.IsDir() {
			return u.layer.Open(name)
		}
	}
	// the dirs from cacheHit, cacheStale fall down here:
	bfile, _ := u.base.Open(name)
	lfile, err := u.layer.Open(name)
	if err != nil && bfile == nil {
		return nil, err
	}
	return &UnionFile{Base: bfile, Layer: lfile}, nil
}

func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
	err := u.base.Mkdir(name, perm)
	if err != nil {
		return err
	}
	return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}

func (u *CacheOnReadFs) Name() string {
	return "CacheOnReadFs"
}

func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
	err := u.base.MkdirAll(name, perm)
	if err != nil {
		return err
	}
	return u.layer.MkdirAll(name, perm)
}

func (u *CacheOnReadFs) Create(name string) (File, error) {
	bfh, err := u.base.Create(name)
	if err != nil {
		return nil, err
	}
	lfh, err := u.layer.Create(name)
	if err != nil {
		// oops, see comment about OS_TRUNC above, should we remove? then we have to
		// remember if the file did not exist before
		bfh.Close()
		return nil, err
	}
	return &UnionFile{Base: bfh, Layer: lfh}, nil
}
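Not part of the vendored files: a sketch of the composition the doc comment in cacheOnReadFs.go above suggests, namely wrapping the base in a read-only filter to stop write-through, keeping cached copies in a MemMapFs layer, and choosing a cache duration (0 means a cached file is never re-read from the base). The path `/etc/hosts` is just an example.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewReadOnlyFs(afero.NewOsFs()) // read-only base: no write-through
	layer := afero.NewMemMapFs()                 // overlay that holds the cached copies

	cached := afero.NewCacheOnReadFs(base, layer, 10*time.Second)

	// First read copies the file from the base into the layer.
	data, err := afero.ReadFile(cached, "/etc/hosts")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data), "bytes (now cached in the memory layer)")

	// A second read inside the 10s window is served from the layer without
	// consulting the base again; with cacheTime 0 it would stay cached forever.
	_, _ = afero.ReadFile(cached, "/etc/hosts")
}
```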
vendor/github.com/spf13/afero/const_bsds.go (22 lines, generated, vendored, normal file)
@@ -0,0 +1,22 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build aix darwin openbsd freebsd netbsd dragonfly

package afero

import (
	"syscall"
)

const BADFD = syscall.EBADF
26
vendor/github.com/spf13/afero/const_win_unix.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !darwin
// +build !openbsd
// +build !freebsd
// +build !dragonfly
// +build !netbsd
// +build !aix

package afero

import (
	"syscall"
)

const BADFD = syscall.EBADFD
326
vendor/github.com/spf13/afero/copyOnWriteFs.go
generated
vendored
Normal file
@ -0,0 +1,326 @@
package afero

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
	"time"
)

var _ Lstater = (*CopyOnWriteFs)(nil)

// The CopyOnWriteFs is a union filesystem: a read only base file system with
// a possibly writeable layer on top. Changes to the file system will only
// be made in the overlay: Changing an existing file in the base layer which
// is not present in the overlay will copy the file to the overlay ("changing"
// includes also calls to e.g. Chtimes(), Chmod() and Chown()).
//
// Reading directories is currently only supported via Open(), not OpenFile().
type CopyOnWriteFs struct {
	base  Fs
	layer Fs
}

func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
	return &CopyOnWriteFs{base: base, layer: layer}
}

// Returns true if the file is not in the overlay
func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
	if _, err := u.layer.Stat(name); err == nil {
		return false, nil
	}
	_, err := u.base.Stat(name)
	if err != nil {
		if oerr, ok := err.(*os.PathError); ok {
			if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
				return false, nil
			}
		}
		if err == syscall.ENOENT {
			return false, nil
		}
	}
	return true, err
}

func (u *CopyOnWriteFs) copyToLayer(name string) error {
	return copyToLayer(u.base, u.layer, name)
}

func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
	b, err := u.isBaseFile(name)
	if err != nil {
		return err
	}
	if b {
		if err := u.copyToLayer(name); err != nil {
			return err
		}
	}
	return u.layer.Chtimes(name, atime, mtime)
}

func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
	b, err := u.isBaseFile(name)
	if err != nil {
		return err
	}
	if b {
		if err := u.copyToLayer(name); err != nil {
			return err
		}
	}
	return u.layer.Chmod(name, mode)
}

func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error {
	b, err := u.isBaseFile(name)
	if err != nil {
		return err
	}
	if b {
		if err := u.copyToLayer(name); err != nil {
			return err
		}
	}
	return u.layer.Chown(name, uid, gid)
}

func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
	fi, err := u.layer.Stat(name)
	if err != nil {
		isNotExist := u.isNotExist(err)
		if isNotExist {
			return u.base.Stat(name)
		}
		return nil, err
	}
	return fi, nil
}

func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
	llayer, ok1 := u.layer.(Lstater)
	lbase, ok2 := u.base.(Lstater)

	if ok1 {
		fi, b, err := llayer.LstatIfPossible(name)
		if err == nil {
			return fi, b, nil
		}

		if !u.isNotExist(err) {
			return nil, b, err
		}
	}

	if ok2 {
		fi, b, err := lbase.LstatIfPossible(name)
		if err == nil {
			return fi, b, nil
		}
		if !u.isNotExist(err) {
			return nil, b, err
		}
	}

	fi, err := u.Stat(name)

	return fi, false, err
}

func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error {
	if slayer, ok := u.layer.(Linker); ok {
		return slayer.SymlinkIfPossible(oldname, newname)
	}

	return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}

func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) {
	if rlayer, ok := u.layer.(LinkReader); ok {
		return rlayer.ReadlinkIfPossible(name)
	}

	if rbase, ok := u.base.(LinkReader); ok {
		return rbase.ReadlinkIfPossible(name)
	}

	return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}

func (u *CopyOnWriteFs) isNotExist(err error) bool {
	if e, ok := err.(*os.PathError); ok {
		err = e.Err
	}
	if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
		return true
	}
	return false
}

// Renaming files present only in the base layer is not permitted
func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
	b, err := u.isBaseFile(oldname)
	if err != nil {
		return err
	}
	if b {
		return syscall.EPERM
	}
	return u.layer.Rename(oldname, newname)
}

// Removing files present only in the base layer is not permitted. If
// a file is present in the base layer and the overlay, only the overlay
// will be removed.
func (u *CopyOnWriteFs) Remove(name string) error {
	err := u.layer.Remove(name)
	switch err {
	case syscall.ENOENT:
		_, err = u.base.Stat(name)
		if err == nil {
			return syscall.EPERM
		}
		return syscall.ENOENT
	default:
		return err
	}
}

func (u *CopyOnWriteFs) RemoveAll(name string) error {
	err := u.layer.RemoveAll(name)
	switch err {
	case syscall.ENOENT:
		_, err = u.base.Stat(name)
		if err == nil {
			return syscall.EPERM
		}
		return syscall.ENOENT
	default:
		return err
	}
}

func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	b, err := u.isBaseFile(name)
	if err != nil {
		return nil, err
	}

	if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
		if b {
			if err = u.copyToLayer(name); err != nil {
				return nil, err
			}
			return u.layer.OpenFile(name, flag, perm)
		}

		dir := filepath.Dir(name)
		isaDir, err := IsDir(u.base, dir)
		if err != nil && !os.IsNotExist(err) {
			return nil, err
		}
		if isaDir {
			if err = u.layer.MkdirAll(dir, 0777); err != nil {
				return nil, err
			}
			return u.layer.OpenFile(name, flag, perm)
		}

		isaDir, err = IsDir(u.layer, dir)
		if err != nil {
			return nil, err
		}
		if isaDir {
			return u.layer.OpenFile(name, flag, perm)
		}

		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
	}
	if b {
		return u.base.OpenFile(name, flag, perm)
	}
	return u.layer.OpenFile(name, flag, perm)
}

// This function handles the 9 different possibilities caused
// by the union which are the intersection of the following...
// layer: doesn't exist, exists as a file, and exists as a directory
// base: doesn't exist, exists as a file, and exists as a directory
func (u *CopyOnWriteFs) Open(name string) (File, error) {
	// Since the overlay overrides the base we check that first
	b, err := u.isBaseFile(name)
	if err != nil {
		return nil, err
	}

	// If overlay doesn't exist, return the base (base state irrelevant)
	if b {
		return u.base.Open(name)
	}

	// If overlay is a file, return it (base state irrelevant)
	dir, err := IsDir(u.layer, name)
	if err != nil {
		return nil, err
	}
	if !dir {
		return u.layer.Open(name)
	}

	// Overlay is a directory, base state now matters.
	// Base state has 3 states to check but 2 outcomes:
	// A. It's a file or non-readable in the base (return just the overlay)
	// B. It's an accessible directory in the base (return a UnionFile)

	// If base is file or nonreadable, return overlay
	dir, err = IsDir(u.base, name)
	if !dir || err != nil {
		return u.layer.Open(name)
	}

	// Both base & layer are directories
	// Return union file (if opens are without error)
	bfile, bErr := u.base.Open(name)
	lfile, lErr := u.layer.Open(name)

	// If either have errors at this point something is very wrong. Return nil and the errors
	if bErr != nil || lErr != nil {
		return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
	}

	return &UnionFile{Base: bfile, Layer: lfile}, nil
}

func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
	dir, err := IsDir(u.base, name)
	if err != nil {
		return u.layer.MkdirAll(name, perm)
	}
	if dir {
		return ErrFileExists
	}
	return u.layer.MkdirAll(name, perm)
}

func (u *CopyOnWriteFs) Name() string {
	return "CopyOnWriteFs"
}

func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
	dir, err := IsDir(u.base, name)
	if err != nil {
		return u.layer.MkdirAll(name, perm)
	}
	if dir {
		// This is in line with how os.MkdirAll behaves.
		return nil
	}
	return u.layer.MkdirAll(name, perm)
}

func (u *CopyOnWriteFs) Create(name string) (File, error) {
	return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
}
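Not part of the vendored diff: a minimal sketch of the copy-on-write behaviour the comments above describe, assuming afero's exported NewCopyOnWriteFs, NewReadOnlyFs, NewMemMapFs, WriteFile and ReadFile helpers with their upstream signatures; file names and contents are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	// Read-only base layer with an existing file.
	base := afero.NewMemMapFs()
	if err := afero.WriteFile(base, "/etc/app.conf", []byte("color = blue\n"), 0644); err != nil {
		log.Fatal(err)
	}

	// Writable overlay; all modifications land here, the base stays untouched.
	overlay := afero.NewMemMapFs()
	ufs := afero.NewCopyOnWriteFs(afero.NewReadOnlyFs(base), overlay)

	// Writing through the union copies the file up into the overlay first.
	if err := afero.WriteFile(ufs, "/etc/app.conf", []byte("color = red\n"), 0644); err != nil {
		log.Fatal(err)
	}

	fromBase, _ := afero.ReadFile(base, "/etc/app.conf")
	fromUnion, _ := afero.ReadFile(ufs, "/etc/app.conf")
	fmt.Printf("base:  %s", fromBase)  // still "color = blue"
	fmt.Printf("union: %s", fromUnion) // now "color = red"
}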
13
vendor/github.com/spf13/afero/go.mod
generated
vendored
Normal file
@ -0,0 +1,13 @@
module github.com/spf13/afero

require (
	cloud.google.com/go/storage v1.14.0
	github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8
	github.com/pkg/sftp v1.13.1
	golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa
	golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99
	golang.org/x/text v0.3.4
	google.golang.org/api v0.40.0
)

go 1.13
432
vendor/github.com/spf13/afero/go.sum
generated
vendored
Normal file
@ -0,0 +1,432 @@
|
|||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
|
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||||
|
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||||
|
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||||
|
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||||
|
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||||
|
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||||
|
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||||
|
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||||
|
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||||
|
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||||
|
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||||
|
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||||
|
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||||
|
cloud.google.com/go v0.75.0 h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY=
|
||||||
|
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
||||||
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
|
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||||
|
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||||
|
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||||
|
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||||
|
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||||
|
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||||
|
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||||
|
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||||
|
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||||
|
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||||
|
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||||
|
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||||
|
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||||
|
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||||
|
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||||
|
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||||
|
cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU=
|
||||||
|
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
|
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||||
|
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||||
|
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
|
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
|
||||||
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||||
|
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||||
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||||
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||||
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
|
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
|
github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
|
||||||
|
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
|
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4=
|
||||||
|
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
|
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
|
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
||||||
|
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||||
|
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||||
|
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
|
||||||
|
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
|
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
|
||||||
|
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
|
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa h1:idItI2DDfCokpg0N51B2VtiLdJ4vAuXC9fnCb2gACo4=
|
||||||
|
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||||
|
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||||
|
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||||
|
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
|
||||||
|
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
|
||||||
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99 h1:5vD4XjIc0X5+kHZjx4UecYdjA6mJo+XXNoaW0EjU5Os=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||||
|
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||||
|
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||||
|
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||||
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||||
|
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||||
|
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||||
|
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||||
|
google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A=
|
||||||
|
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||||
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||||
|
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||||
|
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705 h1:PYBmACG+YEv8uQPW0r1kJj8tR+gkF0UWq7iFdUezwEw=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
114
vendor/github.com/spf13/afero/httpFs.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package afero

import (
	"errors"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"
)

type httpDir struct {
	basePath string
	fs       HttpFs
}

func (d httpDir) Open(name string) (http.File, error) {
	if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
		strings.Contains(name, "\x00") {
		return nil, errors.New("http: invalid character in file path")
	}
	dir := string(d.basePath)
	if dir == "" {
		dir = "."
	}

	f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
	if err != nil {
		return nil, err
	}
	return f, nil
}

type HttpFs struct {
	source Fs
}

func NewHttpFs(source Fs) *HttpFs {
	return &HttpFs{source: source}
}

func (h HttpFs) Dir(s string) *httpDir {
	return &httpDir{basePath: s, fs: h}
}

func (h HttpFs) Name() string { return "h HttpFs" }

func (h HttpFs) Create(name string) (File, error) {
	return h.source.Create(name)
}

func (h HttpFs) Chmod(name string, mode os.FileMode) error {
	return h.source.Chmod(name, mode)
}

func (h HttpFs) Chown(name string, uid, gid int) error {
	return h.source.Chown(name, uid, gid)
}

func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
	return h.source.Chtimes(name, atime, mtime)
}

func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
	return h.source.Mkdir(name, perm)
}

func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
	return h.source.MkdirAll(path, perm)
}

func (h HttpFs) Open(name string) (http.File, error) {
	f, err := h.source.Open(name)
	if err == nil {
		if httpfile, ok := f.(http.File); ok {
			return httpfile, nil
		}
	}
	return nil, err
}

func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	return h.source.OpenFile(name, flag, perm)
}

func (h HttpFs) Remove(name string) error {
	return h.source.Remove(name)
}

func (h HttpFs) RemoveAll(path string) error {
	return h.source.RemoveAll(path)
}

func (h HttpFs) Rename(oldname, newname string) error {
	return h.source.Rename(oldname, newname)
}

func (h HttpFs) Stat(name string) (os.FileInfo, error) {
	return h.source.Stat(name)
}
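Aside (not part of the vendored diff): a minimal sketch of how HttpFs is typically wired into net/http. The in-memory filesystem, the /index.html file, and the :8080 port are invented for illustration; error handling is omitted.

package main

import (
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	// Build an in-memory filesystem and put one file in it (hypothetical content).
	appFS := afero.NewMemMapFs()
	_ = afero.WriteFile(appFS, "/index.html", []byte("<h1>hello</h1>"), 0644)

	// HttpFs wraps any afero.Fs; Dir() yields a value usable as an http.FileSystem.
	httpFS := afero.NewHttpFs(appFS)
	http.Handle("/", http.FileServer(httpFS.Dir("/")))
	_ = http.ListenAndServe(":8080", nil)
}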
288
vendor/github.com/spf13/afero/iofs.go
generated
vendored
Normal file
@ -0,0 +1,288 @@
// +build go1.16

package afero

import (
	"io"
	"io/fs"
	"os"
	"path"
	"time"
)

// IOFS adopts afero.Fs to stdlib io/fs.FS
type IOFS struct {
	Fs
}

func NewIOFS(fs Fs) IOFS {
	return IOFS{Fs: fs}
}

var (
	_ fs.FS         = IOFS{}
	_ fs.GlobFS     = IOFS{}
	_ fs.ReadDirFS  = IOFS{}
	_ fs.ReadFileFS = IOFS{}
	_ fs.StatFS     = IOFS{}
	_ fs.SubFS      = IOFS{}
)

func (iofs IOFS) Open(name string) (fs.File, error) {
	const op = "open"

	// by convention for fs.FS implementations we should perform this check
	if !fs.ValidPath(name) {
		return nil, iofs.wrapError(op, name, fs.ErrInvalid)
	}

	file, err := iofs.Fs.Open(name)
	if err != nil {
		return nil, iofs.wrapError(op, name, err)
	}

	// file should implement fs.ReadDirFile
	if _, ok := file.(fs.ReadDirFile); !ok {
		file = readDirFile{file}
	}

	return file, nil
}

func (iofs IOFS) Glob(pattern string) ([]string, error) {
	const op = "glob"

	// afero.Glob does not perform this check but it's required for implementations
	if _, err := path.Match(pattern, ""); err != nil {
		return nil, iofs.wrapError(op, pattern, err)
	}

	items, err := Glob(iofs.Fs, pattern)
	if err != nil {
		return nil, iofs.wrapError(op, pattern, err)
	}

	return items, nil
}

func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) {
	items, err := ReadDir(iofs.Fs, name)
	if err != nil {
		return nil, iofs.wrapError("readdir", name, err)
	}

	ret := make([]fs.DirEntry, len(items))
	for i := range items {
		ret[i] = dirEntry{items[i]}
	}

	return ret, nil
}

func (iofs IOFS) ReadFile(name string) ([]byte, error) {
	const op = "readfile"

	if !fs.ValidPath(name) {
		return nil, iofs.wrapError(op, name, fs.ErrInvalid)
	}

	bytes, err := ReadFile(iofs.Fs, name)
	if err != nil {
		return nil, iofs.wrapError(op, name, err)
	}

	return bytes, nil
}

func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil }

func (IOFS) wrapError(op, path string, err error) error {
	if _, ok := err.(*fs.PathError); ok {
		return err // don't need to wrap again
	}

	return &fs.PathError{
		Op:   op,
		Path: path,
		Err:  err,
	}
}

// dirEntry provides adapter from os.FileInfo to fs.DirEntry
type dirEntry struct {
	fs.FileInfo
}

var _ fs.DirEntry = dirEntry{}

func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() }

func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }

// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open
type readDirFile struct {
	File
}

var _ fs.ReadDirFile = readDirFile{}

func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
	items, err := r.File.Readdir(n)
	if err != nil {
		return nil, err
	}

	ret := make([]fs.DirEntry, len(items))
	for i := range items {
		ret[i] = dirEntry{items[i]}
	}

	return ret, nil
}

// FromIOFS adopts io/fs.FS to use it as afero.Fs
// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission
// To store modifications you may use afero.CopyOnWriteFs
type FromIOFS struct {
	fs.FS
}

var _ Fs = FromIOFS{}

func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) }

func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) }

func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error {
	return notImplemented("mkdirall", path)
}

func (f FromIOFS) Open(name string) (File, error) {
	file, err := f.FS.Open(name)
	if err != nil {
		return nil, err
	}

	return fromIOFSFile{File: file, name: name}, nil
}

func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	return f.Open(name)
}

func (f FromIOFS) Remove(name string) error {
	return notImplemented("remove", name)
}

func (f FromIOFS) RemoveAll(path string) error {
	return notImplemented("removeall", path)
}

func (f FromIOFS) Rename(oldname, newname string) error {
	return notImplemented("rename", oldname)
}

func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) }

func (f FromIOFS) Name() string { return "fromiofs" }

func (f FromIOFS) Chmod(name string, mode os.FileMode) error {
	return notImplemented("chmod", name)
}

func (f FromIOFS) Chown(name string, uid, gid int) error {
	return notImplemented("chown", name)
}

func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error {
	return notImplemented("chtimes", name)
}

type fromIOFSFile struct {
	fs.File
	name string
}

func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) {
	readerAt, ok := f.File.(io.ReaderAt)
	if !ok {
		return -1, notImplemented("readat", f.name)
	}

	return readerAt.ReadAt(p, off)
}

func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) {
	seeker, ok := f.File.(io.Seeker)
	if !ok {
		return -1, notImplemented("seek", f.name)
	}

	return seeker.Seek(offset, whence)
}

func (f fromIOFSFile) Write(p []byte) (n int, err error) {
	return -1, notImplemented("write", f.name)
}

func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) {
	return -1, notImplemented("writeat", f.name)
}

func (f fromIOFSFile) Name() string { return f.name }

func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) {
	rdfile, ok := f.File.(fs.ReadDirFile)
	if !ok {
		return nil, notImplemented("readdir", f.name)
	}

	entries, err := rdfile.ReadDir(count)
	if err != nil {
		return nil, err
	}

	ret := make([]os.FileInfo, len(entries))
	for i := range entries {
		ret[i], err = entries[i].Info()

		if err != nil {
			return nil, err
		}
	}

	return ret, nil
}

func (f fromIOFSFile) Readdirnames(n int) ([]string, error) {
	rdfile, ok := f.File.(fs.ReadDirFile)
	if !ok {
		return nil, notImplemented("readdir", f.name)
	}

	entries, err := rdfile.ReadDir(n)
	if err != nil {
		return nil, err
	}

	ret := make([]string, len(entries))
	for i := range entries {
		ret[i] = entries[i].Name()
	}

	return ret, nil
}

func (f fromIOFSFile) Sync() error { return nil }

func (f fromIOFSFile) Truncate(size int64) error {
	return notImplemented("truncate", f.name)
}

func (f fromIOFSFile) WriteString(s string) (ret int, err error) {
	return -1, notImplemented("writestring", f.name)
}

func notImplemented(op, path string) error {
	return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission}
}
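Aside (not part of the vendored diff): a minimal sketch of the Go 1.16+ bridge in the direction afero -> io/fs, using NewIOFS with the standard fs.WalkDir. The directory and file names are invented; error handling is kept deliberately thin.

package main

import (
	"fmt"
	"io/fs"

	"github.com/spf13/afero"
)

func main() {
	memFS := afero.NewMemMapFs()
	_ = afero.WriteFile(memFS, "docs/readme.txt", []byte("hi"), 0644)

	// IOFS makes the afero filesystem usable anywhere an io/fs.FS is expected.
	fsys := afero.NewIOFS(memFS)
	_ = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, d.IsDir())
		return nil
	})
}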
240
vendor/github.com/spf13/afero/ioutil.go
generated
vendored
Normal file
@ -0,0 +1,240 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package afero

import (
	"bytes"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
)

// byName implements sort.Interface.
type byName []os.FileInfo

func (f byName) Len() int           { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }

// ReadDir reads the directory named by dirname and returns
// a list of sorted directory entries.
func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ReadDir(a.Fs, dirname)
}

func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
	f, err := fs.Open(dirname)
	if err != nil {
		return nil, err
	}
	list, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return nil, err
	}
	sort.Sort(byName(list))
	return list, nil
}

// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func (a Afero) ReadFile(filename string) ([]byte, error) {
	return ReadFile(a.Fs, filename)
}

func ReadFile(fs Fs, filename string) ([]byte, error) {
	f, err := fs.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	// It's a good but not certain bet that FileInfo will tell us exactly how much to
	// read, so let's try it but be prepared for the answer to be wrong.
	var n int64

	if fi, err := f.Stat(); err == nil {
		// Don't preallocate a huge buffer, just in case.
		if size := fi.Size(); size < 1e9 {
			n = size
		}
	}
	// As initial capacity for readAll, use n + a little extra in case Size is zero,
	// and to avoid another allocation after Read has filled the buffer. The readAll
	// call will read into its allocated internal buffer cheaply. If the size was
	// wrong, we'll either waste some space off the end or reallocate as needed, but
	// in the overwhelmingly common case we'll get it just right.
	return readAll(f, n+bytes.MinRead)
}

// readAll reads from r until an error or EOF and returns the data it read
// from the internal buffer allocated with a specified capacity.
func readAll(r io.Reader, capacity int64) (b []byte, err error) {
	buf := bytes.NewBuffer(make([]byte, 0, capacity))
	// If the buffer overflows, we will get bytes.ErrTooLarge.
	// Return that as an error. Any other panic remains.
	defer func() {
		e := recover()
		if e == nil {
			return
		}
		if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
			err = panicErr
		} else {
			panic(e)
		}
	}()
	_, err = buf.ReadFrom(r)
	return buf.Bytes(), err
}

// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
	return readAll(r, bytes.MinRead)
}

// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return WriteFile(a.Fs, filename, data, perm)
}

func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
	f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var rand uint32
var randmu sync.Mutex

func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

func nextRandom() string {
	randmu.Lock()
	r := rand
	if r == 0 {
		r = reseed()
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	rand = r
	randmu.Unlock()
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

// TempFile creates a new temporary file in the directory dir,
// opens the file for reading and writing, and returns the resulting *os.File.
// The filename is generated by taking pattern and adding a random
// string to the end. If pattern includes a "*", the random string
// replaces the last "*".
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func (a Afero) TempFile(dir, pattern string) (f File, err error) {
	return TempFile(a.Fs, dir, pattern)
}

func TempFile(fs Fs, dir, pattern string) (f File, err error) {
	if dir == "" {
		dir = os.TempDir()
	}

	var prefix, suffix string
	if pos := strings.LastIndex(pattern, "*"); pos != -1 {
		prefix, suffix = pattern[:pos], pattern[pos+1:]
	} else {
		prefix = pattern
	}

	nconflict := 0
	for i := 0; i < 10000; i++ {
		name := filepath.Join(dir, prefix+nextRandom()+suffix)
		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if os.IsExist(err) {
			if nconflict++; nconflict > 10 {
				randmu.Lock()
				rand = reseed()
				randmu.Unlock()
			}
			continue
		}
		break
	}
	return
}

// TempDir creates a new temporary directory in the directory dir
// with a name beginning with prefix and returns the path of the
// new directory. If dir is the empty string, TempDir uses the
// default directory for temporary files (see os.TempDir).
// Multiple programs calling TempDir simultaneously
// will not choose the same directory. It is the caller's responsibility
// to remove the directory when no longer needed.
func (a Afero) TempDir(dir, prefix string) (name string, err error) {
	return TempDir(a.Fs, dir, prefix)
}
func TempDir(fs Fs, dir, prefix string) (name string, err error) {
	if dir == "" {
		dir = os.TempDir()
	}

	nconflict := 0
	for i := 0; i < 10000; i++ {
		try := filepath.Join(dir, prefix+nextRandom())
		err = fs.Mkdir(try, 0700)
		if os.IsExist(err) {
			if nconflict++; nconflict > 10 {
				randmu.Lock()
				rand = reseed()
				randmu.Unlock()
			}
			continue
		}
		if err == nil {
			name = try
		}
		break
	}
	return
}
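Aside (not part of the vendored diff): a minimal sketch of the ioutil-style helpers above, run against an in-memory filesystem. It assumes the afero.Afero wrapper type defined elsewhere in the package (not in this file); the file names and pattern are invented.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	af := afero.Afero{Fs: afero.NewMemMapFs()}

	// WriteFile / ReadFile mirror io/ioutil but operate on any afero.Fs.
	_ = af.WriteFile("notes.txt", []byte("vendored"), 0644)
	data, _ := af.ReadFile("notes.txt")
	fmt.Println(string(data))

	// TempFile retries random names until an unused one is found (see nextRandom above).
	tmp, _ := af.TempFile("", "example-*.txt")
	fmt.Println("temp file:", tmp.Name())
}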
27
vendor/github.com/spf13/afero/lstater.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
// Copyright © 2018 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package afero

import (
	"os"
)

// Lstater is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
// Else it will call Stat.
// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
type Lstater interface {
	LstatIfPossible(name string) (os.FileInfo, bool, error)
}
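Aside (not part of the vendored diff): a minimal sketch of how a caller can branch on the optional Lstater interface, falling back to Stat when the backing filesystem does not implement it. The /etc/hosts path is purely illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

// statPreferLstat type-asserts to afero.Lstater and falls back to Stat otherwise.
// The returned bool reports whether Lstat was actually used.
func statPreferLstat(fsys afero.Fs, name string) (os.FileInfo, bool, error) {
	if lfs, ok := fsys.(afero.Lstater); ok {
		return lfs.LstatIfPossible(name)
	}
	fi, err := fsys.Stat(name)
	return fi, false, err
}

func main() {
	fi, usedLstat, err := statPreferLstat(afero.NewOsFs(), "/etc/hosts")
	fmt.Println(fi, usedLstat, err)
}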
110
vendor/github.com/spf13/afero/match.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package afero

import (
	"path/filepath"
	"sort"
	"strings"
)

// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
// built-ins from that package.
func Glob(fs Fs, pattern string) (matches []string, err error) {
	if !hasMeta(pattern) {
		// Lstat not supported by all filesystems.
		if _, err = lstatIfPossible(fs, pattern); err != nil {
			return nil, nil
		}
		return []string{pattern}, nil
	}

	dir, file := filepath.Split(pattern)
	switch dir {
	case "":
		dir = "."
	case string(filepath.Separator):
		// nothing
	default:
		dir = dir[0 : len(dir)-1] // chop off trailing separator
	}

	if !hasMeta(dir) {
		return glob(fs, dir, file, nil)
	}

	var m []string
	m, err = Glob(fs, dir)
	if err != nil {
		return
	}
	for _, d := range m {
		matches, err = glob(fs, d, file, matches)
		if err != nil {
			return
		}
	}
	return
}

// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
	m = matches
	fi, err := fs.Stat(dir)
	if err != nil {
		return
	}
	if !fi.IsDir() {
		return
	}
	d, err := fs.Open(dir)
	if err != nil {
		return
	}
	defer d.Close()

	names, _ := d.Readdirnames(-1)
	sort.Strings(names)

	for _, n := range names {
		matched, err := filepath.Match(pattern, n)
		if err != nil {
			return m, err
		}
		if matched {
			m = append(m, filepath.Join(dir, n))
		}
	}
	return
}

// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
	// TODO(niemeyer): Should other magic characters be added here?
	return strings.ContainsAny(path, "*?[")
}
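Aside (not part of the vendored diff): a minimal sketch of afero.Glob against an in-memory filesystem. The /logs directory and log file names are invented; as the doc comment above notes, the only possible error is a malformed pattern.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fsys := afero.NewMemMapFs()
	_ = afero.WriteFile(fsys, "/logs/app1.log", nil, 0644)
	_ = afero.WriteFile(fsys, "/logs/app2.log", nil, 0644)

	// Glob walks the in-memory tree and returns matches in lexicographical order.
	matches, err := afero.Glob(fsys, "/logs/*.log")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches) // [/logs/app1.log /logs/app2.log]
}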
37
vendor/github.com/spf13/afero/mem/dir.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mem

type Dir interface {
	Len() int
	Names() []string
	Files() []*FileData
	Add(*FileData)
	Remove(*FileData)
}

func RemoveFromMemDir(dir *FileData, f *FileData) {
	dir.memDir.Remove(f)
}

func AddToMemDir(dir *FileData, f *FileData) {
	dir.memDir.Add(f)
}

func InitializeDir(d *FileData) {
	if d.memDir == nil {
		d.dir = true
		d.memDir = &DirMap{}
	}
}
43
vendor/github.com/spf13/afero/mem/dirmap.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mem

import "sort"

type DirMap map[string]*FileData

func (m DirMap) Len() int           { return len(m) }
func (m DirMap) Add(f *FileData)    { m[f.name] = f }
func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
func (m DirMap) Files() (files []*FileData) {
	for _, f := range m {
		files = append(files, f)
	}
	sort.Sort(filesSorter(files))
	return files
}

// implement sort.Interface for []*FileData
type filesSorter []*FileData

func (s filesSorter) Len() int           { return len(s) }
func (s filesSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }

func (m DirMap) Names() (names []string) {
	for x := range m {
		names = append(names, x)
	}
	return names
}
338
vendor/github.com/spf13/afero/mem/file.go
generated
vendored
Normal file
@ -0,0 +1,338 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mem

import (
	"bytes"
	"errors"
	"io"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"
)

const FilePathSeparator = string(filepath.Separator)

type File struct {
	// atomic requires 64-bit alignment for struct field access
	at           int64
	readDirCount int64
	closed       bool
	readOnly     bool
	fileData     *FileData
}

func NewFileHandle(data *FileData) *File {
	return &File{fileData: data}
}

func NewReadOnlyFileHandle(data *FileData) *File {
	return &File{fileData: data, readOnly: true}
}

func (f File) Data() *FileData {
	return f.fileData
}

type FileData struct {
	sync.Mutex
	name    string
	data    []byte
	memDir  Dir
	dir     bool
	mode    os.FileMode
	modtime time.Time
	uid     int
	gid     int
}

func (d *FileData) Name() string {
	d.Lock()
	defer d.Unlock()
	return d.name
}

func CreateFile(name string) *FileData {
	return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
}

func CreateDir(name string) *FileData {
	return &FileData{name: name, memDir: &DirMap{}, dir: true}
}

func ChangeFileName(f *FileData, newname string) {
	f.Lock()
	f.name = newname
	f.Unlock()
}

func SetMode(f *FileData, mode os.FileMode) {
	f.Lock()
	f.mode = mode
	f.Unlock()
}

func SetModTime(f *FileData, mtime time.Time) {
	f.Lock()
	setModTime(f, mtime)
	f.Unlock()
}

func setModTime(f *FileData, mtime time.Time) {
	f.modtime = mtime
}

func SetUID(f *FileData, uid int) {
	f.Lock()
	f.uid = uid
	f.Unlock()
}

func SetGID(f *FileData, gid int) {
	f.Lock()
	f.gid = gid
	f.Unlock()
}

func GetFileInfo(f *FileData) *FileInfo {
	return &FileInfo{f}
}

func (f *File) Open() error {
	atomic.StoreInt64(&f.at, 0)
	atomic.StoreInt64(&f.readDirCount, 0)
	f.fileData.Lock()
	f.closed = false
	f.fileData.Unlock()
	return nil
}

func (f *File) Close() error {
	f.fileData.Lock()
	f.closed = true
	if !f.readOnly {
		setModTime(f.fileData, time.Now())
	}
	f.fileData.Unlock()
	return nil
}

func (f *File) Name() string {
	return f.fileData.Name()
}

func (f *File) Stat() (os.FileInfo, error) {
	return &FileInfo{f.fileData}, nil
}

func (f *File) Sync() error {
	return nil
}

func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
	if !f.fileData.dir {
		return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
	}
	var outLength int64

	f.fileData.Lock()
	files := f.fileData.memDir.Files()[f.readDirCount:]
	if count > 0 {
		if len(files) < count {
			outLength = int64(len(files))
		} else {
			outLength = int64(count)
		}
		if len(files) == 0 {
			err = io.EOF
		}
	} else {
		outLength = int64(len(files))
	}
	f.readDirCount += outLength
	f.fileData.Unlock()

	res = make([]os.FileInfo, outLength)
	for i := range res {
		res[i] = &FileInfo{files[i]}
	}

	return res, err
}

func (f *File) Readdirnames(n int) (names []string, err error) {
	fi, err := f.Readdir(n)
	names = make([]string, len(fi))
	for i, f := range fi {
		_, names[i] = filepath.Split(f.Name())
	}
	return names, err
}

func (f *File) Read(b []byte) (n int, err error) {
	f.fileData.Lock()
	defer f.fileData.Unlock()
	if f.closed == true {
		return 0, ErrFileClosed
	}
	if len(b) > 0 && int(f.at) == len(f.fileData.data) {
		return 0, io.EOF
	}
	if int(f.at) > len(f.fileData.data) {
		return 0, io.ErrUnexpectedEOF
	}
	if len(f.fileData.data)-int(f.at) >= len(b) {
		n = len(b)
	} else {
		n = len(f.fileData.data) - int(f.at)
	}
	copy(b, f.fileData.data[f.at:f.at+int64(n)])
	atomic.AddInt64(&f.at, int64(n))
	return
}

func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
	prev := atomic.LoadInt64(&f.at)
	atomic.StoreInt64(&f.at, off)
	n, err = f.Read(b)
	atomic.StoreInt64(&f.at, prev)
	return
}

func (f *File) Truncate(size int64) error {
	if f.closed == true {
		return ErrFileClosed
	}
	if f.readOnly {
		return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
	}
	if size < 0 {
		return ErrOutOfRange
	}
	f.fileData.Lock()
	defer f.fileData.Unlock()
	if size > int64(len(f.fileData.data)) {
		diff := size - int64(len(f.fileData.data))
		f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
	} else {
		f.fileData.data = f.fileData.data[0:size]
	}
	setModTime(f.fileData, time.Now())
	return nil
}

func (f *File) Seek(offset int64, whence int) (int64, error) {
	if f.closed == true {
		return 0, ErrFileClosed
	}
	switch whence {
	case io.SeekStart:
		atomic.StoreInt64(&f.at, offset)
	case io.SeekCurrent:
		atomic.AddInt64(&f.at, offset)
	case io.SeekEnd:
		atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
	}
	return f.at, nil
}

func (f *File) Write(b []byte) (n int, err error) {
	if f.closed == true {
		return 0, ErrFileClosed
	}
	if f.readOnly {
		return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
	}
	n = len(b)
	cur := atomic.LoadInt64(&f.at)
	f.fileData.Lock()
	defer f.fileData.Unlock()
	diff := cur - int64(len(f.fileData.data))
	var tail []byte
	if n+int(cur) < len(f.fileData.data) {
		tail = f.fileData.data[n+int(cur):]
	}
	if diff > 0 {
		f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...)
		f.fileData.data = append(f.fileData.data, tail...)
	} else {
		f.fileData.data = append(f.fileData.data[:cur], b...)
		f.fileData.data = append(f.fileData.data, tail...)
	}
	setModTime(f.fileData, time.Now())

	atomic.AddInt64(&f.at, int64(n))
	return
}

func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
	atomic.StoreInt64(&f.at, off)
	return f.Write(b)
}

func (f *File) WriteString(s string) (ret int, err error) {
	return f.Write([]byte(s))
}

func (f *File) Info() *FileInfo {
	return &FileInfo{f.fileData}
}

type FileInfo struct {
	*FileData
}

// Implements os.FileInfo
func (s *FileInfo) Name() string {
	s.Lock()
	_, name := filepath.Split(s.name)
	s.Unlock()
	return name
}
func (s *FileInfo) Mode() os.FileMode {
	s.Lock()
	defer s.Unlock()
	return s.mode
}
func (s *FileInfo) ModTime() time.Time {
	s.Lock()
	defer s.Unlock()
	return s.modtime
}
func (s *FileInfo) IsDir() bool {
	s.Lock()
	defer s.Unlock()
	return s.dir
}
func (s *FileInfo) Sys() interface{} { return nil }
func (s *FileInfo) Size() int64 {
	if s.IsDir() {
		return int64(42)
	}
	s.Lock()
	defer s.Unlock()
	return int64(len(s.data))
}

var (
	ErrFileClosed        = errors.New("File is closed")
	ErrOutOfRange        = errors.New("Out of range")
	ErrTooLarge          = errors.New("Too large")
	ErrFileNotFound      = os.ErrNotExist
	ErrFileExists        = os.ErrExist
	ErrDestinationExists = os.ErrExist
)
404
vendor/github.com/spf13/afero/memmap.go
generated
vendored
Normal file
@ -0,0 +1,404 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package afero

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/spf13/afero/mem"
)

const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod()

type MemMapFs struct {
	mu   sync.RWMutex
	data map[string]*mem.FileData
	init sync.Once
}

func NewMemMapFs() Fs {
	return &MemMapFs{}
}

func (m *MemMapFs) getData() map[string]*mem.FileData {
	m.init.Do(func() {
		m.data = make(map[string]*mem.FileData)
		// Root should always exist, right?
		// TODO: what about windows?
		root := mem.CreateDir(FilePathSeparator)
		mem.SetMode(root, os.ModeDir|0755)
		m.data[FilePathSeparator] = root
	})
	return m.data
}

func (*MemMapFs) Name() string { return "MemMapFS" }

func (m *MemMapFs) Create(name string) (File, error) {
	name = normalizePath(name)
	m.mu.Lock()
	file := mem.CreateFile(name)
	m.getData()[name] = file
	m.registerWithParent(file, 0)
	m.mu.Unlock()
	return mem.NewFileHandle(file), nil
}

func (m *MemMapFs) unRegisterWithParent(fileName string) error {
	f, err := m.lockfreeOpen(fileName)
	if err != nil {
		return err
	}
	parent := m.findParent(f)
	if parent == nil {
		log.Panic("parent of ", f.Name(), " is nil")
	}

	parent.Lock()
	mem.RemoveFromMemDir(parent, f)
	parent.Unlock()
	return nil
}

func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
	pdir, _ := filepath.Split(f.Name())
	pdir = filepath.Clean(pdir)
	pfile, err := m.lockfreeOpen(pdir)
	if err != nil {
		return nil
	}
	return pfile
}

func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) {
	if f == nil {
		return
	}
	parent := m.findParent(f)
	if parent == nil {
		pdir := filepath.Dir(filepath.Clean(f.Name()))
		err := m.lockfreeMkdir(pdir, perm)
		if err != nil {
			//log.Println("Mkdir error:", err)
			return
		}
		parent, err = m.lockfreeOpen(pdir)
		if err != nil {
			//log.Println("Open after Mkdir error:", err)
			return
		}
	}

	parent.Lock()
	mem.InitializeDir(parent)
	mem.AddToMemDir(parent, f)
	parent.Unlock()
}

func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
	name = normalizePath(name)
	x, ok := m.getData()[name]
	if ok {
		// Only return ErrFileExists if it's a file, not a directory.
		i := mem.FileInfo{FileData: x}
		if !i.IsDir() {
			return ErrFileExists
		}
	} else {
		item := mem.CreateDir(name)
		mem.SetMode(item, os.ModeDir|perm)
		m.getData()[name] = item
		m.registerWithParent(item, perm)
	}
	return nil
}

func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
	perm &= chmodBits
	name = normalizePath(name)

	m.mu.RLock()
	_, ok := m.getData()[name]
	m.mu.RUnlock()
	if ok {
		return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
	}

	m.mu.Lock()
	item := mem.CreateDir(name)
	mem.SetMode(item, os.ModeDir|perm)
	m.getData()[name] = item
	m.registerWithParent(item, perm)
	m.mu.Unlock()

	return m.setFileMode(name, perm|os.ModeDir)
}

func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
	err := m.Mkdir(path, perm)
	if err != nil {
		if err.(*os.PathError).Err == ErrFileExists {
			return nil
		}
		return err
	}
	return nil
}

// Handle some relative paths
func normalizePath(path string) string {
	path = filepath.Clean(path)

	switch path {
	case ".":
		return FilePathSeparator
	case "..":
		return FilePathSeparator
	default:
		return path
	}
}

func (m *MemMapFs) Open(name string) (File, error) {
	f, err := m.open(name)
	if f != nil {
		return mem.NewReadOnlyFileHandle(f), err
	}
	return nil, err
}

func (m *MemMapFs) openWrite(name string) (File, error) {
	f, err := m.open(name)
	if f != nil {
		return mem.NewFileHandle(f), err
	}
	return nil, err
}

func (m *MemMapFs) open(name string) (*mem.FileData, error) {
	name = normalizePath(name)

	m.mu.RLock()
	f, ok := m.getData()[name]
	m.mu.RUnlock()
	if !ok {
		return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
	}
	return f, nil
}

func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
	name = normalizePath(name)
	f, ok := m.getData()[name]
	if ok {
		return f, nil
	} else {
		return nil, ErrFileNotFound
	}
}

func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	perm &= chmodBits
	chmod := false
	file, err := m.openWrite(name)
	if err == nil && (flag&os.O_EXCL > 0) {
		return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists}
	}
	if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
		file, err = m.Create(name)
		chmod = true
	}
	if err != nil {
		return nil, err
	}
	if flag == os.O_RDONLY {
		file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
	}
	if flag&os.O_APPEND > 0 {
		_, err = file.Seek(0, os.SEEK_END)
		if err != nil {
			file.Close()
			return nil, err
		}
	}
	if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
		err = file.Truncate(0)
		if err != nil {
			file.Close()
			return nil, err
		}
	}
	if chmod {
		return file, m.setFileMode(name, perm)
	}
	return file, nil
}

func (m *MemMapFs) Remove(name string) error {
	name = normalizePath(name)

	m.mu.Lock()
	defer m.mu.Unlock()

	if _, ok := m.getData()[name]; ok {
		err := m.unRegisterWithParent(name)
		if err != nil {
			return &os.PathError{Op: "remove", Path: name, Err: err}
		}
		delete(m.getData(), name)
	} else {
		return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
	}
	return nil
}

func (m *MemMapFs) RemoveAll(path string) error {
	path = normalizePath(path)
	m.mu.Lock()
	m.unRegisterWithParent(path)
	m.mu.Unlock()

	m.mu.RLock()
	defer m.mu.RUnlock()

	for p := range m.getData() {
		if strings.HasPrefix(p, path) {
			m.mu.RUnlock()
			m.mu.Lock()
			delete(m.getData(), p)
			m.mu.Unlock()
			m.mu.RLock()
		}
	}
	return nil
}

func (m *MemMapFs) Rename(oldname, newname string) error {
	oldname = normalizePath(oldname)
	newname = normalizePath(newname)

	if oldname == newname {
		return nil
	}

	m.mu.RLock()
	defer m.mu.RUnlock()
	if _, ok := m.getData()[oldname]; ok {
		m.mu.RUnlock()
		m.mu.Lock()
		m.unRegisterWithParent(oldname)
		fileData := m.getData()[oldname]
		delete(m.getData(), oldname)
		mem.ChangeFileName(fileData, newname)
		m.getData()[newname] = fileData
		m.registerWithParent(fileData, 0)
		m.mu.Unlock()
		m.mu.RLock()
	} else {
		return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
	}
	return nil
}

func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
	fileInfo, err := m.Stat(name)
	return fileInfo, false, err
}

func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
	f, err := m.Open(name)
	if err != nil {
		return nil, err
	}
	fi := mem.GetFileInfo(f.(*mem.File).Data())
	return fi, nil
}

func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
	mode &= chmodBits

	m.mu.RLock()
	f, ok := m.getData()[name]
	m.mu.RUnlock()
	if !ok {
		return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
	}
	prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits

	mode = prevOtherBits | mode
	return m.setFileMode(name, mode)
}

func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error {
	name = normalizePath(name)

	m.mu.RLock()
	f, ok := m.getData()[name]
	m.mu.RUnlock()
	if !ok {
		return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
	}

	m.mu.Lock()
	mem.SetMode(f, mode)
	m.mu.Unlock()

	return nil
}

func (m *MemMapFs) Chown(name string, uid, gid int) error {
	name = normalizePath(name)

	m.mu.RLock()
	f, ok := m.getData()[name]
	m.mu.RUnlock()
	if !ok {
		return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound}
	}

	mem.SetUID(f, uid)
	mem.SetGID(f, gid)

	return nil
}

func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
	name = normalizePath(name)

	m.mu.RLock()
	f, ok := m.getData()[name]
	m.mu.RUnlock()
	if !ok {
		return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
	}

	m.mu.Lock()
	mem.SetModTime(f, mtime)
	m.mu.Unlock()

	return nil
}

func (m *MemMapFs) List() {
	for _, x := range m.data {
		y := mem.FileInfo{FileData: x}
		fmt.Println(x.Name(), y.Size())
	}
}
113
vendor/github.com/spf13/afero/os.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
|||||||
|
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||||
|
// Copyright 2013 tsuru authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package afero
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ Lstater = (*OsFs)(nil)
|
||||||
|
|
||||||
|
// OsFs is a Fs implementation that uses functions provided by the os package.
|
||||||
|
//
|
||||||
|
// For details on any method, check the documentation of the os package
|
||||||
|
// (http://golang.org/pkg/os/).
|
||||||
|
type OsFs struct{}
|
||||||
|
|
||||||
|
func NewOsFs() Fs {
|
||||||
|
return &OsFs{}
|
||||||
|
}
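
Editor's sketch (not part of the vendored file): because OsFs only forwards to the os package, code written against the Fs interface can swap it for an in-memory filesystem in tests. This assumes the canonical import path and the WriteFile/ReadFile helpers.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// save depends only on the Fs interface, so callers can pass NewOsFs() in
// production and NewMemMapFs() in tests.
func save(fs afero.Fs, name string, data []byte) error {
	return afero.WriteFile(fs, name, data, 0644)
}

func main() {
	mem := afero.NewMemMapFs()
	if err := save(mem, "/greeting.txt", []byte("hello")); err != nil {
		panic(err)
	}
	b, _ := afero.ReadFile(mem, "/greeting.txt")
	fmt.Println(string(b))

	// Against the real disk, only the constructor changes:
	// err := save(afero.NewOsFs(), "/tmp/greeting.txt", []byte("hello"))
}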
|
||||||
|
|
||||||
|
func (OsFs) Name() string { return "OsFs" }
|
||||||
|
|
||||||
|
func (OsFs) Create(name string) (File, error) {
|
||||||
|
f, e := os.Create(name)
|
||||||
|
if f == nil {
|
||||||
|
// while this looks strange, we need to return a bare nil (of type nil) not
|
||||||
|
// a nil value of type *os.File, or the returned File interface will not compare equal to nil
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
return f, e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Mkdir(name string, perm os.FileMode) error {
|
||||||
|
return os.Mkdir(name, perm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) MkdirAll(path string, perm os.FileMode) error {
|
||||||
|
return os.MkdirAll(path, perm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Open(name string) (File, error) {
|
||||||
|
f, e := os.Open(name)
|
||||||
|
if f == nil {
|
||||||
|
// while this looks strange, we need to return a bare nil (of type nil) not
|
||||||
|
// a nil value of type *os.File, or the returned File interface will not compare equal to nil
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
return f, e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
|
||||||
|
f, e := os.OpenFile(name, flag, perm)
|
||||||
|
if f == nil {
|
||||||
|
// while this looks strange, we need to return a bare nil (of type nil) not
|
||||||
|
// a nil value of type *os.File, or the returned File interface will not compare equal to nil
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
return f, e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Remove(name string) error {
|
||||||
|
return os.Remove(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) RemoveAll(path string) error {
|
||||||
|
return os.RemoveAll(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Rename(oldname, newname string) error {
|
||||||
|
return os.Rename(oldname, newname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Stat(name string) (os.FileInfo, error) {
|
||||||
|
return os.Stat(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Chmod(name string, mode os.FileMode) error {
|
||||||
|
return os.Chmod(name, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Chown(name string, uid, gid int) error {
|
||||||
|
return os.Chown(name, uid, gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
|
||||||
|
return os.Chtimes(name, atime, mtime)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
|
||||||
|
fi, err := os.Lstat(name)
|
||||||
|
return fi, true, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) SymlinkIfPossible(oldname, newname string) error {
|
||||||
|
return os.Symlink(oldname, newname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (OsFs) ReadlinkIfPossible(name string) (string, error) {
|
||||||
|
return os.Readlink(name)
|
||||||
|
}
|
106
vendor/github.com/spf13/afero/path.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
// Copyright ©2015 The Go Authors
|
||||||
|
// Copyright ©2015 Steve Francia <spf@spf13.com>
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package afero
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// readDirNames reads the directory named by dirname and returns
|
||||||
|
// a sorted list of directory entries.
|
||||||
|
// adapted from https://golang.org/src/path/filepath/path.go
|
||||||
|
func readDirNames(fs Fs, dirname string) ([]string, error) {
|
||||||
|
f, err := fs.Open(dirname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
names, err := f.Readdirnames(-1)
|
||||||
|
f.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
return names, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// walk recursively descends path, calling walkFn
|
||||||
|
// adapted from https://golang.org/src/path/filepath/path.go
|
||||||
|
func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
|
||||||
|
err := walkFn(path, info, nil)
|
||||||
|
if err != nil {
|
||||||
|
if info.IsDir() && err == filepath.SkipDir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !info.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
names, err := readDirNames(fs, path)
|
||||||
|
if err != nil {
|
||||||
|
return walkFn(path, info, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, name := range names {
|
||||||
|
filename := filepath.Join(path, name)
|
||||||
|
fileInfo, err := lstatIfPossible(fs, filename)
|
||||||
|
if err != nil {
|
||||||
|
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
err = walk(fs, filename, fileInfo, walkFn)
|
||||||
|
if err != nil {
|
||||||
|
if !fileInfo.IsDir() || err != filepath.SkipDir {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the filesystem supports it, use Lstat, else use fs.Stat
|
||||||
|
func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
|
||||||
|
if lfs, ok := fs.(Lstater); ok {
|
||||||
|
fi, _, err := lfs.LstatIfPossible(path)
|
||||||
|
return fi, err
|
||||||
|
}
|
||||||
|
return fs.Stat(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk walks the file tree rooted at root, calling walkFn for each file or
|
||||||
|
// directory in the tree, including root. All errors that arise visiting files
|
||||||
|
// and directories are filtered by walkFn. The files are walked in lexical
|
||||||
|
// order, which makes the output deterministic but means that for very
|
||||||
|
// large directories Walk can be inefficient.
|
||||||
|
// Walk does not follow symbolic links.
|
||||||
|
|
||||||
|
func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
|
||||||
|
return Walk(a.Fs, root, walkFn)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
|
||||||
|
info, err := lstatIfPossible(fs, root)
|
||||||
|
if err != nil {
|
||||||
|
return walkFn(root, nil, err)
|
||||||
|
}
|
||||||
|
return walk(fs, root, info, walkFn)
|
||||||
|
}
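
Editor's sketch (not part of the vendored file): Walk over a small in-memory tree, assuming the canonical import path and the WriteFile helper.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	fs.MkdirAll("/a/b", 0755)
	afero.WriteFile(fs, "/a/b/file.txt", []byte("x"), 0644)
	afero.WriteFile(fs, "/a/other.txt", []byte("y"), 0644)

	// Entries are visited in lexical order and symlinks are not followed,
	// as documented above.
	err := afero.Walk(fs, "/a", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
	if err != nil {
		panic(err)
	}
}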
|
96
vendor/github.com/spf13/afero/readonlyfs.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
package afero
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ Lstater = (*ReadOnlyFs)(nil)
|
||||||
|
|
||||||
|
type ReadOnlyFs struct {
|
||||||
|
source Fs
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewReadOnlyFs(source Fs) Fs {
|
||||||
|
return &ReadOnlyFs{source: source}
|
||||||
|
}
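
Editor's sketch (not part of the vendored file): wrapping a writable filesystem so that reads pass through while every mutating call is rejected with EPERM, as implemented below. Assumes the canonical import path and the WriteFile/ReadFile helpers.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/config.yaml", []byte("debug: true"), 0644)

	ro := afero.NewReadOnlyFs(base)

	// Reads are delegated to the source filesystem.
	data, _ := afero.ReadFile(ro, "/config.yaml")
	fmt.Println(string(data))

	// Mutating calls are blocked.
	if err := ro.Remove("/config.yaml"); err != nil {
		fmt.Println("remove blocked:", err)
	}
}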
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
|
||||||
|
return ReadDir(r.source, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Chown(n string, uid, gid int) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Name() string {
|
||||||
|
return "ReadOnlyFilter"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
|
||||||
|
return r.source.Stat(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
|
||||||
|
if lsf, ok := r.source.(Lstater); ok {
|
||||||
|
return lsf.LstatIfPossible(name)
|
||||||
|
}
|
||||||
|
fi, err := r.Stat(name)
|
||||||
|
return fi, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error {
|
||||||
|
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) {
|
||||||
|
if srdr, ok := r.source.(LinkReader); ok {
|
||||||
|
return srdr.ReadlinkIfPossible(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Rename(o, n string) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) RemoveAll(p string) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Remove(n string) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
|
||||||
|
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
|
||||||
|
return nil, syscall.EPERM
|
||||||
|
}
|
||||||
|
return r.source.OpenFile(name, flag, perm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Open(n string) (File, error) {
|
||||||
|
return r.source.Open(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
|
||||||
|
return syscall.EPERM
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReadOnlyFs) Create(n string) (File, error) {
|
||||||
|
return nil, syscall.EPERM
|
||||||
|
}
|
224
vendor/github.com/spf13/afero/regexpfs.go
generated
vendored
Normal file
@ -0,0 +1,224 @@
|
|||||||
|
package afero
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The RegexpFs filters files (not directories) by regular expression. Only
|
||||||
|
// files matching the given regexp will be allowed, all others get an ENOENT error (
|
||||||
|
// "No such file or directory").
|
||||||
|
//
|
||||||
|
type RegexpFs struct {
|
||||||
|
re *regexp.Regexp
|
||||||
|
source Fs
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
|
||||||
|
return &RegexpFs{source: source, re: re}
|
||||||
|
}
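
Editor's sketch (not part of the vendored file): filtering a filesystem down to files whose names match a pattern; directories remain visible, as the code below allows. Assumes the canonical import path and the WriteFile helper.

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/keep.txt", []byte("keep"), 0644)
	afero.WriteFile(base, "/skip.bin", []byte{0x00}, 0644)

	filtered := afero.NewRegexpFs(base, regexp.MustCompile(`\.txt$`))

	if _, err := filtered.Open("/keep.txt"); err == nil {
		fmt.Println("keep.txt visible")
	}
	if _, err := filtered.Open("/skip.bin"); err != nil {
		fmt.Println("skip.bin hidden:", err)
	}
}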
|
||||||
|
|
||||||
|
type RegexpFile struct {
|
||||||
|
f File
|
||||||
|
re *regexp.Regexp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) matchesName(name string) error {
|
||||||
|
if r.re == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if r.re.MatchString(name) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return syscall.ENOENT
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) dirOrMatches(name string) error {
|
||||||
|
dir, err := IsDir(r.source, name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return r.matchesName(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
|
||||||
|
if err := r.dirOrMatches(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return r.source.Chtimes(name, a, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
|
||||||
|
if err := r.dirOrMatches(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return r.source.Chmod(name, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Chown(name string, uid, gid int) error {
|
||||||
|
if err := r.dirOrMatches(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return r.source.Chown(name, uid, gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Name() string {
|
||||||
|
return "RegexpFs"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
|
||||||
|
if err := r.dirOrMatches(name); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return r.source.Stat(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Rename(oldname, newname string) error {
|
||||||
|
dir, err := IsDir(r.source, oldname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err := r.matchesName(oldname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := r.matchesName(newname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return r.source.Rename(oldname, newname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) RemoveAll(p string) error {
|
||||||
|
dir, err := IsDir(r.source, p)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !dir {
|
||||||
|
if err := r.matchesName(p); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return r.source.RemoveAll(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Remove(name string) error {
|
||||||
|
if err := r.dirOrMatches(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return r.source.Remove(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
|
||||||
|
if err := r.dirOrMatches(name); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return r.source.OpenFile(name, flag, perm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Open(name string) (File, error) {
|
||||||
|
dir, err := IsDir(r.source, name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !dir {
|
||||||
|
if err := r.matchesName(name); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f, err := r.source.Open(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &RegexpFile{f: f, re: r.re}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
|
||||||
|
return r.source.Mkdir(n, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
|
||||||
|
return r.source.MkdirAll(n, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RegexpFs) Create(name string) (File, error) {
|
||||||
|
if err := r.matchesName(name); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return r.source.Create(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Close() error {
|
||||||
|
return f.f.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Read(s []byte) (int, error) {
|
||||||
|
return f.f.Read(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
|
||||||
|
return f.f.ReadAt(s, o)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
|
||||||
|
return f.f.Seek(o, w)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Write(s []byte) (int, error) {
|
||||||
|
return f.f.Write(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
|
||||||
|
return f.f.WriteAt(s, o)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Name() string {
|
||||||
|
return f.f.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
|
||||||
|
var rfi []os.FileInfo
|
||||||
|
rfi, err = f.f.Readdir(c)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, i := range rfi {
|
||||||
|
if i.IsDir() || f.re.MatchString(i.Name()) {
|
||||||
|
fi = append(fi, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
|
||||||
|
fi, err := f.Readdir(c)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, s := range fi {
|
||||||
|
n = append(n, s.Name())
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Stat() (os.FileInfo, error) {
|
||||||
|
return f.f.Stat()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Sync() error {
|
||||||
|
return f.f.Sync()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) Truncate(s int64) error {
|
||||||
|
return f.f.Truncate(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *RegexpFile) WriteString(s string) (int, error) {
|
||||||
|
return f.f.WriteString(s)
|
||||||
|
}
|
55
vendor/github.com/spf13/afero/symlink.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
// Copyright © 2018 Steve Francia <spf@spf13.com>.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package afero
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Symlinker is an optional interface in Afero. It is only implemented by the
|
||||||
|
// filesystems saying so.
|
||||||
|
// It indicates support for 3 symlink related interfaces that implement the
|
||||||
|
// behaviors of the os methods:
|
||||||
|
// - Lstat
|
||||||
|
// - Symlink, and
|
||||||
|
// - Readlink
|
||||||
|
type Symlinker interface {
|
||||||
|
Lstater
|
||||||
|
Linker
|
||||||
|
LinkReader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Linker is an optional interface in Afero. It is only implemented by the
|
||||||
|
// filesystems saying so.
|
||||||
|
// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem,
|
||||||
|
// or the filesystem otherwise supports symlinks.
|
||||||
|
type Linker interface {
|
||||||
|
SymlinkIfPossible(oldname, newname string) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system
|
||||||
|
// does not support symlinks either directly or through its delegated filesystem.
|
||||||
|
// As expressed by support for the Linker interface.
|
||||||
|
var ErrNoSymlink = errors.New("symlink not supported")
|
||||||
|
|
||||||
|
// LinkReader is an optional interface in Afero. It is only implemented by the
|
||||||
|
// filesystems saying so.
|
||||||
|
type LinkReader interface {
|
||||||
|
ReadlinkIfPossible(name string) (string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrNoReadlink is the error that will be wrapped in an os.PathError if a file system
|
||||||
|
// does not support the readlink operation either directly or through its delegated filesystem.
|
||||||
|
// As expressed by support for the LinkReader interface.
|
||||||
|
var ErrNoReadlink = errors.New("readlink not supported")
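
Editor's sketch (not part of the vendored file): the optional interfaces above are meant to be feature-detected with a type assertion rather than assumed. Assumes the canonical import path.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// symlinkIfSupported checks for the optional Linker interface instead of
// assuming every Fs can create symlinks.
func symlinkIfSupported(fs afero.Fs, oldname, newname string) error {
	if linker, ok := fs.(afero.Linker); ok {
		return linker.SymlinkIfPossible(oldname, newname)
	}
	return afero.ErrNoSymlink
}

func main() {
	// MemMapFs does not implement Linker, so this reports the sentinel error.
	err := symlinkIfSupported(afero.NewMemMapFs(), "/target", "/link")
	fmt.Println(err)
}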
|
331
vendor/github.com/spf13/afero/unionFile.go
generated
vendored
Normal file
@ -0,0 +1,331 @@
|
|||||||
|
package afero
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The UnionFile implements the afero.File interface and will be returned
|
||||||
|
// when reading a directory present at least in the overlay or opening a file
|
||||||
|
// for writing.
|
||||||
|
//
|
||||||
|
// The calls to
|
||||||
|
// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
|
||||||
|
// base and the overlay - for files present in both layers, only those
|
||||||
|
// from the overlay will be used.
|
||||||
|
//
|
||||||
|
// When opening files for writing (Create() / OpenFile() with the right flags)
|
||||||
|
// the operations will be done in both layers, starting with the overlay. A
|
||||||
|
// successful read in the overlay will move the cursor position in the base layer
|
||||||
|
// by the number of bytes read.
|
||||||
|
type UnionFile struct {
|
||||||
|
Base File
|
||||||
|
Layer File
|
||||||
|
Merger DirsMerger
|
||||||
|
off int
|
||||||
|
files []os.FileInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Close() error {
|
||||||
|
// first close base, so we have a newer timestamp in the overlay. If we'd close
|
||||||
|
// the overlay first, we'd get a cacheStale the next time we access this file
|
||||||
|
// -> cache would be useless ;-)
|
||||||
|
if f.Base != nil {
|
||||||
|
f.Base.Close()
|
||||||
|
}
|
||||||
|
if f.Layer != nil {
|
||||||
|
return f.Layer.Close()
|
||||||
|
}
|
||||||
|
return BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Read(s []byte) (int, error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
n, err := f.Layer.Read(s)
|
||||||
|
if (err == nil || err == io.EOF) && f.Base != nil {
|
||||||
|
// advance the file position also in the base file, the next
|
||||||
|
// call may be a write at this position (or a seek with SEEK_CUR)
|
||||||
|
if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
|
||||||
|
// only overwrite err in case the seek fails: we need to
|
||||||
|
// report an eventual io.EOF to the caller
|
||||||
|
err = seekErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.Read(s)
|
||||||
|
}
|
||||||
|
return 0, BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
n, err := f.Layer.ReadAt(s, o)
|
||||||
|
if (err == nil || err == io.EOF) && f.Base != nil {
|
||||||
|
_, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.ReadAt(s, o)
|
||||||
|
}
|
||||||
|
return 0, BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
pos, err = f.Layer.Seek(o, w)
|
||||||
|
if (err == nil || err == io.EOF) && f.Base != nil {
|
||||||
|
_, err = f.Base.Seek(o, w)
|
||||||
|
}
|
||||||
|
return pos, err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.Seek(o, w)
|
||||||
|
}
|
||||||
|
return 0, BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Write(s []byte) (n int, err error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
n, err = f.Layer.Write(s)
|
||||||
|
if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
|
||||||
|
_, err = f.Base.Write(s)
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.Write(s)
|
||||||
|
}
|
||||||
|
return 0, BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
n, err = f.Layer.WriteAt(s, o)
|
||||||
|
if err == nil && f.Base != nil {
|
||||||
|
_, err = f.Base.WriteAt(s, o)
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.WriteAt(s, o)
|
||||||
|
}
|
||||||
|
return 0, BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Name() string {
|
||||||
|
if f.Layer != nil {
|
||||||
|
return f.Layer.Name()
|
||||||
|
}
|
||||||
|
return f.Base.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirsMerger is how UnionFile weaves two directories together.
|
||||||
|
// It takes the FileInfo slices from the layer and the base and returns a
|
||||||
|
// single view.
|
||||||
|
type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
|
||||||
|
|
||||||
|
var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
|
||||||
|
var files = make(map[string]os.FileInfo)
|
||||||
|
|
||||||
|
for _, fi := range lofi {
|
||||||
|
files[fi.Name()] = fi
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fi := range bofi {
|
||||||
|
if _, exists := files[fi.Name()]; !exists {
|
||||||
|
files[fi.Name()] = fi
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rfi := make([]os.FileInfo, len(files))
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for _, fi := range files {
|
||||||
|
rfi[i] = fi
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
return rfi, nil
|
||||||
|
|
||||||
|
}
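
Editor's sketch (not part of the vendored file): a custom DirsMerger is simply a function with the signature above; assigning it to the exported Merger field of a UnionFile replaces the default merge. How the UnionFile itself is obtained (for example from a copy-on-write filesystem) is outside this sketch, and the import path is the canonical one.

package main

import (
	"os"

	"github.com/spf13/afero"
)

// layerOnly ignores the base listing entirely and keeps only the overlay
// entries, unlike the default merge above, which unions both sides.
func layerOnly(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
	return lofi, nil
}

func main() {
	var f afero.UnionFile // normally produced by a union/copy-on-write Fs
	f.Merger = layerOnly
	_ = f
}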
|
||||||
|
|
||||||
|
// Readdir will weave the two directories together and
|
||||||
|
// return a single view of the overlayed directories.
|
||||||
|
// At the end of the directory view, the error is io.EOF if c > 0.
|
||||||
|
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
|
||||||
|
var merge DirsMerger = f.Merger
|
||||||
|
if merge == nil {
|
||||||
|
merge = defaultUnionMergeDirsFn
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.off == 0 {
|
||||||
|
var lfi []os.FileInfo
|
||||||
|
if f.Layer != nil {
|
||||||
|
lfi, err = f.Layer.Readdir(-1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var bfi []os.FileInfo
|
||||||
|
if f.Base != nil {
|
||||||
|
bfi, err = f.Base.Readdir(-1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
merged, err := merge(lfi, bfi)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
f.files = append(f.files, merged...)
|
||||||
|
}
|
||||||
|
files := f.files[f.off:]
|
||||||
|
|
||||||
|
if c <= 0 {
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(files) == 0 {
|
||||||
|
return nil, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
if c > len(files) {
|
||||||
|
c = len(files)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() { f.off += c }()
|
||||||
|
return files[:c], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Readdirnames(c int) ([]string, error) {
|
||||||
|
rfi, err := f.Readdir(c)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var names []string
|
||||||
|
for _, fi := range rfi {
|
||||||
|
names = append(names, fi.Name())
|
||||||
|
}
|
||||||
|
return names, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Stat() (os.FileInfo, error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
return f.Layer.Stat()
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.Stat()
|
||||||
|
}
|
||||||
|
return nil, BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Sync() (err error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
err = f.Layer.Sync()
|
||||||
|
if err == nil && f.Base != nil {
|
||||||
|
err = f.Base.Sync()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.Sync()
|
||||||
|
}
|
||||||
|
return BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) Truncate(s int64) (err error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
err = f.Layer.Truncate(s)
|
||||||
|
if err == nil && f.Base != nil {
|
||||||
|
err = f.Base.Truncate(s)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.Truncate(s)
|
||||||
|
}
|
||||||
|
return BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *UnionFile) WriteString(s string) (n int, err error) {
|
||||||
|
if f.Layer != nil {
|
||||||
|
n, err = f.Layer.WriteString(s)
|
||||||
|
if err == nil && f.Base != nil {
|
||||||
|
_, err = f.Base.WriteString(s)
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if f.Base != nil {
|
||||||
|
return f.Base.WriteString(s)
|
||||||
|
}
|
||||||
|
return 0, BADFD
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFile(base Fs, layer Fs, name string, bfh File) error {
|
||||||
|
// First make sure the directory exists
|
||||||
|
exists, err := Exists(layer, filepath.Dir(name))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !exists {
|
||||||
|
err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the file on the overlay
|
||||||
|
lfh, err := layer.Create(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n, err := io.Copy(lfh, bfh)
|
||||||
|
if err != nil {
|
||||||
|
// If anything fails, clean up the file
|
||||||
|
layer.Remove(name)
|
||||||
|
lfh.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
bfi, err := bfh.Stat()
|
||||||
|
if err != nil || bfi.Size() != n {
|
||||||
|
layer.Remove(name)
|
||||||
|
lfh.Close()
|
||||||
|
return syscall.EIO
|
||||||
|
}
|
||||||
|
|
||||||
|
err = lfh.Close()
|
||||||
|
if err != nil {
|
||||||
|
layer.Remove(name)
|
||||||
|
lfh.Close()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyToLayer(base Fs, layer Fs, name string) error {
|
||||||
|
bfh, err := base.Open(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer bfh.Close()
|
||||||
|
|
||||||
|
return copyFile(base, layer, name, bfh)
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error {
|
||||||
|
bfh, err := base.OpenFile(name, flag, perm)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer bfh.Close()
|
||||||
|
|
||||||
|
return copyFile(base, layer, name, bfh)
|
||||||
|
}
|
330
vendor/github.com/spf13/afero/util.go
generated
vendored
Normal file
@ -0,0 +1,330 @@
|
|||||||
|
// Copyright ©2015 Steve Francia <spf@spf13.com>
|
||||||
|
// Portions Copyright ©2015 The Hugo Authors
|
||||||
|
// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package afero
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
"golang.org/x/text/unicode/norm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FilePathSeparator is the filepath separator defined by filepath.Separator.
|
||||||
|
const FilePathSeparator = string(filepath.Separator)
|
||||||
|
|
||||||
|
// WriteReader takes a reader and a path and writes the content to that path.
|
||||||
|
func (a Afero) WriteReader(path string, r io.Reader) (err error) {
|
||||||
|
return WriteReader(a.Fs, path, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func WriteReader(fs Fs, path string, r io.Reader) (err error) {
|
||||||
|
dir, _ := filepath.Split(path)
|
||||||
|
ospath := filepath.FromSlash(dir)
|
||||||
|
|
||||||
|
if ospath != "" {
|
||||||
|
err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
|
||||||
|
if err != nil {
|
||||||
|
if err != os.ErrExist {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := fs.Create(path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
_, err = io.Copy(file, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SafeWriteReader is the same as WriteReader but returns an error if the path already exists.
|
||||||
|
func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
|
||||||
|
return SafeWriteReader(a.Fs, path, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
|
||||||
|
dir, _ := filepath.Split(path)
|
||||||
|
ospath := filepath.FromSlash(dir)
|
||||||
|
|
||||||
|
if ospath != "" {
|
||||||
|
err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
exists, err := Exists(fs, path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if exists {
|
||||||
|
return fmt.Errorf("%v already exists", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := fs.Create(path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
_, err = io.Copy(file, r)
|
||||||
|
return
|
||||||
|
}
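
Editor's sketch (not part of the vendored file): the difference between the two helpers above. Assumes the canonical import path.

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteReader creates intermediate directories and overwrites silently.
	if err := afero.WriteReader(fs, "/data/out.txt", strings.NewReader("v1")); err != nil {
		panic(err)
	}

	// SafeWriteReader refuses to overwrite an existing path.
	err := afero.SafeWriteReader(fs, "/data/out.txt", strings.NewReader("v2"))
	fmt.Println(err) // "/data/out.txt already exists"
}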
|
||||||
|
|
||||||
|
func (a Afero) GetTempDir(subPath string) string {
|
||||||
|
return GetTempDir(a.Fs, subPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTempDir returns the default temp directory with trailing slash
|
||||||
|
// If subPath is not empty, it is created recursively with mode 0777 (rwxrwxrwx).
|
||||||
|
func GetTempDir(fs Fs, subPath string) string {
|
||||||
|
addSlash := func(p string) string {
|
||||||
|
if FilePathSeparator != p[len(p)-1:] {
|
||||||
|
p = p + FilePathSeparator
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
dir := addSlash(os.TempDir())
|
||||||
|
|
||||||
|
if subPath != "" {
|
||||||
|
// preserve windows backslash :-(
|
||||||
|
if FilePathSeparator == "\\" {
|
||||||
|
subPath = strings.Replace(subPath, "\\", "____", -1)
|
||||||
|
}
|
||||||
|
dir = dir + UnicodeSanitize(subPath)
|
||||||
|
if FilePathSeparator == "\\" {
|
||||||
|
dir = strings.Replace(dir, "____", "\\", -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if exists, _ := Exists(fs, dir); exists {
|
||||||
|
return addSlash(dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := fs.MkdirAll(dir, 0777)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
dir = addSlash(dir)
|
||||||
|
}
|
||||||
|
return dir
|
||||||
|
}
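
Editor's sketch (not part of the vendored file): requesting a namespaced temp directory. Assumes the canonical import path and the DirExists helper.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// The returned path always carries a trailing separator; the subdirectory
	// is created on fs (mode 0777) if it does not already exist.
	dir := afero.GetTempDir(fs, "myapp/cache")
	fmt.Println(dir)

	exists, _ := afero.DirExists(fs, dir)
	fmt.Println("created:", exists)
}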
|
||||||
|
|
||||||
|
// UnicodeSanitize rewrites a string to remove non-standard path characters.
|
||||||
|
func UnicodeSanitize(s string) string {
|
||||||
|
source := []rune(s)
|
||||||
|
target := make([]rune, 0, len(source))
|
||||||
|
|
||||||
|
for _, r := range source {
|
||||||
|
if unicode.IsLetter(r) ||
|
||||||
|
unicode.IsDigit(r) ||
|
||||||
|
unicode.IsMark(r) ||
|
||||||
|
r == '.' ||
|
||||||
|
r == '/' ||
|
||||||
|
r == '\\' ||
|
||||||
|
r == '_' ||
|
||||||
|
r == '-' ||
|
||||||
|
r == '%' ||
|
||||||
|
r == ' ' ||
|
||||||
|
r == '#' {
|
||||||
|
target = append(target, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transform characters with accents into plain forms.
|
||||||
|
func NeuterAccents(s string) string {
|
||||||
|
t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
|
||||||
|
result, _, _ := transform.String(t, string(s))
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func isMn(r rune) bool {
|
||||||
|
return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
|
||||||
|
return FileContainsBytes(a.Fs, filename, subslice)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if a file contains a specified byte slice.
|
||||||
|
func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
|
||||||
|
f, err := fs.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
return readerContainsAny(f, subslice), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
|
||||||
|
return FileContainsAnyBytes(a.Fs, filename, subslices)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if a file contains any of the specified byte slices.
|
||||||
|
func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
|
||||||
|
f, err := fs.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
return readerContainsAny(f, subslices...), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readerContainsAny reports whether any of the subslices is within r.
|
||||||
|
func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
|
||||||
|
|
||||||
|
if r == nil || len(subslices) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
largestSlice := 0
|
||||||
|
|
||||||
|
for _, sl := range subslices {
|
||||||
|
if len(sl) > largestSlice {
|
||||||
|
largestSlice = len(sl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if largestSlice == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
bufflen := largestSlice * 4
|
||||||
|
halflen := bufflen / 2
|
||||||
|
buff := make([]byte, bufflen)
|
||||||
|
var err error
|
||||||
|
var n, i int
|
||||||
|
|
||||||
|
for {
|
||||||
|
i++
|
||||||
|
if i == 1 {
|
||||||
|
n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
|
||||||
|
} else {
|
||||||
|
if i != 2 {
|
||||||
|
// shift left to catch overlapping matches
|
||||||
|
copy(buff[:], buff[halflen:])
|
||||||
|
}
|
||||||
|
n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
|
||||||
|
}
|
||||||
|
|
||||||
|
if n > 0 {
|
||||||
|
for _, sl := range subslices {
|
||||||
|
if bytes.Contains(buff, sl) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
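
Editor's sketch (not part of the vendored file): the public wrappers over the scan above; the reader is consumed in overlapping chunks, so matches that straddle a buffer boundary are still found. Assumes the canonical import path and the WriteFile helper.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/app.log", []byte("WARN disk almost full\nERROR out of space\n"), 0644)

	found, err := afero.FileContainsAnyBytes(fs, "/app.log", [][]byte{
		[]byte("ERROR"),
		[]byte("FATAL"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("found:", found) // true
}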
|
||||||
|
|
||||||
|
func (a Afero) DirExists(path string) (bool, error) {
|
||||||
|
return DirExists(a.Fs, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirExists checks if a path exists and is a directory.
|
||||||
|
func DirExists(fs Fs, path string) (bool, error) {
|
||||||
|
fi, err := fs.Stat(path)
|
||||||
|
if err == nil && fi.IsDir() {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Afero) IsDir(path string) (bool, error) {
|
||||||
|
return IsDir(a.Fs, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsDir checks if a given path is a directory.
|
||||||
|
func IsDir(fs Fs, path string) (bool, error) {
|
||||||
|
fi, err := fs.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return fi.IsDir(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Afero) IsEmpty(path string) (bool, error) {
|
||||||
|
return IsEmpty(a.Fs, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmpty checks if a given file or directory is empty.
|
||||||
|
func IsEmpty(fs Fs, path string) (bool, error) {
|
||||||
|
if b, _ := Exists(fs, path); !b {
|
||||||
|
return false, fmt.Errorf("%q path does not exist", path)
|
||||||
|
}
|
||||||
|
fi, err := fs.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if fi.IsDir() {
|
||||||
|
f, err := fs.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
list, err := f.Readdir(-1)
|
||||||
|
return len(list) == 0, err
|
||||||
|
}
|
||||||
|
return fi.Size() == 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Afero) Exists(path string) (bool, error) {
|
||||||
|
return Exists(a.Fs, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if a file or directory exists.
|
||||||
|
func Exists(fs Fs, path string) (bool, error) {
|
||||||
|
_, err := fs.Stat(path)
|
||||||
|
if err == nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
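
Editor's sketch (not part of the vendored file): the existence and directory helpers above, used together. Assumes the canonical import path and the WriteFile helper.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	fs.MkdirAll("/empty", 0755)
	afero.WriteFile(fs, "/file.txt", []byte("x"), 0644)

	ok, _ := afero.Exists(fs, "/file.txt")
	dir, _ := afero.IsDir(fs, "/empty")
	empty, _ := afero.IsEmpty(fs, "/empty")
	fmt.Println(ok, dir, empty) // true true true
}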
|
||||||
|
|
||||||
|
func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
|
||||||
|
combinedPath := filepath.Join(basePathFs.path, relativePath)
|
||||||
|
if parent, ok := basePathFs.source.(*BasePathFs); ok {
|
||||||
|
return FullBaseFsPath(parent, combinedPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
return combinedPath
|
||||||
|
}
|
3
vendor/golang.org/x/text/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# This source code refers to The Go Authors for copyright purposes.
|
||||||
|
# The master list of authors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/golang.org/x/text/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# This source code was written by the Go contributors.
|
||||||
|
# The master list of contributors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/golang.org/x/text/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/text/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
Additional IP Rights Grant (Patents)
|
||||||
|
|
||||||
|
"This implementation" means the copyrightable works distributed by
|
||||||
|
Google as part of the Go project.
|
||||||
|
|
||||||
|
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||||
|
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||||
|
patent license to make, have made, use, offer to sell, sell, import,
|
||||||
|
transfer and otherwise run, modify and propagate the contents of this
|
||||||
|
implementation of Go, where such license applies only to those patent
|
||||||
|
claims, both currently owned or controlled by Google and acquired in
|
||||||
|
the future, licensable by Google that are necessarily infringed by this
|
||||||
|
implementation of Go. This grant does not include claims that would be
|
||||||
|
infringed only as a consequence of further modification of this
|
||||||
|
implementation. If you or your agent or exclusive licensee institute or
|
||||||
|
order or agree to the institution of patent litigation against any
|
||||||
|
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||||
|
that this implementation of Go or any code incorporated within this
|
||||||
|
implementation of Go constitutes direct or contributory patent
|
||||||
|
infringement, or inducement of patent infringement, then any patent
|
||||||
|
rights granted to you under this License for this implementation of Go
|
||||||
|
shall terminate as of the date such litigation is filed.
|
709
vendor/golang.org/x/text/transform/transform.go
generated
vendored
Normal file
@ -0,0 +1,709 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package transform provides reader and writer wrappers that transform the
|
||||||
|
// bytes passing through as well as various transformations. Example
|
||||||
|
// transformations provided by other packages include normalization and
|
||||||
|
// conversion between character sets.
|
||||||
|
package transform // import "golang.org/x/text/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrShortDst means that the destination buffer was too short to
|
||||||
|
// receive all of the transformed bytes.
|
||||||
|
ErrShortDst = errors.New("transform: short destination buffer")
|
||||||
|
|
||||||
|
// ErrShortSrc means that the source buffer has insufficient data to
|
||||||
|
// complete the transformation.
|
||||||
|
ErrShortSrc = errors.New("transform: short source buffer")
|
||||||
|
|
||||||
|
// ErrEndOfSpan means that the input and output (the transformed input)
|
||||||
|
// are not identical.
|
||||||
|
ErrEndOfSpan = errors.New("transform: input and output are not identical")
|
||||||
|
|
||||||
|
// errInconsistentByteCount means that Transform returned success (nil
|
||||||
|
// error) but also returned nSrc inconsistent with the src argument.
|
||||||
|
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
|
||||||
|
|
||||||
|
// errShortInternal means that an internal buffer is not large enough
|
||||||
|
// to make progress and the Transform operation must be aborted.
|
||||||
|
errShortInternal = errors.New("transform: short internal buffer")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Transformer transforms bytes.
|
||||||
|
type Transformer interface {
|
||||||
|
// Transform writes to dst the transformed bytes read from src, and
|
||||||
|
// returns the number of dst bytes written and src bytes read. The
|
||||||
|
// atEOF argument tells whether src represents the last bytes of the
|
||||||
|
// input.
|
||||||
|
//
|
||||||
|
// Callers should always process the nDst bytes produced and account
|
||||||
|
// for the nSrc bytes consumed before considering the error err.
|
||||||
|
//
|
||||||
|
// A nil error means that all of the transformed bytes (whether freshly
|
||||||
|
// transformed from src or left over from previous Transform calls)
|
||||||
|
// were written to dst. A nil error can be returned regardless of
|
||||||
|
// whether atEOF is true. If err is nil then nSrc must equal len(src);
|
||||||
|
// the converse is not necessarily true.
|
||||||
|
//
|
||||||
|
// ErrShortDst means that dst was too short to receive all of the
|
||||||
|
// transformed bytes. ErrShortSrc means that src had insufficient data
|
||||||
|
// to complete the transformation. If both conditions apply, then
|
||||||
|
// either error may be returned. Other than the error conditions listed
|
||||||
|
// here, implementations are free to report other errors that arise.
|
||||||
|
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
|
||||||
|
|
||||||
|
// Reset resets the state and allows a Transformer to be reused.
|
||||||
|
Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanningTransformer extends the Transformer interface with a Span method
|
||||||
|
// that determines how much of the input already conforms to the Transformer.
|
||||||
|
type SpanningTransformer interface {
|
||||||
|
Transformer
|
||||||
|
|
||||||
|
// Span returns a position in src such that transforming src[:n] results in
|
||||||
|
// identical output src[:n] for these bytes. It does not necessarily return
|
||||||
|
// the largest such n. The atEOF argument tells whether src represents the
|
||||||
|
// last bytes of the input.
|
||||||
|
//
|
||||||
|
// Callers should always account for the n bytes consumed before
|
||||||
|
// considering the error err.
|
||||||
|
//
|
||||||
|
// A nil error means that all input bytes are known to be identical to the
|
||||||
|
// output produced by the Transformer. A nil error can be returned
|
||||||
|
// regardless of whether atEOF is true. If err is nil, then n must
|
||||||
|
// equal len(src); the converse is not necessarily true.
|
||||||
|
//
|
||||||
|
// ErrEndOfSpan means that the Transformer output may differ from the
|
||||||
|
// input after n bytes. Note that n may be len(src), meaning that the output
|
||||||
|
// would contain additional bytes after otherwise identical output.
|
||||||
|
// ErrShortSrc means that src had insufficient data to determine whether the
|
||||||
|
// remaining bytes would change. Other than the error conditions listed
|
||||||
|
// here, implementations are free to report other errors that arise.
|
||||||
|
//
|
||||||
|
// Calling Span can modify the Transformer state as a side effect. In
|
||||||
|
// effect, it does the transformation just as calling Transform would, only
|
||||||
|
// without copying to a destination buffer and only up to a point it can
|
||||||
|
// determine the input and output bytes are the same. This is obviously more
|
||||||
|
// limited than calling Transform, but can be more efficient in terms of
|
||||||
|
// copying and allocating buffers. Calls to Span and Transform may be
|
||||||
|
// interleaved.
|
||||||
|
Span(src []byte, atEOF bool) (n int, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NopResetter can be embedded by implementations of Transformer to add a nop
|
||||||
|
// Reset method.
|
||||||
|
type NopResetter struct{}
|
||||||
|
|
||||||
|
// Reset implements the Reset method of the Transformer interface.
|
||||||
|
func (NopResetter) Reset() {}
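
Editor's sketch (not part of the vendored file): a minimal stateless Transformer that upper-cases ASCII letters. It embeds NopResetter, honours the ErrShortDst contract described above, and is driven through transform.String; everything it uses is part of this package's exported API.

package main

import (
	"fmt"

	"golang.org/x/text/transform"
)

type upperASCII struct{ transform.NopResetter }

func (upperASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	n := len(src)
	if n > len(dst) {
		n = len(dst)
		err = transform.ErrShortDst // dst is full; the caller will call again
	}
	for i := 0; i < n; i++ {
		c := src[i]
		if 'a' <= c && c <= 'z' {
			c -= 'a' - 'A'
		}
		dst[i] = c
	}
	return n, n, err
}

func main() {
	out, _, err := transform.String(upperASCII{}, "hello, transform")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // HELLO, TRANSFORM
}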
|
||||||
|
|
||||||
|
// Reader wraps another io.Reader by transforming the bytes read.
|
||||||
|
type Reader struct {
|
||||||
|
r io.Reader
|
||||||
|
t Transformer
|
||||||
|
err error
|
||||||
|
|
||||||
|
// dst[dst0:dst1] contains bytes that have been transformed by t but
|
||||||
|
// not yet copied out via Read.
|
||||||
|
dst []byte
|
||||||
|
dst0, dst1 int
|
||||||
|
|
||||||
|
// src[src0:src1] contains bytes that have been read from r but not
|
||||||
|
// yet transformed through t.
|
||||||
|
src []byte
|
||||||
|
src0, src1 int
|
||||||
|
|
||||||
|
// transformComplete is whether the transformation is complete,
|
||||||
|
// regardless of whether or not it was successful.
|
||||||
|
transformComplete bool
|
||||||
|
}
|
||||||
|
|
||||||
|
const defaultBufSize = 4096
|
||||||
|
|
||||||
|
// NewReader returns a new Reader that wraps r by transforming the bytes read
|
||||||
|
// via t. It calls Reset on t.
|
||||||
|
func NewReader(r io.Reader, t Transformer) *Reader {
|
||||||
|
t.Reset()
|
||||||
|
return &Reader{
|
||||||
|
r: r,
|
||||||
|
t: t,
|
||||||
|
dst: make([]byte, defaultBufSize),
|
||||||
|
src: make([]byte, defaultBufSize),
|
||||||
|
}
|
||||||
|
}
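// Usage sketch for NewReader (illustrative, not part of the upstream source).
// Nop is used as the Transformer so the snippet stays self-contained; it
// assumes the caller imports "io", "os", "strings" and this package as
// "transform".
//
//	r := transform.NewReader(strings.NewReader("héllo"), transform.Nop)
//	if _, err := io.Copy(os.Stdout, r); err != nil {
//		// handle the error
//	}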
|
||||||
|
|
||||||
|
// Read implements the io.Reader interface.
|
||||||
|
func (r *Reader) Read(p []byte) (int, error) {
|
||||||
|
n, err := 0, error(nil)
|
||||||
|
for {
|
||||||
|
// Copy out any transformed bytes and return the final error if we are done.
|
||||||
|
if r.dst0 != r.dst1 {
|
||||||
|
n = copy(p, r.dst[r.dst0:r.dst1])
|
||||||
|
r.dst0 += n
|
||||||
|
if r.dst0 == r.dst1 && r.transformComplete {
|
||||||
|
return n, r.err
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
} else if r.transformComplete {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to transform some source bytes, or to flush the transformer if we
|
||||||
|
// are out of source bytes. We do this even if r.r.Read returned an error.
|
||||||
|
// As the io.Reader documentation says, "process the n > 0 bytes returned
|
||||||
|
// before considering the error".
|
||||||
|
if r.src0 != r.src1 || r.err != nil {
|
||||||
|
r.dst0 = 0
|
||||||
|
r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
|
||||||
|
r.src0 += n
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
if r.src0 != r.src1 {
|
||||||
|
r.err = errInconsistentByteCount
|
||||||
|
}
|
||||||
|
// The Transform call was successful; we are complete if we
|
||||||
|
// cannot read more bytes into src.
|
||||||
|
r.transformComplete = r.err != nil
|
||||||
|
continue
|
||||||
|
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
|
||||||
|
// Make room in dst by copying out, and try again.
|
||||||
|
continue
|
||||||
|
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
|
||||||
|
// Read more bytes into src via the code below, and try again.
|
||||||
|
default:
|
||||||
|
r.transformComplete = true
|
||||||
|
// The reader error (r.err) takes precedence over the
|
||||||
|
// transformer error (err) unless r.err is nil or io.EOF.
|
||||||
|
if r.err == nil || r.err == io.EOF {
|
||||||
|
r.err = err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move any untransformed source bytes to the start of the buffer
|
||||||
|
// and read more bytes.
|
||||||
|
if r.src0 != 0 {
|
||||||
|
r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
|
||||||
|
}
|
||||||
|
n, r.err = r.r.Read(r.src[r.src1:])
|
||||||
|
r.src1 += n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: implement ReadByte (and ReadRune??).
|
||||||
|
|
||||||
|
// Writer wraps another io.Writer by transforming the bytes written to it.
|
||||||
|
// The user needs to call Close to flush unwritten bytes that may
|
||||||
|
// be buffered.
|
||||||
|
type Writer struct {
|
||||||
|
w io.Writer
|
||||||
|
t Transformer
|
||||||
|
dst []byte
|
||||||
|
|
||||||
|
// src[:n] contains bytes that have not yet passed through t.
|
||||||
|
src []byte
|
||||||
|
n int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWriter returns a new Writer that wraps w by transforming the bytes written
|
||||||
|
// via t. It calls Reset on t.
|
||||||
|
func NewWriter(w io.Writer, t Transformer) *Writer {
|
||||||
|
t.Reset()
|
||||||
|
return &Writer{
|
||||||
|
w: w,
|
||||||
|
t: t,
|
||||||
|
dst: make([]byte, defaultBufSize),
|
||||||
|
src: make([]byte, defaultBufSize),
|
||||||
|
}
|
||||||
|
}
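// Usage sketch for NewWriter (illustrative, not part of the upstream source).
// Close must be called to flush anything still buffered by the Transformer.
// Assumes the caller imports "bytes" and this package as "transform".
//
//	var buf bytes.Buffer
//	w := transform.NewWriter(&buf, transform.Nop)
//	if _, err := w.Write([]byte("héllo")); err != nil {
//		// handle the error
//	}
//	if err := w.Close(); err != nil {
//		// handle the error
//	}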
|
||||||
|
|
||||||
|
// Write implements the io.Writer interface. If there are not enough
|
||||||
|
// bytes available to complete a Transform, the bytes will be buffered
|
||||||
|
// for the next write. Call Close to convert the remaining bytes.
|
||||||
|
func (w *Writer) Write(data []byte) (n int, err error) {
|
||||||
|
src := data
|
||||||
|
if w.n > 0 {
|
||||||
|
// Append bytes from data to the last remainder.
|
||||||
|
// TODO: limit the amount copied on first try.
|
||||||
|
n = copy(w.src[w.n:], data)
|
||||||
|
w.n += n
|
||||||
|
src = w.src[:w.n]
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
nDst, nSrc, err := w.t.Transform(w.dst, src, false)
|
||||||
|
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
|
||||||
|
return n, werr
|
||||||
|
}
|
||||||
|
src = src[nSrc:]
|
||||||
|
if w.n == 0 {
|
||||||
|
n += nSrc
|
||||||
|
} else if len(src) <= n {
|
||||||
|
// Enough bytes from w.src have been consumed. We make src point
|
||||||
|
// to data instead to reduce the copying.
|
||||||
|
w.n = 0
|
||||||
|
n -= len(src)
|
||||||
|
src = data[n:]
|
||||||
|
if n < len(data) && (err == nil || err == ErrShortSrc) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch err {
|
||||||
|
case ErrShortDst:
|
||||||
|
// This error is okay as long as we are making progress.
|
||||||
|
if nDst > 0 || nSrc > 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case ErrShortSrc:
|
||||||
|
if len(src) < len(w.src) {
|
||||||
|
m := copy(w.src, src)
|
||||||
|
// If w.n > 0, bytes from data were already copied to w.src and n
|
||||||
|
// was already set to the number of bytes consumed.
|
||||||
|
if w.n == 0 {
|
||||||
|
n += m
|
||||||
|
}
|
||||||
|
w.n = m
|
||||||
|
err = nil
|
||||||
|
} else if nDst > 0 || nSrc > 0 {
|
||||||
|
// Not enough buffer to store the remainder. Keep processing as
|
||||||
|
// long as there is progress. Without this case, transforms that
|
||||||
|
// require a lookahead larger than the buffer may result in an
|
||||||
|
// error. This is not something one may expect to be common in
|
||||||
|
// practice, but it may occur when buffers are set to small
|
||||||
|
// sizes during testing.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case nil:
|
||||||
|
if w.n > 0 {
|
||||||
|
err = errInconsistentByteCount
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close implements the io.Closer interface.
|
||||||
|
func (w *Writer) Close() error {
|
||||||
|
src := w.src[:w.n]
|
||||||
|
for {
|
||||||
|
nDst, nSrc, err := w.t.Transform(w.dst, src, true)
|
||||||
|
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
|
||||||
|
return werr
|
||||||
|
}
|
||||||
|
if err != ErrShortDst {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
src = src[nSrc:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type nop struct{ NopResetter }
|
||||||
|
|
||||||
|
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
n := copy(dst, src)
|
||||||
|
if n < len(src) {
|
||||||
|
err = ErrShortDst
|
||||||
|
}
|
||||||
|
return n, n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
|
||||||
|
return len(src), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type discard struct{ NopResetter }
|
||||||
|
|
||||||
|
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
return 0, len(src), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Discard is a Transformer for which all Transform calls succeed
|
||||||
|
// by consuming all bytes and writing nothing.
|
||||||
|
Discard Transformer = discard{}
|
||||||
|
|
||||||
|
// Nop is a SpanningTransformer that copies src to dst.
|
||||||
|
Nop SpanningTransformer = nop{}
|
||||||
|
)
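// A quick demonstration of the two built-in transformers (illustrative sketch,
// not part of the upstream source; assumes "fmt" and this package as
// "transform" are imported).
//
//	s, _, _ := transform.String(transform.Nop, "abc")
//	fmt.Println(s) // "abc": Nop copies its input unchanged
//	s, _, _ = transform.String(transform.Discard, "abc")
//	fmt.Println(s) // "": Discard consumes everything and writes nothing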
|
||||||
|
|
||||||
|
// chain is a sequence of links. A chain with N Transformers has N+1 links and
|
||||||
|
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
|
||||||
|
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
|
||||||
|
// buffers owned by the chain. The i'th link transforms bytes from the i'th
|
||||||
|
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
|
||||||
|
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
|
||||||
|
type chain struct {
|
||||||
|
link []link
|
||||||
|
err error
|
||||||
|
// errStart is the index at which the error occurred plus 1. Processing
|
||||||
|
// resumes at this level at the next call to Transform. As long as
|
||||||
|
// errStart > 0, chain will not consume any more source bytes.
|
||||||
|
errStart int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *chain) fatalError(errIndex int, err error) {
|
||||||
|
if i := errIndex + 1; i > c.errStart {
|
||||||
|
c.errStart = i
|
||||||
|
c.err = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type link struct {
|
||||||
|
t Transformer
|
||||||
|
// b[p:n] holds the bytes to be transformed by t.
|
||||||
|
b []byte
|
||||||
|
p int
|
||||||
|
n int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *link) src() []byte {
|
||||||
|
return l.b[l.p:l.n]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *link) dst() []byte {
|
||||||
|
return l.b[l.n:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chain returns a Transformer that applies t in sequence.
|
||||||
|
func Chain(t ...Transformer) Transformer {
|
||||||
|
if len(t) == 0 {
|
||||||
|
return nop{}
|
||||||
|
}
|
||||||
|
c := &chain{link: make([]link, len(t)+1)}
|
||||||
|
for i, tt := range t {
|
||||||
|
c.link[i].t = tt
|
||||||
|
}
|
||||||
|
// Allocate intermediate buffers.
|
||||||
|
b := make([][defaultBufSize]byte, len(t)-1)
|
||||||
|
for i := range b {
|
||||||
|
c.link[i+1].b = b[i][:]
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
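// Usage sketch for Chain (illustrative, not part of the upstream source). The
// transformers are applied in the order given; two Nop transformers are used
// here purely to keep the snippet self-contained, but the same pattern applies
// to chains such as a decomposing form, a filter, and a composing form.
//
//	t := transform.Chain(transform.Nop, transform.Nop)
//	s, _, err := transform.String(t, "input")
//	// s == "input", err == nil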
|
||||||
|
|
||||||
|
// Reset resets the state of Chain. It calls Reset on all the Transformers.
|
||||||
|
func (c *chain) Reset() {
|
||||||
|
for i, l := range c.link {
|
||||||
|
if l.t != nil {
|
||||||
|
l.t.Reset()
|
||||||
|
}
|
||||||
|
c.link[i].p, c.link[i].n = 0, 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: make chain use Span (is going to be fun to implement!)
|
||||||
|
|
||||||
|
// Transform applies the transformers of c in sequence.
|
||||||
|
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
// Set up src and dst in the chain.
|
||||||
|
srcL := &c.link[0]
|
||||||
|
dstL := &c.link[len(c.link)-1]
|
||||||
|
srcL.b, srcL.p, srcL.n = src, 0, len(src)
|
||||||
|
dstL.b, dstL.n = dst, 0
|
||||||
|
var lastFull, needProgress bool // for detecting progress
|
||||||
|
|
||||||
|
// i is the index of the next Transformer to apply, for i in [low, high].
|
||||||
|
// low is the lowest index for which c.link[low] may still produce bytes.
|
||||||
|
// high is the highest index for which c.link[high] has a Transformer.
|
||||||
|
// The error returned by Transform determines whether to increase or
|
||||||
|
// decrease i. We try to completely fill a buffer before converting it.
|
||||||
|
for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
|
||||||
|
in, out := &c.link[i], &c.link[i+1]
|
||||||
|
nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
|
||||||
|
out.n += nDst
|
||||||
|
in.p += nSrc
|
||||||
|
if i > 0 && in.p == in.n {
|
||||||
|
in.p, in.n = 0, 0
|
||||||
|
}
|
||||||
|
needProgress, lastFull = lastFull, false
|
||||||
|
switch err0 {
|
||||||
|
case ErrShortDst:
|
||||||
|
// Process the destination buffer next. Return if we are already
|
||||||
|
// at the high index.
|
||||||
|
if i == high {
|
||||||
|
return dstL.n, srcL.p, ErrShortDst
|
||||||
|
}
|
||||||
|
if out.n != 0 {
|
||||||
|
i++
|
||||||
|
// If the Transformer at the next index is not able to process any
|
||||||
|
// source bytes there is nothing that can be done to make progress
|
||||||
|
// and the bytes will remain unprocessed. lastFull is used to
|
||||||
|
// detect this and break out of the loop with a fatal error.
|
||||||
|
lastFull = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// The destination buffer was too small, but is completely empty.
|
||||||
|
// Return a fatal error as this transformation can never complete.
|
||||||
|
c.fatalError(i, errShortInternal)
|
||||||
|
case ErrShortSrc:
|
||||||
|
if i == 0 {
|
||||||
|
// Save ErrShortSrc in err. All other errors take precedence.
|
||||||
|
err = ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Source bytes were depleted before filling up the destination buffer.
|
||||||
|
// Verify we made some progress, move the remaining bytes to the errStart
|
||||||
|
// and try to get more source bytes.
|
||||||
|
if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
|
||||||
|
// There were not enough source bytes to proceed while the source
|
||||||
|
// buffer cannot hold any more bytes. Return a fatal error as this
|
||||||
|
// transformation can never complete.
|
||||||
|
c.fatalError(i, errShortInternal)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// in.b is an internal buffer and we can make progress.
|
||||||
|
in.p, in.n = 0, copy(in.b, in.src())
|
||||||
|
fallthrough
|
||||||
|
case nil:
|
||||||
|
// if i == low, we have depleted the bytes at index i or any lower levels.
|
||||||
|
// In that case we increase low and i. In all other cases we decrease i to
|
||||||
|
// fetch more bytes before proceeding to the next index.
|
||||||
|
if i > low {
|
||||||
|
i--
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
c.fatalError(i, err0)
|
||||||
|
}
|
||||||
|
// Exhausted level low or fatal error: increase low and continue
|
||||||
|
// to process the bytes accepted so far.
|
||||||
|
i++
|
||||||
|
low = i
|
||||||
|
}
|
||||||
|
|
||||||
|
// If c.errStart > 0, this means we found a fatal error. We will clear
|
||||||
|
// all upstream buffers. At this point, no more progress can be made
|
||||||
|
// downstream, as Transform would have bailed while handling ErrShortDst.
|
||||||
|
if c.errStart > 0 {
|
||||||
|
for i := 1; i < c.errStart; i++ {
|
||||||
|
c.link[i].p, c.link[i].n = 0, 0
|
||||||
|
}
|
||||||
|
err, c.errStart, c.err = c.err, 0, nil
|
||||||
|
}
|
||||||
|
return dstL.n, srcL.p, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use runes.Remove instead.
|
||||||
|
func RemoveFunc(f func(r rune) bool) Transformer {
|
||||||
|
return removeF(f)
|
||||||
|
}
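// Usage sketch for the deprecated RemoveFunc (illustrative, not part of the
// upstream source). It drops every space rune from the input; new code should
// prefer runes.Remove, as the deprecation notice says.
//
//	t := transform.RemoveFunc(func(r rune) bool { return r == ' ' })
//	s, _, _ := transform.String(t, "a b c")
//	// s == "abc"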
|
||||||
|
|
||||||
|
type removeF func(r rune) bool
|
||||||
|
|
||||||
|
func (removeF) Reset() {}
|
||||||
|
|
||||||
|
// Transform implements the Transformer interface.
|
||||||
|
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||||
|
for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {
|
||||||
|
|
||||||
|
if r = rune(src[0]); r < utf8.RuneSelf {
|
||||||
|
sz = 1
|
||||||
|
} else {
|
||||||
|
r, sz = utf8.DecodeRune(src)
|
||||||
|
|
||||||
|
if sz == 1 {
|
||||||
|
// Invalid rune.
|
||||||
|
if !atEOF && !utf8.FullRune(src) {
|
||||||
|
err = ErrShortSrc
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// We replace illegal bytes with RuneError. Not doing so might
|
||||||
|
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
|
||||||
|
// The resulting byte sequence may subsequently contain runes
|
||||||
|
// for which t(r) is true that were passed unnoticed.
|
||||||
|
if !t(r) {
|
||||||
|
if nDst+3 > len(dst) {
|
||||||
|
err = ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nDst += copy(dst[nDst:], "\uFFFD")
|
||||||
|
}
|
||||||
|
nSrc++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !t(r) {
|
||||||
|
if nDst+sz > len(dst) {
|
||||||
|
err = ErrShortDst
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nDst += copy(dst[nDst:], src[:sz])
|
||||||
|
}
|
||||||
|
nSrc += sz
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// grow returns a new []byte that is longer than b, and copies the first n bytes
|
||||||
|
// of b to the start of the new slice.
|
||||||
|
func grow(b []byte, n int) []byte {
|
||||||
|
m := len(b)
|
||||||
|
if m <= 32 {
|
||||||
|
m = 64
|
||||||
|
} else if m <= 256 {
|
||||||
|
m *= 2
|
||||||
|
} else {
|
||||||
|
m += m >> 1
|
||||||
|
}
|
||||||
|
buf := make([]byte, m)
|
||||||
|
copy(buf, b[:n])
|
||||||
|
return buf
|
||||||
|
}
|
||||||
|
|
||||||
|
const initialBufSize = 128
|
||||||
|
|
||||||
|
// String returns a string with the result of converting s[:n] using t, where
|
||||||
|
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
|
||||||
|
func String(t Transformer, s string) (result string, n int, err error) {
|
||||||
|
t.Reset()
|
||||||
|
if s == "" {
|
||||||
|
// Fast path for the common case for empty input. Results in about a
|
||||||
|
// 86% reduction of running time for BenchmarkStringLowerEmpty.
|
||||||
|
if _, _, err := t.Transform(nil, nil, true); err == nil {
|
||||||
|
return "", 0, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate only once. Note that both dst and src escape when passed to
|
||||||
|
// Transform.
|
||||||
|
buf := [2 * initialBufSize]byte{}
|
||||||
|
dst := buf[:initialBufSize:initialBufSize]
|
||||||
|
src := buf[initialBufSize : 2*initialBufSize]
|
||||||
|
|
||||||
|
// The input string s is transformed in multiple chunks (starting with a
|
||||||
|
// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
|
||||||
|
// per-Transform-call) indexes, pDst and pSrc are overall indexes.
|
||||||
|
nDst, nSrc := 0, 0
|
||||||
|
pDst, pSrc := 0, 0
|
||||||
|
|
||||||
|
// pPrefix is the length of a common prefix: the first pPrefix bytes of the
|
||||||
|
// result will equal the first pPrefix bytes of s. It is not guaranteed to
|
||||||
|
// be the largest such value, but if pPrefix, len(result) and len(s) are
|
||||||
|
// all equal after the final transform (i.e. calling Transform with atEOF
|
||||||
|
// being true returned nil error) then we don't need to allocate a new
|
||||||
|
// result string.
|
||||||
|
pPrefix := 0
|
||||||
|
for {
|
||||||
|
// Invariant: pDst == pPrefix && pSrc == pPrefix.
|
||||||
|
|
||||||
|
n := copy(src, s[pSrc:])
|
||||||
|
nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
|
||||||
|
pDst += nDst
|
||||||
|
pSrc += nSrc
|
||||||
|
|
||||||
|
// TODO: let transformers implement an optional Spanner interface, akin
|
||||||
|
// to norm's QuickSpan. This would even allow us to avoid any allocation.
|
||||||
|
if !bytes.Equal(dst[:nDst], src[:nSrc]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
pPrefix = pSrc
|
||||||
|
if err == ErrShortDst {
|
||||||
|
// A buffer can only be short if a transformer modifies its input.
|
||||||
|
break
|
||||||
|
} else if err == ErrShortSrc {
|
||||||
|
if nSrc == 0 {
|
||||||
|
// No progress was made.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Equal so far and !atEOF, so continue checking.
|
||||||
|
} else if err != nil || pPrefix == len(s) {
|
||||||
|
return string(s[:pPrefix]), pPrefix, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
|
||||||
|
|
||||||
|
// We have transformed the first pSrc bytes of the input s to become pDst
|
||||||
|
// transformed bytes. Those transformed bytes are discontiguous: the first
|
||||||
|
// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
|
||||||
|
// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
|
||||||
|
// that they become one contiguous slice: dst[:pDst].
|
||||||
|
if pPrefix != 0 {
|
||||||
|
newDst := dst
|
||||||
|
if pDst > len(newDst) {
|
||||||
|
newDst = make([]byte, len(s)+nDst-nSrc)
|
||||||
|
}
|
||||||
|
copy(newDst[pPrefix:pDst], dst[:nDst])
|
||||||
|
copy(newDst[:pPrefix], s[:pPrefix])
|
||||||
|
dst = newDst
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prevent duplicate Transform calls with atEOF being true at the end of
|
||||||
|
// the input. Also return if we have an unrecoverable error.
|
||||||
|
if (err == nil && pSrc == len(s)) ||
|
||||||
|
(err != nil && err != ErrShortDst && err != ErrShortSrc) {
|
||||||
|
return string(dst[:pDst]), pSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transform the remaining input, growing dst and src buffers as necessary.
|
||||||
|
for {
|
||||||
|
n := copy(src, s[pSrc:])
|
||||||
|
atEOF := pSrc+n == len(s)
|
||||||
|
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF)
|
||||||
|
pDst += nDst
|
||||||
|
pSrc += nSrc
|
||||||
|
|
||||||
|
// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
|
||||||
|
// make progress. This may avoid excessive allocations.
|
||||||
|
if err == ErrShortDst {
|
||||||
|
if nDst == 0 {
|
||||||
|
dst = grow(dst, pDst)
|
||||||
|
}
|
||||||
|
} else if err == ErrShortSrc {
|
||||||
|
if atEOF {
|
||||||
|
return string(dst[:pDst]), pSrc, err
|
||||||
|
}
|
||||||
|
if nSrc == 0 {
|
||||||
|
src = grow(src, 0)
|
||||||
|
}
|
||||||
|
} else if err != nil || pSrc == len(s) {
|
||||||
|
return string(dst[:pDst]), pSrc, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
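// Usage sketch for String (illustrative, not part of the upstream source).
// n reports how many bytes of the input were consumed; it equals len(s) when
// err is nil.
//
//	result, n, err := transform.String(transform.Nop, "héllo")
//	// result == "héllo", n == len("héllo"), err == nil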
|
||||||
|
|
||||||
|
// Bytes returns a new byte slice with the result of converting b[:n] using t,
|
||||||
|
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
|
||||||
|
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
|
||||||
|
return doAppend(t, 0, make([]byte, len(b)), b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append appends the result of converting src[:n] using t to dst, where
|
||||||
|
// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
|
||||||
|
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
|
||||||
|
if len(dst) == cap(dst) {
|
||||||
|
n := len(src) + len(dst) // It is okay for this to be 0.
|
||||||
|
b := make([]byte, n)
|
||||||
|
dst = b[:copy(b, dst)]
|
||||||
|
}
|
||||||
|
return doAppend(t, len(dst), dst[:cap(dst)], src)
|
||||||
|
}
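// Usage sketch for Bytes and Append (illustrative, not part of the upstream
// source). Append writes the converted bytes after any existing content in
// dst, growing the slice as needed.
//
//	out, _, _ := transform.Bytes(transform.Nop, []byte("abc"))
//	// out == []byte("abc")
//	out, _, _ = transform.Append(transform.Nop, []byte("prefix-"), []byte("abc"))
//	// out == []byte("prefix-abc")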
|
||||||
|
|
||||||
|
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
|
||||||
|
t.Reset()
|
||||||
|
pSrc := 0
|
||||||
|
for {
|
||||||
|
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
|
||||||
|
pDst += nDst
|
||||||
|
pSrc += nSrc
|
||||||
|
if err != ErrShortDst {
|
||||||
|
return dst[:pDst], pSrc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grow the destination buffer, but do not grow as long as we can make
|
||||||
|
// progress. This may avoid excessive allocations.
|
||||||
|
if nDst == 0 {
|
||||||
|
dst = grow(dst, pDst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
512
vendor/golang.org/x/text/unicode/norm/composition.go
generated
vendored
Normal file
@ -0,0 +1,512 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package norm
|
||||||
|
|
||||||
|
import "unicode/utf8"
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxNonStarters = 30
|
||||||
|
// The maximum number of characters needed for a buffer is
|
||||||
|
// maxNonStarters + 1 for the starter + 1 for the CGJ
|
||||||
|
maxBufferSize = maxNonStarters + 2
|
||||||
|
maxNFCExpansion = 3 // NFC(0x1D160)
|
||||||
|
maxNFKCExpansion = 18 // NFKC(0xFDFA)
|
||||||
|
|
||||||
|
maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128
|
||||||
|
)
|
||||||
|
|
||||||
|
// ssState is used for reporting the segment state after inserting a rune.
|
||||||
|
// It is returned by streamSafe.next.
|
||||||
|
type ssState int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Indicates a rune was successfully added to the segment.
|
||||||
|
ssSuccess ssState = iota
|
||||||
|
// Indicates a rune starts a new segment and should not be added.
|
||||||
|
ssStarter
|
||||||
|
// Indicates a rune caused a segment overflow and a CGJ should be inserted.
|
||||||
|
ssOverflow
|
||||||
|
)
|
||||||
|
|
||||||
|
// streamSafe implements the policy of when a CGJ should be inserted.
|
||||||
|
type streamSafe uint8
|
||||||
|
|
||||||
|
// first inserts the first rune of a segment. It is a faster version of next if
|
||||||
|
// it is known p represents the first rune in a segment.
|
||||||
|
func (ss *streamSafe) first(p Properties) {
|
||||||
|
*ss = streamSafe(p.nTrailingNonStarters())
|
||||||
|
}
|
||||||
|
|
||||||
|
// next returns a ssState value to indicate whether a rune represented by p
|
||||||
|
// can be inserted.
|
||||||
|
func (ss *streamSafe) next(p Properties) ssState {
|
||||||
|
if *ss > maxNonStarters {
|
||||||
|
panic("streamSafe was not reset")
|
||||||
|
}
|
||||||
|
n := p.nLeadingNonStarters()
|
||||||
|
if *ss += streamSafe(n); *ss > maxNonStarters {
|
||||||
|
*ss = 0
|
||||||
|
return ssOverflow
|
||||||
|
}
|
||||||
|
// The Stream-Safe Text Processing prescribes that the counting can stop
|
||||||
|
// as soon as a starter is encountered. However, there are some starters,
|
||||||
|
// like Jamo V and T, that can combine with other runes, leaving their
|
||||||
|
// successive non-starters appended to the previous, possibly causing an
|
||||||
|
// overflow. We will therefore consider any rune with a non-zero nLead to
|
||||||
|
// be a non-starter. Note that it always holds that if nLead > 0 then
|
||||||
|
// nLead == nTrail.
|
||||||
|
if n == 0 {
|
||||||
|
*ss = streamSafe(p.nTrailingNonStarters())
|
||||||
|
return ssStarter
|
||||||
|
}
|
||||||
|
return ssSuccess
|
||||||
|
}
|
||||||
|
|
||||||
|
// backwards is used for checking for overflow and segment starts
|
||||||
|
// when traversing a string backwards. Users do not need to call first
|
||||||
|
// for the first rune. The state of the streamSafe retains the count of
|
||||||
|
// the non-starters loaded.
|
||||||
|
func (ss *streamSafe) backwards(p Properties) ssState {
|
||||||
|
if *ss > maxNonStarters {
|
||||||
|
panic("streamSafe was not reset")
|
||||||
|
}
|
||||||
|
c := *ss + streamSafe(p.nTrailingNonStarters())
|
||||||
|
if c > maxNonStarters {
|
||||||
|
return ssOverflow
|
||||||
|
}
|
||||||
|
*ss = c
|
||||||
|
if p.nLeadingNonStarters() == 0 {
|
||||||
|
return ssStarter
|
||||||
|
}
|
||||||
|
return ssSuccess
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss streamSafe) isMax() bool {
|
||||||
|
return ss == maxNonStarters
|
||||||
|
}
|
||||||
|
|
||||||
|
// GraphemeJoiner is inserted after maxNonStarters non-starter runes.
|
||||||
|
const GraphemeJoiner = "\u034F"
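// The effect of the stream-safe limit can be observed through the public API:
// when a starter is followed by more than maxNonStarters combining marks, the
// normalized output contains a CGJ. Illustrative sketch, not part of the
// upstream source; assumes "fmt", "strings" and this package as "norm" are
// imported.
//
//	s := "a" + strings.Repeat("\u0301", 40) // 40 combining acute accents
//	out := norm.NFC.String(s)
//	fmt.Println(strings.Contains(out, norm.GraphemeJoiner)) // true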
|
||||||
|
|
||||||
|
// reorderBuffer is used to normalize a single segment. Characters inserted with
|
||||||
|
// insert are decomposed and reordered based on CCC. The compose method can
|
||||||
|
// be used to recombine characters. Note that the byte buffer does not hold
|
||||||
|
// the UTF-8 characters in order. Only the rune array is maintained in sorted
|
||||||
|
// order. flush writes the resulting segment to a byte array.
|
||||||
|
type reorderBuffer struct {
|
||||||
|
rune [maxBufferSize]Properties // Per character info.
|
||||||
|
byte [maxByteBufferSize]byte // UTF-8 buffer. Referenced by runeInfo.pos.
|
||||||
|
nbyte uint8 // Number of bytes.
|
||||||
|
ss streamSafe // For limiting length of non-starter sequence.
|
||||||
|
nrune int // Number of runeInfos.
|
||||||
|
f formInfo
|
||||||
|
|
||||||
|
src input
|
||||||
|
nsrc int
|
||||||
|
tmpBytes input
|
||||||
|
|
||||||
|
out []byte
|
||||||
|
flushF func(*reorderBuffer) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *reorderBuffer) init(f Form, src []byte) {
|
||||||
|
rb.f = *formTable[f]
|
||||||
|
rb.src.setBytes(src)
|
||||||
|
rb.nsrc = len(src)
|
||||||
|
rb.ss = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *reorderBuffer) initString(f Form, src string) {
|
||||||
|
rb.f = *formTable[f]
|
||||||
|
rb.src.setString(src)
|
||||||
|
rb.nsrc = len(src)
|
||||||
|
rb.ss = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
|
||||||
|
rb.out = out
|
||||||
|
rb.flushF = f
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset discards all characters from the buffer.
|
||||||
|
func (rb *reorderBuffer) reset() {
|
||||||
|
rb.nrune = 0
|
||||||
|
rb.nbyte = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rb *reorderBuffer) doFlush() bool {
|
||||||
|
if rb.f.composing {
|
||||||
|
rb.compose()
|
||||||
|
}
|
||||||
|
res := rb.flushF(rb)
|
||||||
|
rb.reset()
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendFlush appends the normalized segment to rb.out.
|
||||||
|
func appendFlush(rb *reorderBuffer) bool {
|
||||||
|
for i := 0; i < rb.nrune; i++ {
|
||||||
|
start := rb.rune[i].pos
|
||||||
|
end := start + rb.rune[i].size
|
||||||
|
rb.out = append(rb.out, rb.byte[start:end]...)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// flush appends the normalized segment to out and resets rb.
|
||||||
|
func (rb *reorderBuffer) flush(out []byte) []byte {
|
||||||
|
for i := 0; i < rb.nrune; i++ {
|
||||||
|
start := rb.rune[i].pos
|
||||||
|
end := start + rb.rune[i].size
|
||||||
|
out = append(out, rb.byte[start:end]...)
|
||||||
|
}
|
||||||
|
rb.reset()
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// flushCopy copies the normalized segment to buf and resets rb.
|
||||||
|
// It returns the number of bytes written to buf.
|
||||||
|
func (rb *reorderBuffer) flushCopy(buf []byte) int {
|
||||||
|
p := 0
|
||||||
|
for i := 0; i < rb.nrune; i++ {
|
||||||
|
runep := rb.rune[i]
|
||||||
|
p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size])
|
||||||
|
}
|
||||||
|
rb.reset()
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class.
|
||||||
|
// The caller must ensure that the buffer is large enough to hold the rune.
|
||||||
|
// It is used internally by insert and insertString only.
|
||||||
|
func (rb *reorderBuffer) insertOrdered(info Properties) {
|
||||||
|
n := rb.nrune
|
||||||
|
b := rb.rune[:]
|
||||||
|
cc := info.ccc
|
||||||
|
if cc > 0 {
|
||||||
|
// Find insertion position + move elements to make room.
|
||||||
|
for ; n > 0; n-- {
|
||||||
|
if b[n-1].ccc <= cc {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
b[n] = b[n-1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rb.nrune += 1
|
||||||
|
pos := uint8(rb.nbyte)
|
||||||
|
rb.nbyte += utf8.UTFMax
|
||||||
|
info.pos = pos
|
||||||
|
b[n] = info
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertErr is an error code returned by insert. Using this type instead
|
||||||
|
// of error improves performance up to 20% for many of the benchmarks.
|
||||||
|
type insertErr int
|
||||||
|
|
||||||
|
const (
|
||||||
|
iSuccess insertErr = -iota
|
||||||
|
iShortDst
|
||||||
|
iShortSrc
|
||||||
|
)
|
||||||
|
|
||||||
|
// insertFlush inserts the given rune in the buffer ordered by CCC.
|
||||||
|
// If a decomposition with multiple segments is encountered, the leading
|
||||||
|
// ones are flushed.
|
||||||
|
// It returns a non-zero error code if the rune was not inserted.
|
||||||
|
func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr {
|
||||||
|
if rune := src.hangul(i); rune != 0 {
|
||||||
|
rb.decomposeHangul(rune)
|
||||||
|
return iSuccess
|
||||||
|
}
|
||||||
|
if info.hasDecomposition() {
|
||||||
|
return rb.insertDecomposed(info.Decomposition())
|
||||||
|
}
|
||||||
|
rb.insertSingle(src, i, info)
|
||||||
|
return iSuccess
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertUnsafe inserts the given rune in the buffer ordered by CCC.
|
||||||
|
// It is assumed there is sufficient space to hold the runes. It is the
|
||||||
|
// responsibility of the caller to ensure this. This can be done by checking
|
||||||
|
// the state returned by the streamSafe type.
|
||||||
|
func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
|
||||||
|
if rune := src.hangul(i); rune != 0 {
|
||||||
|
rb.decomposeHangul(rune)
|
||||||
|
}
|
||||||
|
if info.hasDecomposition() {
|
||||||
|
// TODO: inline.
|
||||||
|
rb.insertDecomposed(info.Decomposition())
|
||||||
|
} else {
|
||||||
|
rb.insertSingle(src, i, info)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertDecomposed inserts an entry into the reorderBuffer for each rune
|
||||||
|
// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
|
||||||
|
// It flushes the buffer on each new segment start.
|
||||||
|
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
|
||||||
|
rb.tmpBytes.setBytes(dcomp)
|
||||||
|
// As the streamSafe accounting already handles the counting for modifiers,
|
||||||
|
// we don't have to call next. However, we do need to keep the accounting
|
||||||
|
// intact when flushing the buffer.
|
||||||
|
for i := 0; i < len(dcomp); {
|
||||||
|
info := rb.f.info(rb.tmpBytes, i)
|
||||||
|
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
|
||||||
|
return iShortDst
|
||||||
|
}
|
||||||
|
i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)])
|
||||||
|
rb.insertOrdered(info)
|
||||||
|
}
|
||||||
|
return iSuccess
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertSingle inserts an entry in the reorderBuffer for the rune at
|
||||||
|
// position i. info is the runeInfo for the rune at position i.
|
||||||
|
func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) {
|
||||||
|
src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size))
|
||||||
|
rb.insertOrdered(info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb.
|
||||||
|
func (rb *reorderBuffer) insertCGJ() {
|
||||||
|
rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))})
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
|
||||||
|
func (rb *reorderBuffer) appendRune(r rune) {
|
||||||
|
bn := rb.nbyte
|
||||||
|
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
|
||||||
|
rb.nbyte += utf8.UTFMax
|
||||||
|
rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)}
|
||||||
|
rb.nrune++
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignRune sets a rune at position pos. It is used for Hangul and recomposition.
|
||||||
|
func (rb *reorderBuffer) assignRune(pos int, r rune) {
|
||||||
|
bn := rb.rune[pos].pos
|
||||||
|
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
|
||||||
|
rb.rune[pos] = Properties{pos: bn, size: uint8(sz)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
|
||||||
|
func (rb *reorderBuffer) runeAt(n int) rune {
|
||||||
|
inf := rb.rune[n]
|
||||||
|
r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// bytesAt returns the UTF-8 encoding of the rune at position n.
|
||||||
|
// It is used for Hangul and recomposition.
|
||||||
|
func (rb *reorderBuffer) bytesAt(n int) []byte {
|
||||||
|
inf := rb.rune[n]
|
||||||
|
return rb.byte[inf.pos : int(inf.pos)+int(inf.size)]
|
||||||
|
}
|
||||||
|
|
||||||
|
// For Hangul we combine algorithmically, instead of using tables.
|
||||||
|
const (
|
||||||
|
hangulBase = 0xAC00 // UTF-8(hangulBase) -> EA B0 80
|
||||||
|
hangulBase0 = 0xEA
|
||||||
|
hangulBase1 = 0xB0
|
||||||
|
hangulBase2 = 0x80
|
||||||
|
|
||||||
|
hangulEnd = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4
|
||||||
|
hangulEnd0 = 0xED
|
||||||
|
hangulEnd1 = 0x9E
|
||||||
|
hangulEnd2 = 0xA4
|
||||||
|
|
||||||
|
jamoLBase = 0x1100 // UTF-8(jamoLBase) -> E1 84 00
|
||||||
|
jamoLBase0 = 0xE1
|
||||||
|
jamoLBase1 = 0x84
|
||||||
|
jamoLEnd = 0x1113
|
||||||
|
jamoVBase = 0x1161
|
||||||
|
jamoVEnd = 0x1176
|
||||||
|
jamoTBase = 0x11A7
|
||||||
|
jamoTEnd = 0x11C3
|
||||||
|
|
||||||
|
jamoTCount = 28
|
||||||
|
jamoVCount = 21
|
||||||
|
jamoVTCount = 21 * 28
|
||||||
|
jamoLVTCount = 19 * 21 * 28
|
||||||
|
)
|
||||||
|
|
||||||
|
const hangulUTF8Size = 3
|
||||||
|
|
||||||
|
func isHangul(b []byte) bool {
|
||||||
|
if len(b) < hangulUTF8Size {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b0 := b[0]
|
||||||
|
if b0 < hangulBase0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b1 := b[1]
|
||||||
|
switch {
|
||||||
|
case b0 == hangulBase0:
|
||||||
|
return b1 >= hangulBase1
|
||||||
|
case b0 < hangulEnd0:
|
||||||
|
return true
|
||||||
|
case b0 > hangulEnd0:
|
||||||
|
return false
|
||||||
|
case b1 < hangulEnd1:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return b1 == hangulEnd1 && b[2] < hangulEnd2
|
||||||
|
}
|
||||||
|
|
||||||
|
func isHangulString(b string) bool {
|
||||||
|
if len(b) < hangulUTF8Size {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b0 := b[0]
|
||||||
|
if b0 < hangulBase0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b1 := b[1]
|
||||||
|
switch {
|
||||||
|
case b0 == hangulBase0:
|
||||||
|
return b1 >= hangulBase1
|
||||||
|
case b0 < hangulEnd0:
|
||||||
|
return true
|
||||||
|
case b0 > hangulEnd0:
|
||||||
|
return false
|
||||||
|
case b1 < hangulEnd1:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return b1 == hangulEnd1 && b[2] < hangulEnd2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Caller must ensure len(b) >= 2.
|
||||||
|
func isJamoVT(b []byte) bool {
|
||||||
|
// True if (rune & 0xff00) == jamoLBase
|
||||||
|
return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1
|
||||||
|
}
|
||||||
|
|
||||||
|
func isHangulWithoutJamoT(b []byte) bool {
|
||||||
|
c, _ := utf8.DecodeRune(b)
|
||||||
|
c -= hangulBase
|
||||||
|
return c < jamoLVTCount && c%jamoTCount == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// decomposeHangul writes the decomposed Hangul to buf and returns the number
|
||||||
|
// of bytes written. len(buf) should be at least 9.
|
||||||
|
func decomposeHangul(buf []byte, r rune) int {
|
||||||
|
const JamoUTF8Len = 3
|
||||||
|
r -= hangulBase
|
||||||
|
x := r % jamoTCount
|
||||||
|
r /= jamoTCount
|
||||||
|
utf8.EncodeRune(buf, jamoLBase+r/jamoVCount)
|
||||||
|
utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount)
|
||||||
|
if x != 0 {
|
||||||
|
utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x)
|
||||||
|
return 3 * JamoUTF8Len
|
||||||
|
}
|
||||||
|
return 2 * JamoUTF8Len
|
||||||
|
}
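// Worked example of the algorithm above (illustrative, not part of the
// upstream source): for U+AC01, r-hangulBase = 1, so x = 1 (a trailing jamo is
// present) and the decomposition is U+1100 U+1161 U+11A8. The same result is
// visible through the public API, assuming "fmt" and this package as "norm"
// are imported.
//
//	fmt.Printf("%U\n", []rune(norm.NFD.String("\uAC01")))
//	// [U+1100 U+1161 U+11A8]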
|
||||||
|
|
||||||
|
// decomposeHangul algorithmically decomposes a Hangul rune into
|
||||||
|
// its Jamo components.
|
||||||
|
// See https://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
|
||||||
|
func (rb *reorderBuffer) decomposeHangul(r rune) {
|
||||||
|
r -= hangulBase
|
||||||
|
x := r % jamoTCount
|
||||||
|
r /= jamoTCount
|
||||||
|
rb.appendRune(jamoLBase + r/jamoVCount)
|
||||||
|
rb.appendRune(jamoVBase + r%jamoVCount)
|
||||||
|
if x != 0 {
|
||||||
|
rb.appendRune(jamoTBase + x)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// combineHangul algorithmically combines Jamo character components into Hangul.
|
||||||
|
// See https://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
|
||||||
|
func (rb *reorderBuffer) combineHangul(s, i, k int) {
|
||||||
|
b := rb.rune[:]
|
||||||
|
bn := rb.nrune
|
||||||
|
for ; i < bn; i++ {
|
||||||
|
cccB := b[k-1].ccc
|
||||||
|
cccC := b[i].ccc
|
||||||
|
if cccB == 0 {
|
||||||
|
s = k - 1
|
||||||
|
}
|
||||||
|
if s != k-1 && cccB >= cccC {
|
||||||
|
// b[i] is blocked by greater-equal cccX below it
|
||||||
|
b[k] = b[i]
|
||||||
|
k++
|
||||||
|
} else {
|
||||||
|
l := rb.runeAt(s) // also used to compare to hangulBase
|
||||||
|
v := rb.runeAt(i) // also used to compare to jamoT
|
||||||
|
switch {
|
||||||
|
case jamoLBase <= l && l < jamoLEnd &&
|
||||||
|
jamoVBase <= v && v < jamoVEnd:
|
||||||
|
// 11xx plus 116x to LV
|
||||||
|
rb.assignRune(s, hangulBase+
|
||||||
|
(l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount)
|
||||||
|
case hangulBase <= l && l < hangulEnd &&
|
||||||
|
jamoTBase < v && v < jamoTEnd &&
|
||||||
|
((l-hangulBase)%jamoTCount) == 0:
|
||||||
|
// ACxx plus 11Ax to LVT
|
||||||
|
rb.assignRune(s, l+v-jamoTBase)
|
||||||
|
default:
|
||||||
|
b[k] = b[i]
|
||||||
|
k++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rb.nrune = k
|
||||||
|
}
|
||||||
|
|
||||||
|
// compose recombines the runes in the buffer.
|
||||||
|
// It should only be used to recompose a single segment, as it will not
|
||||||
|
// handle alternations between Hangul and non-Hangul characters correctly.
|
||||||
|
func (rb *reorderBuffer) compose() {
|
||||||
|
// Lazily load the map used by the combine func below, but do
|
||||||
|
// it outside of the loop.
|
||||||
|
recompMapOnce.Do(buildRecompMap)
|
||||||
|
|
||||||
|
// UAX #15, section X5, including Corrigendum #5
|
||||||
|
// "In any character sequence beginning with starter S, a character C is
|
||||||
|
// blocked from S if and only if there is some character B between S
|
||||||
|
// and C, and either B is a starter or it has the same or higher
|
||||||
|
// combining class as C."
|
||||||
|
bn := rb.nrune
|
||||||
|
if bn == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
k := 1
|
||||||
|
b := rb.rune[:]
|
||||||
|
for s, i := 0, 1; i < bn; i++ {
|
||||||
|
if isJamoVT(rb.bytesAt(i)) {
|
||||||
|
// Redo from start in Hangul mode. Necessary to support
|
||||||
|
// U+320E..U+321E in NFKC mode.
|
||||||
|
rb.combineHangul(s, i, k)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ii := b[i]
|
||||||
|
// We can only use combineForward as a filter if we later
|
||||||
|
// get the info for the combined character. This is more
|
||||||
|
// expensive than using the filter. Using combinesBackward()
|
||||||
|
// is safe.
|
||||||
|
if ii.combinesBackward() {
|
||||||
|
cccB := b[k-1].ccc
|
||||||
|
cccC := ii.ccc
|
||||||
|
blocked := false // b[i] blocked by starter or greater or equal CCC?
|
||||||
|
if cccB == 0 {
|
||||||
|
s = k - 1
|
||||||
|
} else {
|
||||||
|
blocked = s != k-1 && cccB >= cccC
|
||||||
|
}
|
||||||
|
if !blocked {
|
||||||
|
combined := combine(rb.runeAt(s), rb.runeAt(i))
|
||||||
|
if combined != 0 {
|
||||||
|
rb.assignRune(s, combined)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b[k] = b[i]
|
||||||
|
k++
|
||||||
|
}
|
||||||
|
rb.nrune = k
|
||||||
|
}
|
278
vendor/golang.org/x/text/unicode/norm/forminfo.go
generated
vendored
Normal file
@ -0,0 +1,278 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package norm
|
||||||
|
|
||||||
|
import "encoding/binary"
|
||||||
|
|
||||||
|
// This file contains Form-specific logic and wrappers for data in tables.go.
|
||||||
|
|
||||||
|
// Rune info is stored in a separate trie per composing form. A composing form
|
||||||
|
// and its corresponding decomposing form share the same trie. Each trie maps
|
||||||
|
// a rune to a uint16. The values take two forms. For v >= 0x8000:
|
||||||
|
// bits
|
||||||
|
// 15: 1 (inverse of NFD_QC bit of qcInfo)
|
||||||
|
// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
|
||||||
|
// 6..0: ccc (compressed CCC value).
|
||||||
|
// For v < 0x8000, the respective rune has a decomposition and v is an index
|
||||||
|
// into a byte array of UTF-8 decomposition sequences and additional info and
|
||||||
|
// has the form:
|
||||||
|
// <header> <decomp_byte>* [<tccc> [<lccc>]]
|
||||||
|
// The header contains the number of bytes in the decomposition (excluding this
|
||||||
|
// length byte). The two most significant bits of this length byte correspond
|
||||||
|
// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
|
||||||
|
// The byte sequence is followed by a trailing and leading CCC if the values
|
||||||
|
// for these are not zero. The value of v determines which ccc are appended
|
||||||
|
// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
|
||||||
|
// the sequence is followed by a trailing ccc, and for v >= firstLeadingCC
|
||||||
|
// there is an additional leading ccc. The value of tccc itself is the
|
||||||
|
// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
|
||||||
|
// are the number of trailing non-starters.
|
||||||
|
|
||||||
|
const (
|
||||||
|
qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
|
||||||
|
headerLenMask = 0x3F // extract the length value from the header byte
|
||||||
|
headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
|
||||||
|
)
|
||||||
|
|
||||||
|
// Properties provides access to normalization properties of a rune.
|
||||||
|
type Properties struct {
|
||||||
|
pos uint8 // start position in reorderBuffer; used in composition.go
|
||||||
|
size uint8 // length of UTF-8 encoding of this rune
|
||||||
|
ccc uint8 // leading canonical combining class (ccc if not decomposition)
|
||||||
|
tccc uint8 // trailing canonical combining class (ccc if not decomposition)
|
||||||
|
nLead uint8 // number of leading non-starters.
|
||||||
|
flags qcInfo // quick check flags
|
||||||
|
index uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// functions dispatchable per form
|
||||||
|
type lookupFunc func(b input, i int) Properties
|
||||||
|
|
||||||
|
// formInfo holds Form-specific functions and tables.
|
||||||
|
type formInfo struct {
|
||||||
|
form Form
|
||||||
|
composing, compatibility bool // form type
|
||||||
|
info lookupFunc
|
||||||
|
nextMain iterFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
var formTable = []*formInfo{{
|
||||||
|
form: NFC,
|
||||||
|
composing: true,
|
||||||
|
compatibility: false,
|
||||||
|
info: lookupInfoNFC,
|
||||||
|
nextMain: nextComposed,
|
||||||
|
}, {
|
||||||
|
form: NFD,
|
||||||
|
composing: false,
|
||||||
|
compatibility: false,
|
||||||
|
info: lookupInfoNFC,
|
||||||
|
nextMain: nextDecomposed,
|
||||||
|
}, {
|
||||||
|
form: NFKC,
|
||||||
|
composing: true,
|
||||||
|
compatibility: true,
|
||||||
|
info: lookupInfoNFKC,
|
||||||
|
nextMain: nextComposed,
|
||||||
|
}, {
|
||||||
|
form: NFKD,
|
||||||
|
composing: false,
|
||||||
|
compatibility: true,
|
||||||
|
info: lookupInfoNFKC,
|
||||||
|
nextMain: nextDecomposed,
|
||||||
|
}}
|
||||||
|
|
||||||
|
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
|
||||||
|
// unexpected behavior for the user. For example, in NFD, there is a boundary
|
||||||
|
// after 'a'. However, 'a' might combine with modifiers, so from the application's
|
||||||
|
// perspective it is not a good boundary. We will therefore always use the
|
||||||
|
// boundaries for the combining variants.
|
||||||
|
|
||||||
|
// BoundaryBefore returns true if this rune starts a new segment and
|
||||||
|
// cannot combine with any rune on the left.
|
||||||
|
func (p Properties) BoundaryBefore() bool {
|
||||||
|
if p.ccc == 0 && !p.combinesBackward() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// We assume that the CCC of the first character in a decomposition
|
||||||
|
// is always non-zero if different from info.ccc and that we can return
|
||||||
|
// false at this point. This is verified by maketables.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoundaryAfter returns true if runes cannot combine with or otherwise
|
||||||
|
// interact with this or previous runes.
|
||||||
|
func (p Properties) BoundaryAfter() bool {
|
||||||
|
// TODO: loosen these conditions.
|
||||||
|
return p.isInert()
|
||||||
|
}
|
||||||
|
|
||||||
|
// We pack quick check data in 4 bits:
|
||||||
|
// 5: Combines forward (0 == false, 1 == true)
|
||||||
|
// 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
|
||||||
|
// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
|
||||||
|
// 1..0: Number of trailing non-starters.
|
||||||
|
//
|
||||||
|
// When all 4 bits are zero, the character is inert, meaning it is never
|
||||||
|
// influenced by normalization.
|
||||||
|
type qcInfo uint8
|
||||||
|
|
||||||
|
func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
|
||||||
|
func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }
|
||||||
|
|
||||||
|
func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 }
|
||||||
|
func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
|
||||||
|
func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD
|
||||||
|
|
||||||
|
func (p Properties) isInert() bool {
|
||||||
|
return p.flags&qcInfoMask == 0 && p.ccc == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Properties) multiSegment() bool {
|
||||||
|
return p.index >= firstMulti && p.index < endMulti
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Properties) nLeadingNonStarters() uint8 {
|
||||||
|
return p.nLead
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Properties) nTrailingNonStarters() uint8 {
|
||||||
|
return uint8(p.flags & 0x03)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decomposition returns the decomposition for the underlying rune
|
||||||
|
// or nil if there is none.
|
||||||
|
func (p Properties) Decomposition() []byte {
|
||||||
|
// TODO: create the decomposition for Hangul?
|
||||||
|
if p.index == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
i := p.index
|
||||||
|
n := decomps[i] & headerLenMask
|
||||||
|
i++
|
||||||
|
return decomps[i : i+uint16(n)]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the length of UTF-8 encoding of the rune.
|
||||||
|
func (p Properties) Size() int {
|
||||||
|
return int(p.size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CCC returns the canonical combining class of the underlying rune.
|
||||||
|
func (p Properties) CCC() uint8 {
|
||||||
|
if p.index >= firstCCCZeroExcept {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return ccc[p.ccc]
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeadCCC returns the CCC of the first rune in the decomposition.
|
||||||
|
// If there is no decomposition, LeadCCC equals CCC.
|
||||||
|
func (p Properties) LeadCCC() uint8 {
|
||||||
|
return ccc[p.ccc]
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrailCCC returns the CCC of the last rune in the decomposition.
|
||||||
|
// If there is no decomposition, TrailCCC equals CCC.
|
||||||
|
func (p Properties) TrailCCC() uint8 {
|
||||||
|
return ccc[p.tccc]
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildRecompMap() {
|
||||||
|
recompMap = make(map[uint32]rune, len(recompMapPacked)/8)
|
||||||
|
var buf [8]byte
|
||||||
|
for i := 0; i < len(recompMapPacked); i += 8 {
|
||||||
|
copy(buf[:], recompMapPacked[i:i+8])
|
||||||
|
key := binary.BigEndian.Uint32(buf[:4])
|
||||||
|
val := binary.BigEndian.Uint32(buf[4:])
|
||||||
|
recompMap[key] = rune(val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recomposition
|
||||||
|
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
|
||||||
|
// This clips off the bits of three entries, but we know this will not
|
||||||
|
// result in a collision. In the unlikely event that changes to
|
||||||
|
// UnicodeData.txt introduce collisions, the compiler will catch it.
|
||||||
|
// Note that the recomposition map for NFC and NFKC are identical.
|
||||||
|
|
||||||
|
// combine returns the combined rune or 0 if it doesn't exist.
|
||||||
|
//
|
||||||
|
// The caller is responsible for calling
|
||||||
|
// recompMapOnce.Do(buildRecompMap) sometime before this is called.
|
||||||
|
func combine(a, b rune) rune {
|
||||||
|
key := uint32(uint16(a))<<16 + uint32(uint16(b))
|
||||||
|
if recompMap == nil {
|
||||||
|
panic("caller error") // see func comment
|
||||||
|
}
|
||||||
|
return recompMap[key]
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookupInfoNFC(b input, i int) Properties {
|
||||||
|
v, sz := b.charinfoNFC(i)
|
||||||
|
return compInfo(v, sz)
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookupInfoNFKC(b input, i int) Properties {
|
||||||
|
v, sz := b.charinfoNFKC(i)
|
||||||
|
return compInfo(v, sz)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Properties returns properties for the first rune in s.
|
||||||
|
func (f Form) Properties(s []byte) Properties {
|
||||||
|
if f == NFC || f == NFD {
|
||||||
|
return compInfo(nfcData.lookup(s))
|
||||||
|
}
|
||||||
|
return compInfo(nfkcData.lookup(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PropertiesString returns properties for the first rune in s.
|
||||||
|
func (f Form) PropertiesString(s string) Properties {
|
||||||
|
if f == NFC || f == NFD {
|
||||||
|
return compInfo(nfcData.lookupString(s))
|
||||||
|
}
|
||||||
|
return compInfo(nfkcData.lookupString(s))
|
||||||
|
}
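// Usage sketch for the Properties accessors (illustrative, not part of the
// upstream source; assumes "fmt" and this package as "norm" are imported).
// COMBINING ACUTE ACCENT (U+0301) has canonical combining class 230, a
// two-byte UTF-8 encoding, and no decomposition.
//
//	p := norm.NFC.PropertiesString("\u0301")
//	fmt.Println(p.CCC(), p.Size(), p.Decomposition() == nil)
//	// 230 2 true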
|
||||||
|
|
||||||
|
// compInfo converts the information contained in v and sz
|
||||||
|
// to a Properties. See the comment at the top of the file
|
||||||
|
// for more information on the format.
|
||||||
|
func compInfo(v uint16, sz int) Properties {
|
||||||
|
if v == 0 {
|
||||||
|
return Properties{size: uint8(sz)}
|
||||||
|
} else if v >= 0x8000 {
|
||||||
|
p := Properties{
|
||||||
|
size: uint8(sz),
|
||||||
|
ccc: uint8(v),
|
||||||
|
tccc: uint8(v),
|
||||||
|
flags: qcInfo(v >> 8),
|
||||||
|
}
|
||||||
|
if p.ccc > 0 || p.combinesBackward() {
|
||||||
|
p.nLead = uint8(p.flags & 0x3)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
// has decomposition
|
||||||
|
h := decomps[v]
|
||||||
|
f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
|
||||||
|
p := Properties{size: uint8(sz), flags: f, index: v}
|
||||||
|
if v >= firstCCC {
|
||||||
|
v += uint16(h&headerLenMask) + 1
|
||||||
|
c := decomps[v]
|
||||||
|
p.tccc = c >> 2
|
||||||
|
p.flags |= qcInfo(c & 0x3)
|
||||||
|
if v >= firstLeadingCCC {
|
||||||
|
p.nLead = c & 0x3
|
||||||
|
if v >= firstStarterWithNLead {
|
||||||
|
// We were tricked. Remove the decomposition.
|
||||||
|
p.flags &= 0x03
|
||||||
|
p.index = 0
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
p.ccc = decomps[v+1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
109
vendor/golang.org/x/text/unicode/norm/input.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package norm
|
||||||
|
|
||||||
|
import "unicode/utf8"
|
||||||
|
|
||||||
|
type input struct {
|
||||||
|
str string
|
||||||
|
bytes []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func inputBytes(str []byte) input {
|
||||||
|
return input{bytes: str}
|
||||||
|
}
|
||||||
|
|
||||||
|
func inputString(str string) input {
|
||||||
|
return input{str: str}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) setBytes(str []byte) {
|
||||||
|
in.str = ""
|
||||||
|
in.bytes = str
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) setString(str string) {
|
||||||
|
in.str = str
|
||||||
|
in.bytes = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) _byte(p int) byte {
|
||||||
|
if in.bytes == nil {
|
||||||
|
return in.str[p]
|
||||||
|
}
|
||||||
|
return in.bytes[p]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) skipASCII(p, max int) int {
|
||||||
|
if in.bytes == nil {
|
||||||
|
for ; p < max && in.str[p] < utf8.RuneSelf; p++ {
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) skipContinuationBytes(p int) int {
|
||||||
|
if in.bytes == nil {
|
||||||
|
for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ {
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) appendSlice(buf []byte, b, e int) []byte {
|
||||||
|
if in.bytes != nil {
|
||||||
|
return append(buf, in.bytes[b:e]...)
|
||||||
|
}
|
||||||
|
for i := b; i < e; i++ {
|
||||||
|
buf = append(buf, in.str[i])
|
||||||
|
}
|
||||||
|
return buf
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) copySlice(buf []byte, b, e int) int {
|
||||||
|
if in.bytes == nil {
|
||||||
|
return copy(buf, in.str[b:e])
|
||||||
|
}
|
||||||
|
return copy(buf, in.bytes[b:e])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) charinfoNFC(p int) (uint16, int) {
|
||||||
|
if in.bytes == nil {
|
||||||
|
return nfcData.lookupString(in.str[p:])
|
||||||
|
}
|
||||||
|
return nfcData.lookup(in.bytes[p:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) charinfoNFKC(p int) (uint16, int) {
|
||||||
|
if in.bytes == nil {
|
||||||
|
return nfkcData.lookupString(in.str[p:])
|
||||||
|
}
|
||||||
|
return nfkcData.lookup(in.bytes[p:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *input) hangul(p int) (r rune) {
|
||||||
|
var size int
|
||||||
|
if in.bytes == nil {
|
||||||
|
if !isHangulString(in.str[p:]) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
r, size = utf8.DecodeRuneInString(in.str[p:])
|
||||||
|
} else {
|
||||||
|
if !isHangul(in.bytes[p:]) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
r, size = utf8.DecodeRune(in.bytes[p:])
|
||||||
|
}
|
||||||
|
if size != hangulUTF8Size {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
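The input type above lets the rest of the package run over either a string or a []byte source without duplicating each algorithm. A minimal standalone sketch of the same pattern, with made-up names (source, byteAt) that are not part of the vendored package:

package main

import "fmt"

// source mirrors the idea of the unexported input type: one value backed by
// either a string or a []byte. Names here are illustrative only.
type source struct {
    str   string
    bytes []byte
}

// byteAt reads a single byte from whichever backing store is set.
func (s source) byteAt(p int) byte {
    if s.bytes == nil {
        return s.str[p]
    }
    return s.bytes[p]
}

func main() {
    a := source{str: "abc"}
    b := source{bytes: []byte("abc")}
    fmt.Println(a.byteAt(1) == b.byteAt(1)) // true
}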
458
vendor/golang.org/x/text/unicode/norm/iter.go
generated
vendored
Normal file
@ -0,0 +1,458 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package norm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
|
||||||
|
// sequence of starter and non-starter runes for the purpose of normalization.
|
||||||
|
const MaxSegmentSize = maxByteBufferSize
|
||||||
|
|
||||||
|
// An Iter iterates over a string or byte slice, while normalizing it
|
||||||
|
// to a given Form.
|
||||||
|
type Iter struct {
|
||||||
|
rb reorderBuffer
|
||||||
|
buf [maxByteBufferSize]byte
|
||||||
|
info Properties // first character saved from previous iteration
|
||||||
|
next iterFunc // implementation of next depends on form
|
||||||
|
asciiF iterFunc
|
||||||
|
|
||||||
|
p int // current position in input source
|
||||||
|
multiSeg []byte // remainder of multi-segment decomposition
|
||||||
|
}
|
||||||
|
|
||||||
|
type iterFunc func(*Iter) []byte
|
||||||
|
|
||||||
|
// Init initializes i to iterate over src after normalizing it to Form f.
|
||||||
|
func (i *Iter) Init(f Form, src []byte) {
|
||||||
|
i.p = 0
|
||||||
|
if len(src) == 0 {
|
||||||
|
i.setDone()
|
||||||
|
i.rb.nsrc = 0
|
||||||
|
return
|
||||||
|
}
|
||||||
|
i.multiSeg = nil
|
||||||
|
i.rb.init(f, src)
|
||||||
|
i.next = i.rb.f.nextMain
|
||||||
|
i.asciiF = nextASCIIBytes
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
i.rb.ss.first(i.info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InitString initializes i to iterate over src after normalizing it to Form f.
|
||||||
|
func (i *Iter) InitString(f Form, src string) {
|
||||||
|
i.p = 0
|
||||||
|
if len(src) == 0 {
|
||||||
|
i.setDone()
|
||||||
|
i.rb.nsrc = 0
|
||||||
|
return
|
||||||
|
}
|
||||||
|
i.multiSeg = nil
|
||||||
|
i.rb.initString(f, src)
|
||||||
|
i.next = i.rb.f.nextMain
|
||||||
|
i.asciiF = nextASCIIString
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
i.rb.ss.first(i.info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Seek sets the segment to be returned by the next call to Next to start
|
||||||
|
// at position p. It is the responsibility of the caller to set p to the
|
||||||
|
// start of a segment.
|
||||||
|
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
var abs int64
|
||||||
|
switch whence {
|
||||||
|
case 0:
|
||||||
|
abs = offset
|
||||||
|
case 1:
|
||||||
|
abs = int64(i.p) + offset
|
||||||
|
case 2:
|
||||||
|
abs = int64(i.rb.nsrc) + offset
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("norm: invalid whence")
|
||||||
|
}
|
||||||
|
if abs < 0 {
|
||||||
|
return 0, fmt.Errorf("norm: negative position")
|
||||||
|
}
|
||||||
|
if int(abs) >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
return int64(i.p), nil
|
||||||
|
}
|
||||||
|
i.p = int(abs)
|
||||||
|
i.multiSeg = nil
|
||||||
|
i.next = i.rb.f.nextMain
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
i.rb.ss.first(i.info)
|
||||||
|
return abs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// returnSlice returns a slice of the underlying input type as a byte slice.
|
||||||
|
// If the underlying is of type []byte, it will simply return a slice.
|
||||||
|
// If the underlying is of type string, it will copy the slice to the buffer
|
||||||
|
// and return that.
|
||||||
|
func (i *Iter) returnSlice(a, b int) []byte {
|
||||||
|
if i.rb.src.bytes == nil {
|
||||||
|
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
|
||||||
|
}
|
||||||
|
return i.rb.src.bytes[a:b]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pos returns the byte position at which the next call to Next will commence processing.
|
||||||
|
func (i *Iter) Pos() int {
|
||||||
|
return i.p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *Iter) setDone() {
|
||||||
|
i.next = nextDone
|
||||||
|
i.p = i.rb.nsrc
|
||||||
|
}
|
||||||
|
|
||||||
|
// Done returns true if there is no more input to process.
|
||||||
|
func (i *Iter) Done() bool {
|
||||||
|
return i.p >= i.rb.nsrc
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
|
||||||
|
// For any input a and b for which f(a) == f(b), subsequent calls
|
||||||
|
// to Next will return the same segments.
|
||||||
|
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
|
||||||
|
// Although not guaranteed, n will typically be the smallest possible n.
|
||||||
|
func (i *Iter) Next() []byte {
|
||||||
|
return i.next(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func nextASCIIBytes(i *Iter) []byte {
|
||||||
|
p := i.p + 1
|
||||||
|
if p >= i.rb.nsrc {
|
||||||
|
p0 := i.p
|
||||||
|
i.setDone()
|
||||||
|
return i.rb.src.bytes[p0:p]
|
||||||
|
}
|
||||||
|
if i.rb.src.bytes[p] < utf8.RuneSelf {
|
||||||
|
p0 := i.p
|
||||||
|
i.p = p
|
||||||
|
return i.rb.src.bytes[p0:p]
|
||||||
|
}
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
i.next = i.rb.f.nextMain
|
||||||
|
return i.next(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func nextASCIIString(i *Iter) []byte {
|
||||||
|
p := i.p + 1
|
||||||
|
if p >= i.rb.nsrc {
|
||||||
|
i.buf[0] = i.rb.src.str[i.p]
|
||||||
|
i.setDone()
|
||||||
|
return i.buf[:1]
|
||||||
|
}
|
||||||
|
if i.rb.src.str[p] < utf8.RuneSelf {
|
||||||
|
i.buf[0] = i.rb.src.str[i.p]
|
||||||
|
i.p = p
|
||||||
|
return i.buf[:1]
|
||||||
|
}
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
i.next = i.rb.f.nextMain
|
||||||
|
return i.next(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func nextHangul(i *Iter) []byte {
|
||||||
|
p := i.p
|
||||||
|
next := p + hangulUTF8Size
|
||||||
|
if next >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
} else if i.rb.src.hangul(next) == 0 {
|
||||||
|
i.rb.ss.next(i.info)
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
i.next = i.rb.f.nextMain
|
||||||
|
return i.next(i)
|
||||||
|
}
|
||||||
|
i.p = next
|
||||||
|
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
|
||||||
|
}
|
||||||
|
|
||||||
|
func nextDone(i *Iter) []byte {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextMulti is used for iterating over multi-segment decompositions
|
||||||
|
// for decomposing normal forms.
|
||||||
|
func nextMulti(i *Iter) []byte {
|
||||||
|
j := 0
|
||||||
|
d := i.multiSeg
|
||||||
|
// skip first rune
|
||||||
|
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
|
||||||
|
}
|
||||||
|
for j < len(d) {
|
||||||
|
info := i.rb.f.info(input{bytes: d}, j)
|
||||||
|
if info.BoundaryBefore() {
|
||||||
|
i.multiSeg = d[j:]
|
||||||
|
return d[:j]
|
||||||
|
}
|
||||||
|
j += int(info.size)
|
||||||
|
}
|
||||||
|
// treat last segment as normal decomposition
|
||||||
|
i.next = i.rb.f.nextMain
|
||||||
|
return i.next(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextMultiNorm is used for iterating over multi-segment decompositions
|
||||||
|
// for composing normal forms.
|
||||||
|
func nextMultiNorm(i *Iter) []byte {
|
||||||
|
j := 0
|
||||||
|
d := i.multiSeg
|
||||||
|
for j < len(d) {
|
||||||
|
info := i.rb.f.info(input{bytes: d}, j)
|
||||||
|
if info.BoundaryBefore() {
|
||||||
|
i.rb.compose()
|
||||||
|
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||||
|
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
||||||
|
i.multiSeg = d[j+int(info.size):]
|
||||||
|
return seg
|
||||||
|
}
|
||||||
|
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
||||||
|
j += int(info.size)
|
||||||
|
}
|
||||||
|
i.multiSeg = nil
|
||||||
|
i.next = nextComposed
|
||||||
|
return doNormComposed(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
|
||||||
|
func nextDecomposed(i *Iter) (next []byte) {
|
||||||
|
outp := 0
|
||||||
|
inCopyStart, outCopyStart := i.p, 0
|
||||||
|
for {
|
||||||
|
if sz := int(i.info.size); sz <= 1 {
|
||||||
|
i.rb.ss = 0
|
||||||
|
p := i.p
|
||||||
|
i.p++ // ASCII or illegal byte. Either way, advance by 1.
|
||||||
|
if i.p >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
return i.returnSlice(p, i.p)
|
||||||
|
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
|
||||||
|
i.next = i.asciiF
|
||||||
|
return i.returnSlice(p, i.p)
|
||||||
|
}
|
||||||
|
outp++
|
||||||
|
} else if d := i.info.Decomposition(); d != nil {
|
||||||
|
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
|
||||||
|
// Case 1: there is a leftover to copy. In this case the decomposition
|
||||||
|
// must begin with a modifier and should always be appended.
|
||||||
|
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
|
||||||
|
p := outp + len(d)
|
||||||
|
if outp > 0 {
|
||||||
|
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
|
||||||
|
// TODO: this condition should not be possible, but we leave it
|
||||||
|
// in for defensive purposes.
|
||||||
|
if p > len(i.buf) {
|
||||||
|
return i.buf[:outp]
|
||||||
|
}
|
||||||
|
} else if i.info.multiSegment() {
|
||||||
|
// outp must be 0 as multi-segment decompositions always
|
||||||
|
// start a new segment.
|
||||||
|
if i.multiSeg == nil {
|
||||||
|
i.multiSeg = d
|
||||||
|
i.next = nextMulti
|
||||||
|
return nextMulti(i)
|
||||||
|
}
|
||||||
|
// We are in the last segment. Treat as normal decomposition.
|
||||||
|
d = i.multiSeg
|
||||||
|
i.multiSeg = nil
|
||||||
|
p = len(d)
|
||||||
|
}
|
||||||
|
prevCC := i.info.tccc
|
||||||
|
if i.p += sz; i.p >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
i.info = Properties{} // Force BoundaryBefore to succeed.
|
||||||
|
} else {
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
}
|
||||||
|
switch i.rb.ss.next(i.info) {
|
||||||
|
case ssOverflow:
|
||||||
|
i.next = nextCGJDecompose
|
||||||
|
fallthrough
|
||||||
|
case ssStarter:
|
||||||
|
if outp > 0 {
|
||||||
|
copy(i.buf[outp:], d)
|
||||||
|
return i.buf[:p]
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
copy(i.buf[outp:], d)
|
||||||
|
outp = p
|
||||||
|
inCopyStart, outCopyStart = i.p, outp
|
||||||
|
if i.info.ccc < prevCC {
|
||||||
|
goto doNorm
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else if r := i.rb.src.hangul(i.p); r != 0 {
|
||||||
|
outp = decomposeHangul(i.buf[:], r)
|
||||||
|
i.p += hangulUTF8Size
|
||||||
|
inCopyStart, outCopyStart = i.p, outp
|
||||||
|
if i.p >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
break
|
||||||
|
} else if i.rb.src.hangul(i.p) != 0 {
|
||||||
|
i.next = nextHangul
|
||||||
|
return i.buf[:outp]
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
p := outp + sz
|
||||||
|
if p > len(i.buf) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
outp = p
|
||||||
|
i.p += sz
|
||||||
|
}
|
||||||
|
if i.p >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
prevCC := i.info.tccc
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
if v := i.rb.ss.next(i.info); v == ssStarter {
|
||||||
|
break
|
||||||
|
} else if v == ssOverflow {
|
||||||
|
i.next = nextCGJDecompose
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if i.info.ccc < prevCC {
|
||||||
|
goto doNorm
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if outCopyStart == 0 {
|
||||||
|
return i.returnSlice(inCopyStart, i.p)
|
||||||
|
} else if inCopyStart < i.p {
|
||||||
|
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
|
||||||
|
}
|
||||||
|
return i.buf[:outp]
|
||||||
|
doNorm:
|
||||||
|
// Insert what we have decomposed so far in the reorderBuffer.
|
||||||
|
// As we will only reorder, there will always be enough room.
|
||||||
|
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
|
||||||
|
i.rb.insertDecomposed(i.buf[0:outp])
|
||||||
|
return doNormDecomposed(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doNormDecomposed(i *Iter) []byte {
|
||||||
|
for {
|
||||||
|
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
||||||
|
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
if i.info.ccc == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if s := i.rb.ss.next(i.info); s == ssOverflow {
|
||||||
|
i.next = nextCGJDecompose
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// new segment or too many combining characters: exit normalization
|
||||||
|
return i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||||
|
}
|
||||||
|
|
||||||
|
func nextCGJDecompose(i *Iter) []byte {
|
||||||
|
i.rb.ss = 0
|
||||||
|
i.rb.insertCGJ()
|
||||||
|
i.next = nextDecomposed
|
||||||
|
i.rb.ss.first(i.info)
|
||||||
|
buf := doNormDecomposed(i)
|
||||||
|
return buf
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextComposed is the implementation of Next for forms NFC and NFKC.
|
||||||
|
func nextComposed(i *Iter) []byte {
|
||||||
|
outp, startp := 0, i.p
|
||||||
|
var prevCC uint8
|
||||||
|
for {
|
||||||
|
if !i.info.isYesC() {
|
||||||
|
goto doNorm
|
||||||
|
}
|
||||||
|
prevCC = i.info.tccc
|
||||||
|
sz := int(i.info.size)
|
||||||
|
if sz == 0 {
|
||||||
|
sz = 1 // illegal rune: copy byte-by-byte
|
||||||
|
}
|
||||||
|
p := outp + sz
|
||||||
|
if p > len(i.buf) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
outp = p
|
||||||
|
i.p += sz
|
||||||
|
if i.p >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
break
|
||||||
|
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
|
||||||
|
i.rb.ss = 0
|
||||||
|
i.next = i.asciiF
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
if v := i.rb.ss.next(i.info); v == ssStarter {
|
||||||
|
break
|
||||||
|
} else if v == ssOverflow {
|
||||||
|
i.next = nextCGJCompose
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if i.info.ccc < prevCC {
|
||||||
|
goto doNorm
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i.returnSlice(startp, i.p)
|
||||||
|
doNorm:
|
||||||
|
// reset to start position
|
||||||
|
i.p = startp
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
i.rb.ss.first(i.info)
|
||||||
|
if i.info.multiSegment() {
|
||||||
|
d := i.info.Decomposition()
|
||||||
|
info := i.rb.f.info(input{bytes: d}, 0)
|
||||||
|
i.rb.insertUnsafe(input{bytes: d}, 0, info)
|
||||||
|
i.multiSeg = d[int(info.size):]
|
||||||
|
i.next = nextMultiNorm
|
||||||
|
return nextMultiNorm(i)
|
||||||
|
}
|
||||||
|
i.rb.ss.first(i.info)
|
||||||
|
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
||||||
|
return doNormComposed(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doNormComposed(i *Iter) []byte {
|
||||||
|
// First rune should already be inserted.
|
||||||
|
for {
|
||||||
|
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
|
||||||
|
i.setDone()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||||
|
if s := i.rb.ss.next(i.info); s == ssStarter {
|
||||||
|
break
|
||||||
|
} else if s == ssOverflow {
|
||||||
|
i.next = nextCGJCompose
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
||||||
|
}
|
||||||
|
i.rb.compose()
|
||||||
|
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||||
|
return seg
|
||||||
|
}
|
||||||
|
|
||||||
|
func nextCGJCompose(i *Iter) []byte {
|
||||||
|
i.rb.ss = 0 // instead of first
|
||||||
|
i.rb.insertCGJ()
|
||||||
|
i.next = nextComposed
|
||||||
|
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
|
||||||
|
// even if they are not. This is particularly dubious for U+FF9E and UFF9A.
|
||||||
|
// If we ever change that, insert a check here.
|
||||||
|
i.rb.ss.first(i.info)
|
||||||
|
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
||||||
|
return doNormComposed(i)
|
||||||
|
}
|
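Iter.Init/InitString, Next, and Done above expose the package's segment-at-a-time iteration. A short usage sketch, assuming the upstream import path golang.org/x/text/unicode/norm and example input of my own choosing:

package main

import (
    "fmt"

    "golang.org/x/text/unicode/norm"
)

func main() {
    var it norm.Iter
    it.InitString(norm.NFD, "résumé")
    // Next returns one normalized segment at a time until Done reports true.
    for !it.Done() {
        fmt.Printf("%q\n", it.Next())
    }
}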
609
vendor/golang.org/x/text/unicode/norm/normalize.go
generated
vendored
Normal file
@ -0,0 +1,609 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Note: the file data_test.go that is generated should not be checked in.
|
||||||
|
//go:generate go run maketables.go triegen.go
|
||||||
|
//go:generate go test -tags test
|
||||||
|
|
||||||
|
// Package norm contains types and functions for normalizing Unicode strings.
|
||||||
|
package norm // import "golang.org/x/text/unicode/norm"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/text/transform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Form denotes a canonical representation of Unicode code points.
|
||||||
|
// The Unicode-defined normalization and equivalence forms are:
|
||||||
|
//
|
||||||
|
// NFC Unicode Normalization Form C
|
||||||
|
// NFD Unicode Normalization Form D
|
||||||
|
// NFKC Unicode Normalization Form KC
|
||||||
|
// NFKD Unicode Normalization Form KD
|
||||||
|
//
|
||||||
|
// For a Form f, this documentation uses the notation f(x) to mean
|
||||||
|
// the bytes or string x converted to the given form.
|
||||||
|
// A position n in x is called a boundary if conversion to the form can
|
||||||
|
// proceed independently on both sides:
|
||||||
|
// f(x) == append(f(x[0:n]), f(x[n:])...)
|
||||||
|
//
|
||||||
|
// References: https://unicode.org/reports/tr15/ and
|
||||||
|
// https://unicode.org/notes/tn5/.
|
||||||
|
type Form int
|
||||||
|
|
||||||
|
const (
|
||||||
|
NFC Form = iota
|
||||||
|
NFD
|
||||||
|
NFKC
|
||||||
|
NFKD
|
||||||
|
)
|
||||||
|
|
||||||
|
// Bytes returns f(b). May return b if f(b) = b.
|
||||||
|
func (f Form) Bytes(b []byte) []byte {
|
||||||
|
src := inputBytes(b)
|
||||||
|
ft := formTable[f]
|
||||||
|
n, ok := ft.quickSpan(src, 0, len(b), true)
|
||||||
|
if ok {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
out := make([]byte, n, len(b))
|
||||||
|
copy(out, b[0:n])
|
||||||
|
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush}
|
||||||
|
return doAppendInner(&rb, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns f(s).
|
||||||
|
func (f Form) String(s string) string {
|
||||||
|
src := inputString(s)
|
||||||
|
ft := formTable[f]
|
||||||
|
n, ok := ft.quickSpan(src, 0, len(s), true)
|
||||||
|
if ok {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
out := make([]byte, n, len(s))
|
||||||
|
copy(out, s[0:n])
|
||||||
|
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush}
|
||||||
|
return string(doAppendInner(&rb, n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNormal returns true if b == f(b).
|
||||||
|
func (f Form) IsNormal(b []byte) bool {
|
||||||
|
src := inputBytes(b)
|
||||||
|
ft := formTable[f]
|
||||||
|
bp, ok := ft.quickSpan(src, 0, len(b), true)
|
||||||
|
if ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)}
|
||||||
|
rb.setFlusher(nil, cmpNormalBytes)
|
||||||
|
for bp < len(b) {
|
||||||
|
rb.out = b[bp:]
|
||||||
|
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func cmpNormalBytes(rb *reorderBuffer) bool {
|
||||||
|
b := rb.out
|
||||||
|
for i := 0; i < rb.nrune; i++ {
|
||||||
|
info := rb.rune[i]
|
||||||
|
if int(info.size) > len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
p := info.pos
|
||||||
|
pe := p + info.size
|
||||||
|
for ; p < pe; p++ {
|
||||||
|
if b[0] != rb.byte[p] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b = b[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNormalString returns true if s == f(s).
|
||||||
|
func (f Form) IsNormalString(s string) bool {
|
||||||
|
src := inputString(s)
|
||||||
|
ft := formTable[f]
|
||||||
|
bp, ok := ft.quickSpan(src, 0, len(s), true)
|
||||||
|
if ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)}
|
||||||
|
rb.setFlusher(nil, func(rb *reorderBuffer) bool {
|
||||||
|
for i := 0; i < rb.nrune; i++ {
|
||||||
|
info := rb.rune[i]
|
||||||
|
if bp+int(info.size) > len(s) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
p := info.pos
|
||||||
|
pe := p + info.size
|
||||||
|
for ; p < pe; p++ {
|
||||||
|
if s[bp] != rb.byte[p] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
bp++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
for bp < len(s) {
|
||||||
|
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// patchTail fixes a case where a rune may be incorrectly normalized
|
||||||
|
// if it is followed by illegal continuation bytes. It returns the
|
||||||
|
// patched buffer and whether the decomposition is still in progress.
|
||||||
|
func patchTail(rb *reorderBuffer) bool {
|
||||||
|
info, p := lastRuneStart(&rb.f, rb.out)
|
||||||
|
if p == -1 || info.size == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
end := p + int(info.size)
|
||||||
|
extra := len(rb.out) - end
|
||||||
|
if extra > 0 {
|
||||||
|
// Potentially allocating memory. However, this only
|
||||||
|
// happens with ill-formed UTF-8.
|
||||||
|
x := make([]byte, 0)
|
||||||
|
x = append(x, rb.out[len(rb.out)-extra:]...)
|
||||||
|
rb.out = rb.out[:end]
|
||||||
|
decomposeToLastBoundary(rb)
|
||||||
|
rb.doFlush()
|
||||||
|
rb.out = append(rb.out, x...)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
buf := rb.out[p:]
|
||||||
|
rb.out = rb.out[:p]
|
||||||
|
decomposeToLastBoundary(rb)
|
||||||
|
if s := rb.ss.next(info); s == ssStarter {
|
||||||
|
rb.doFlush()
|
||||||
|
rb.ss.first(info)
|
||||||
|
} else if s == ssOverflow {
|
||||||
|
rb.doFlush()
|
||||||
|
rb.insertCGJ()
|
||||||
|
rb.ss = 0
|
||||||
|
}
|
||||||
|
rb.insertUnsafe(inputBytes(buf), 0, info)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendQuick(rb *reorderBuffer, i int) int {
|
||||||
|
if rb.nsrc == i {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true)
|
||||||
|
rb.out = rb.src.appendSlice(rb.out, i, end)
|
||||||
|
return end
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append returns f(append(out, b...)).
|
||||||
|
// The buffer out must be nil, empty, or equal to f(out).
|
||||||
|
func (f Form) Append(out []byte, src ...byte) []byte {
|
||||||
|
return f.doAppend(out, inputBytes(src), len(src))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Form) doAppend(out []byte, src input, n int) []byte {
|
||||||
|
if n == 0 {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
ft := formTable[f]
|
||||||
|
// Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer.
|
||||||
|
if len(out) == 0 {
|
||||||
|
p, _ := ft.quickSpan(src, 0, n, true)
|
||||||
|
out = src.appendSlice(out, 0, p)
|
||||||
|
if p == n {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush}
|
||||||
|
return doAppendInner(&rb, p)
|
||||||
|
}
|
||||||
|
rb := reorderBuffer{f: *ft, src: src, nsrc: n}
|
||||||
|
return doAppend(&rb, out, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doAppend(rb *reorderBuffer, out []byte, p int) []byte {
|
||||||
|
rb.setFlusher(out, appendFlush)
|
||||||
|
src, n := rb.src, rb.nsrc
|
||||||
|
doMerge := len(out) > 0
|
||||||
|
if q := src.skipContinuationBytes(p); q > p {
|
||||||
|
// Move leading non-starters to destination.
|
||||||
|
rb.out = src.appendSlice(rb.out, p, q)
|
||||||
|
p = q
|
||||||
|
doMerge = patchTail(rb)
|
||||||
|
}
|
||||||
|
fd := &rb.f
|
||||||
|
if doMerge {
|
||||||
|
var info Properties
|
||||||
|
if p < n {
|
||||||
|
info = fd.info(src, p)
|
||||||
|
if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 {
|
||||||
|
if p == 0 {
|
||||||
|
decomposeToLastBoundary(rb)
|
||||||
|
}
|
||||||
|
p = decomposeSegment(rb, p, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if info.size == 0 {
|
||||||
|
rb.doFlush()
|
||||||
|
// Append incomplete UTF-8 encoding.
|
||||||
|
return src.appendSlice(rb.out, p, n)
|
||||||
|
}
|
||||||
|
if rb.nrune > 0 {
|
||||||
|
return doAppendInner(rb, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p = appendQuick(rb, p)
|
||||||
|
return doAppendInner(rb, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doAppendInner(rb *reorderBuffer, p int) []byte {
|
||||||
|
for n := rb.nsrc; p < n; {
|
||||||
|
p = decomposeSegment(rb, p, true)
|
||||||
|
p = appendQuick(rb, p)
|
||||||
|
}
|
||||||
|
return rb.out
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendString returns f(append(out, []byte(s))).
|
||||||
|
// The buffer out must be nil, empty, or equal to f(out).
|
||||||
|
func (f Form) AppendString(out []byte, src string) []byte {
|
||||||
|
return f.doAppend(out, inputString(src), len(src))
|
||||||
|
}
|
||||||
|
|
||||||
|
// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]).
|
||||||
|
// It is not guaranteed to return the largest such n.
|
||||||
|
func (f Form) QuickSpan(b []byte) int {
|
||||||
|
n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true)
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Span implements transform.SpanningTransformer. It returns a boundary n such
|
||||||
|
// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
|
||||||
|
func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
|
||||||
|
n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
|
||||||
|
if n < len(b) {
|
||||||
|
if !ok {
|
||||||
|
err = transform.ErrEndOfSpan
|
||||||
|
} else {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
|
||||||
|
// It is not guaranteed to return the largest such n.
|
||||||
|
func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
|
||||||
|
n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
|
||||||
|
if n < len(s) {
|
||||||
|
if !ok {
|
||||||
|
err = transform.ErrEndOfSpan
|
||||||
|
} else {
|
||||||
|
err = transform.ErrShortSrc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
|
||||||
|
// whether any non-normalized parts were found. If atEOF is false, n will
|
||||||
|
// not point past the last segment if this segment might be become
|
||||||
|
// non-normalized by appending other runes.
|
||||||
|
func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
|
||||||
|
var lastCC uint8
|
||||||
|
ss := streamSafe(0)
|
||||||
|
lastSegStart := i
|
||||||
|
for n = end; i < n; {
|
||||||
|
if j := src.skipASCII(i, n); i != j {
|
||||||
|
i = j
|
||||||
|
lastSegStart = i - 1
|
||||||
|
lastCC = 0
|
||||||
|
ss = 0
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
info := f.info(src, i)
|
||||||
|
if info.size == 0 {
|
||||||
|
if atEOF {
|
||||||
|
// include incomplete runes
|
||||||
|
return n, true
|
||||||
|
}
|
||||||
|
return lastSegStart, true
|
||||||
|
}
|
||||||
|
// This block needs to be before the next, because it is possible to
|
||||||
|
// have an overflow for runes that are starters (e.g. with U+FF9E).
|
||||||
|
switch ss.next(info) {
|
||||||
|
case ssStarter:
|
||||||
|
lastSegStart = i
|
||||||
|
case ssOverflow:
|
||||||
|
return lastSegStart, false
|
||||||
|
case ssSuccess:
|
||||||
|
if lastCC > info.ccc {
|
||||||
|
return lastSegStart, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if f.composing {
|
||||||
|
if !info.isYesC() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if !info.isYesD() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastCC = info.ccc
|
||||||
|
i += int(info.size)
|
||||||
|
}
|
||||||
|
if i == n {
|
||||||
|
if !atEOF {
|
||||||
|
n = lastSegStart
|
||||||
|
}
|
||||||
|
return n, true
|
||||||
|
}
|
||||||
|
return lastSegStart, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
|
||||||
|
// It is not guaranteed to return the largest such n.
|
||||||
|
func (f Form) QuickSpanString(s string) int {
|
||||||
|
n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstBoundary returns the position i of the first boundary in b
|
||||||
|
// or -1 if b contains no boundary.
|
||||||
|
func (f Form) FirstBoundary(b []byte) int {
|
||||||
|
return f.firstBoundary(inputBytes(b), len(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Form) firstBoundary(src input, nsrc int) int {
|
||||||
|
i := src.skipContinuationBytes(0)
|
||||||
|
if i >= nsrc {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
fd := formTable[f]
|
||||||
|
ss := streamSafe(0)
|
||||||
|
// We should call ss.first here, but we can't as the first rune is
|
||||||
|
// skipped already. This means FirstBoundary can't really determine
|
||||||
|
// CGJ insertion points correctly. Luckily it doesn't have to.
|
||||||
|
for {
|
||||||
|
info := fd.info(src, i)
|
||||||
|
if info.size == 0 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if s := ss.next(info); s != ssSuccess {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
i += int(info.size)
|
||||||
|
if i >= nsrc {
|
||||||
|
if !info.BoundaryAfter() && !ss.isMax() {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return nsrc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstBoundaryInString returns the position i of the first boundary in s
|
||||||
|
// or -1 if s contains no boundary.
|
||||||
|
func (f Form) FirstBoundaryInString(s string) int {
|
||||||
|
return f.firstBoundary(inputString(s), len(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextBoundary reports the index of the boundary between the first and next
|
||||||
|
// segment in b or -1 if atEOF is false and there are not enough bytes to
|
||||||
|
// determine this boundary.
|
||||||
|
func (f Form) NextBoundary(b []byte, atEOF bool) int {
|
||||||
|
return f.nextBoundary(inputBytes(b), len(b), atEOF)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextBoundaryInString reports the index of the boundary between the first and
|
||||||
|
// next segment in b or -1 if atEOF is false and there are not enough bytes to
|
||||||
|
// determine this boundary.
|
||||||
|
func (f Form) NextBoundaryInString(s string, atEOF bool) int {
|
||||||
|
return f.nextBoundary(inputString(s), len(s), atEOF)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
|
||||||
|
if nsrc == 0 {
|
||||||
|
if atEOF {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
fd := formTable[f]
|
||||||
|
info := fd.info(src, 0)
|
||||||
|
if info.size == 0 {
|
||||||
|
if atEOF {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
ss := streamSafe(0)
|
||||||
|
ss.first(info)
|
||||||
|
|
||||||
|
for i := int(info.size); i < nsrc; i += int(info.size) {
|
||||||
|
info = fd.info(src, i)
|
||||||
|
if info.size == 0 {
|
||||||
|
if atEOF {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
// TODO: Using streamSafe to determine the boundary isn't the same as
|
||||||
|
// using BoundaryBefore. Determine which should be used.
|
||||||
|
if s := ss.next(info); s != ssSuccess {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return nsrc
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastBoundary returns the position i of the last boundary in b
|
||||||
|
// or -1 if b contains no boundary.
|
||||||
|
func (f Form) LastBoundary(b []byte) int {
|
||||||
|
return lastBoundary(formTable[f], b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func lastBoundary(fd *formInfo, b []byte) int {
|
||||||
|
i := len(b)
|
||||||
|
info, p := lastRuneStart(fd, b)
|
||||||
|
if p == -1 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if info.size == 0 { // ends with incomplete rune
|
||||||
|
if p == 0 { // starts with incomplete rune
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
i = p
|
||||||
|
info, p = lastRuneStart(fd, b[:i])
|
||||||
|
if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
if info.BoundaryAfter() {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
ss := streamSafe(0)
|
||||||
|
v := ss.backwards(info)
|
||||||
|
for i = p; i >= 0 && v != ssStarter; i = p {
|
||||||
|
info, p = lastRuneStart(fd, b[:i])
|
||||||
|
if v = ss.backwards(info); v == ssOverflow {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if p+int(info.size) != i {
|
||||||
|
if p == -1 { // no boundary found
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return i // boundary after an illegal UTF-8 encoding
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
// decomposeSegment scans the first segment in src into rb. It inserts 0x034f
|
||||||
|
// (Grapheme Joiner) when it encounters a sequence of more than 30 non-starters
|
||||||
|
// and returns the number of bytes consumed from src or iShortDst or iShortSrc.
|
||||||
|
func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
|
||||||
|
// Force one character to be consumed.
|
||||||
|
info := rb.f.info(rb.src, sp)
|
||||||
|
if info.size == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
if s := rb.ss.next(info); s == ssStarter {
|
||||||
|
// TODO: this could be removed if we don't support merging.
|
||||||
|
if rb.nrune > 0 {
|
||||||
|
goto end
|
||||||
|
}
|
||||||
|
} else if s == ssOverflow {
|
||||||
|
rb.insertCGJ()
|
||||||
|
goto end
|
||||||
|
}
|
||||||
|
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
|
||||||
|
return int(err)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
sp += int(info.size)
|
||||||
|
if sp >= rb.nsrc {
|
||||||
|
if !atEOF && !info.BoundaryAfter() {
|
||||||
|
return int(iShortSrc)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
info = rb.f.info(rb.src, sp)
|
||||||
|
if info.size == 0 {
|
||||||
|
if !atEOF {
|
||||||
|
return int(iShortSrc)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if s := rb.ss.next(info); s == ssStarter {
|
||||||
|
break
|
||||||
|
} else if s == ssOverflow {
|
||||||
|
rb.insertCGJ()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
|
||||||
|
return int(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
end:
|
||||||
|
if !rb.doFlush() {
|
||||||
|
return int(iShortDst)
|
||||||
|
}
|
||||||
|
return sp
|
||||||
|
}
|
||||||
|
|
||||||
|
// lastRuneStart returns the runeInfo and position of the last
|
||||||
|
// rune in buf or the zero runeInfo and -1 if no rune was found.
|
||||||
|
func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) {
|
||||||
|
p := len(buf) - 1
|
||||||
|
for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
|
||||||
|
}
|
||||||
|
if p < 0 {
|
||||||
|
return Properties{}, -1
|
||||||
|
}
|
||||||
|
return fd.info(inputBytes(buf), p), p
|
||||||
|
}
|
||||||
|
|
||||||
|
// decomposeToLastBoundary finds an open segment at the end of the buffer
|
||||||
|
// and scans it into rb. Returns the buffer minus the last segment.
|
||||||
|
func decomposeToLastBoundary(rb *reorderBuffer) {
|
||||||
|
fd := &rb.f
|
||||||
|
info, i := lastRuneStart(fd, rb.out)
|
||||||
|
if int(info.size) != len(rb.out)-i {
|
||||||
|
// illegal trailing continuation bytes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if info.BoundaryAfter() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
|
||||||
|
padd := 0
|
||||||
|
ss := streamSafe(0)
|
||||||
|
p := len(rb.out)
|
||||||
|
for {
|
||||||
|
add[padd] = info
|
||||||
|
v := ss.backwards(info)
|
||||||
|
if v == ssOverflow {
|
||||||
|
// Note that if we have an overflow, it the string we are appending to
|
||||||
|
// is not correctly normalized. In this case the behavior is undefined.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
padd++
|
||||||
|
p -= int(info.size)
|
||||||
|
if v == ssStarter || p < 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
info, i = lastRuneStart(fd, rb.out[:p])
|
||||||
|
if int(info.size) != p-i {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rb.ss = ss
|
||||||
|
// Copy bytes for insertion as we may need to overwrite rb.out.
|
||||||
|
var buf [maxBufferSize * utf8.UTFMax]byte
|
||||||
|
cp := buf[:copy(buf[:], rb.out[p:])]
|
||||||
|
rb.out = rb.out[:p]
|
||||||
|
for padd--; padd >= 0; padd-- {
|
||||||
|
info = add[padd]
|
||||||
|
rb.insertUnsafe(inputBytes(cp), 0, info)
|
||||||
|
cp = cp[info.size:]
|
||||||
|
}
|
||||||
|
}
|
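The Form constants and the String, IsNormalString, and QuickSpanString methods above are the main entry points for normalizing whole values. A small usage sketch under the same upstream import-path assumption; the input string is an arbitrary example:

package main

import (
    "fmt"

    "golang.org/x/text/unicode/norm"
)

func main() {
    s := "e\u0301" // 'e' followed by U+0301 COMBINING ACUTE ACCENT
    fmt.Println(norm.NFC.IsNormalString(s)) // false: NFC composes this pair
    fmt.Println(norm.NFC.String(s))         // "é" (U+00E9)
    // QuickSpanString reports a prefix of s already known to be in NFC.
    fmt.Println(norm.NFC.QuickSpanString(s))
}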
125
vendor/golang.org/x/text/unicode/norm/readwriter.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package norm

import "io"

type normWriter struct {
    rb  reorderBuffer
    w   io.Writer
    buf []byte
}

// Write implements the standard write interface. If the last characters are
// not at a normalization boundary, the bytes will be buffered for the next
// write. The remaining bytes will be written on close.
func (w *normWriter) Write(data []byte) (n int, err error) {
    // Process data in pieces to keep w.buf size bounded.
    const chunk = 4000

    for len(data) > 0 {
        // Normalize into w.buf.
        m := len(data)
        if m > chunk {
            m = chunk
        }
        w.rb.src = inputBytes(data[:m])
        w.rb.nsrc = m
        w.buf = doAppend(&w.rb, w.buf, 0)
        data = data[m:]
        n += m

        // Write out complete prefix, save remainder.
        // Note that lastBoundary looks back at most 31 runes.
        i := lastBoundary(&w.rb.f, w.buf)
        if i == -1 {
            i = 0
        }
        if i > 0 {
            if _, err = w.w.Write(w.buf[:i]); err != nil {
                break
            }
            bn := copy(w.buf, w.buf[i:])
            w.buf = w.buf[:bn]
        }
    }
    return n, err
}

// Close forces data that remains in the buffer to be written.
func (w *normWriter) Close() error {
    if len(w.buf) > 0 {
        _, err := w.w.Write(w.buf)
        if err != nil {
            return err
        }
    }
    return nil
}

// Writer returns a new writer that implements Write(b)
// by writing f(b) to w. The returned writer may use an
// internal buffer to maintain state across Write calls.
// Calling its Close method writes any buffered data to w.
func (f Form) Writer(w io.Writer) io.WriteCloser {
    wr := &normWriter{rb: reorderBuffer{}, w: w}
    wr.rb.init(f, nil)
    return wr
}

type normReader struct {
    rb           reorderBuffer
    r            io.Reader
    inbuf        []byte
    outbuf       []byte
    bufStart     int
    lastBoundary int
    err          error
}

// Read implements the standard read interface.
func (r *normReader) Read(p []byte) (int, error) {
    for {
        if r.lastBoundary-r.bufStart > 0 {
            n := copy(p, r.outbuf[r.bufStart:r.lastBoundary])
            r.bufStart += n
            if r.lastBoundary-r.bufStart > 0 {
                return n, nil
            }
            return n, r.err
        }
        if r.err != nil {
            return 0, r.err
        }
        outn := copy(r.outbuf, r.outbuf[r.lastBoundary:])
        r.outbuf = r.outbuf[0:outn]
        r.bufStart = 0

        n, err := r.r.Read(r.inbuf)
        r.rb.src = inputBytes(r.inbuf[0:n])
        r.rb.nsrc, r.err = n, err
        if n > 0 {
            r.outbuf = doAppend(&r.rb, r.outbuf, 0)
        }
        if err == io.EOF {
            r.lastBoundary = len(r.outbuf)
        } else {
            r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf)
            if r.lastBoundary == -1 {
                r.lastBoundary = 0
            }
        }
    }
}

// Reader returns a new reader that implements Read
// by reading data from r and returning f(data).
func (f Form) Reader(r io.Reader) io.Reader {
    const chunk = 4000
    buf := make([]byte, chunk)
    rr := &normReader{rb: reorderBuffer{}, r: r, inbuf: buf}
    rr.rb.init(f, buf)
    return rr
}
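Form.Writer and Form.Reader above wrap streaming I/O so that normalization is applied at segment boundaries. A usage sketch (upstream import path assumed; the input strings are arbitrary examples):

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"

    "golang.org/x/text/unicode/norm"
)

func main() {
    // Stream NFC output into a buffer; bytes may be held back until a
    // normalization boundary is seen or Close is called.
    var out bytes.Buffer
    w := norm.NFC.Writer(&out)
    io.WriteString(w, "e\u0301")
    w.Close()
    fmt.Printf("%q\n", out.String())

    // Read NFD-normalized data from any io.Reader.
    r := norm.NFD.Reader(strings.NewReader("é"))
    b, _ := io.ReadAll(r)
    fmt.Printf("%q\n", b)
}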
7657
vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
7693
vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
7710
vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
7760
vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
7637
vendor/golang.org/x/text/unicode/norm/tables9.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
88
vendor/golang.org/x/text/unicode/norm/transform.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package norm

import (
    "unicode/utf8"

    "golang.org/x/text/transform"
)

// Reset implements the Reset method of the transform.Transformer interface.
func (Form) Reset() {}

// Transform implements the Transform method of the transform.Transformer
// interface. It may need to write segments of up to MaxSegmentSize at once.
// Users should either catch ErrShortDst and allow dst to grow or have dst be at
// least of size MaxTransformChunkSize to be guaranteed of progress.
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
    // Cap the maximum number of src bytes to check.
    b := src
    eof := atEOF
    if ns := len(dst); ns < len(b) {
        err = transform.ErrShortDst
        eof = false
        b = b[:ns]
    }
    i, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), eof)
    n := copy(dst, b[:i])
    if !ok {
        nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF)
        return nDst + n, nSrc + n, err
    }

    if err == nil && n < len(src) && !atEOF {
        err = transform.ErrShortSrc
    }
    return n, n, err
}

func flushTransform(rb *reorderBuffer) bool {
    // Write out (must fully fit in dst, or else it is an ErrShortDst).
    if len(rb.out) < rb.nrune*utf8.UTFMax {
        return false
    }
    rb.out = rb.out[rb.flushCopy(rb.out):]
    return true
}

var errs = []error{nil, transform.ErrShortDst, transform.ErrShortSrc}

// transform implements the transform.Transformer interface. It is only called
// when quickSpan does not pass for a given string.
func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
    // TODO: get rid of reorderBuffer. See CL 23460044.
    rb := reorderBuffer{}
    rb.init(f, src)
    for {
        // Load segment into reorder buffer.
        rb.setFlusher(dst[nDst:], flushTransform)
        end := decomposeSegment(&rb, nSrc, atEOF)
        if end < 0 {
            return nDst, nSrc, errs[-end]
        }
        nDst = len(dst) - len(rb.out)
        nSrc = end

        // Next quickSpan.
        end = rb.nsrc
        eof := atEOF
        if n := nSrc + len(dst) - nDst; n < end {
            err = transform.ErrShortDst
            end = n
            eof = false
        }
        end, ok := rb.f.quickSpan(rb.src, nSrc, end, eof)
        n := copy(dst[nDst:], rb.src.bytes[nSrc:end])
        nSrc += n
        nDst += n
        if ok {
            if err == nil && n < rb.nsrc && !atEOF {
                err = transform.ErrShortSrc
            }
            return nDst, nSrc, err
        }
    }
}
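Because Form implements transform.Transformer (Reset and Transform above) and transform.SpanningTransformer, it composes with the golang.org/x/text/transform helpers. A sketch using transform.String; the sample string is my own:

package main

import (
    "fmt"

    "golang.org/x/text/transform"
    "golang.org/x/text/unicode/norm"
)

func main() {
    // NFKC maps the "ﬁ" ligature (U+FB01) to "fi" and composes e + U+0301.
    got, _, err := transform.String(norm.NFKC, "ﬁve\u0301")
    if err != nil {
        fmt.Println("transform failed:", err)
        return
    }
    fmt.Println(got) // fivé
}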
54
vendor/golang.org/x/text/unicode/norm/trie.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package norm

type valueRange struct {
    value  uint16 // header: value:stride
    lo, hi byte   // header: lo:n
}

type sparseBlocks struct {
    values []valueRange
    offset []uint16
}

var nfcSparse = sparseBlocks{
    values: nfcSparseValues[:],
    offset: nfcSparseOffset[:],
}

var nfkcSparse = sparseBlocks{
    values: nfkcSparseValues[:],
    offset: nfkcSparseOffset[:],
}

var (
    nfcData  = newNfcTrie(0)
    nfkcData = newNfkcTrie(0)
)

// lookupValue determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is by r.value + (b - r.lo) * stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
    offset := t.offset[n]
    header := t.values[offset]
    lo := offset + 1
    hi := lo + uint16(header.lo)
    for lo < hi {
        m := lo + (hi-lo)/2
        r := t.values[m]
        if r.lo <= b && b <= r.hi {
            return r.value + uint16(b-r.lo)*header.value
        }
        if b < r.lo {
            hi = m
        } else {
            lo = m + 1
        }
    }
    return 0
}
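sparseBlocks.lookup above is a binary search over sorted [lo, hi] byte ranges, where a matching range maps b to a base value plus a stride. A self-contained illustration of the same idea with invented data and a stride fixed at 1:

package main

import "fmt"

// rangeEntry mirrors valueRange: a byte range [lo, hi] mapped to a base value.
// The data below is made up for illustration.
type rangeEntry struct {
    lo, hi byte
    value  uint16
}

// lookup does the same kind of binary search as sparseBlocks.lookup.
func lookup(ranges []rangeEntry, b byte) uint16 {
    lo, hi := 0, len(ranges)
    for lo < hi {
        m := lo + (hi-lo)/2
        r := ranges[m]
        if r.lo <= b && b <= r.hi {
            return r.value + uint16(b-r.lo)
        }
        if b < r.lo {
            hi = m
        } else {
            lo = m + 1
        }
    }
    return 0
}

func main() {
    ranges := []rangeEntry{{lo: 0x10, hi: 0x1f, value: 100}, {lo: 0x40, hi: 0x4f, value: 200}}
    fmt.Println(lookup(ranges, 0x42)) // 202
}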
390
vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
generated
vendored
@ -1,390 +0,0 @@
|
|||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: google/protobuf/timestamp.proto
|
|
||||||
|
|
||||||
// Package timestamppb contains generated types for google/protobuf/timestamp.proto.
|
|
||||||
//
|
|
||||||
// The Timestamp message represents a timestamp,
|
|
||||||
// an instant in time since the Unix epoch (January 1st, 1970).
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Conversion to a Go Time
|
|
||||||
//
|
|
||||||
// The AsTime method can be used to convert a Timestamp message to a
|
|
||||||
// standard Go time.Time value in UTC:
|
|
||||||
//
|
|
||||||
// t := ts.AsTime()
|
|
||||||
// ... // make use of t as a time.Time
|
|
||||||
//
|
|
||||||
// Converting to a time.Time is a common operation so that the extensive
|
|
||||||
// set of time-based operations provided by the time package can be leveraged.
|
|
||||||
// See https://golang.org/pkg/time for more information.
|
|
||||||
//
|
|
||||||
// The AsTime method performs the conversion on a best-effort basis. Timestamps
|
|
||||||
// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive)
|
|
||||||
// are normalized during the conversion to a time.Time. To manually check for
|
|
||||||
// invalid Timestamps per the documented limitations in timestamp.proto,
|
|
||||||
// additionally call the CheckValid method:
|
|
||||||
//
|
|
||||||
// if err := ts.CheckValid(); err != nil {
|
|
||||||
// ... // handle error
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Conversion from a Go Time
|
|
||||||
//
|
|
||||||
// The timestamppb.New function can be used to construct a Timestamp message
|
|
||||||
// from a standard Go time.Time value:
|
|
||||||
//
|
|
||||||
// ts := timestamppb.New(t)
|
|
||||||
// ... // make use of ts as a *timestamppb.Timestamp
|
|
||||||
//
|
|
||||||
// In order to construct a Timestamp representing the current time, use Now:
|
|
||||||
//
|
|
||||||
// ts := timestamppb.Now()
|
|
||||||
// ... // make use of ts as a *timestamppb.Timestamp
|
|
||||||
//
|
|
||||||
package timestamppb
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
reflect "reflect"
|
|
||||||
sync "sync"
|
|
||||||
time "time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Timestamp represents a point in time independent of any time zone or local
|
|
||||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
|
||||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
|
||||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
|
||||||
// Gregorian calendar backwards to year one.
|
|
||||||
//
|
|
||||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
|
||||||
// second table is needed for interpretation, using a [24-hour linear
|
|
||||||
// smear](https://developers.google.com/time/smear).
|
|
||||||
//
|
|
||||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
|
||||||
// restricting to that range, we ensure that we can convert to and from [RFC
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
//     Timestamp timestamp;
//     timestamp.set_seconds(time(NULL));
//     timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
//     struct timeval tv;
//     gettimeofday(&tv, NULL);
//
//     Timestamp timestamp;
//     timestamp.set_seconds(tv.tv_sec);
//     timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
//     FILETIME ft;
//     GetSystemTimeAsFileTime(&ft);
//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
//     Timestamp timestamp;
//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
//     long millis = System.currentTimeMillis();
//
//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
//         .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from Java `Instant.now()`.
//
//     Instant now = Instant.now();
//
//     Timestamp timestamp =
//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())
//             .setNanos(now.getNano()).build();
//
//
// Example 6: Compute Timestamp from current time in Python.
//
//     timestamp = Timestamp()
//     timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Represents seconds of UTC time since Unix epoch
	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
	// 9999-12-31T23:59:59Z inclusive.
	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
	// Non-negative fractions of a second at nanosecond resolution. Negative
	// second values with fractions must still have non-negative nanos values
	// that count forward in time. Must be from 0 to 999,999,999
	// inclusive.
	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
}
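
A rough illustration of the JSON mapping described in the comment above (an editor's sketch, not part of the vendored file): marshaling a Timestamp with the protojson package (google.golang.org/protobuf/encoding/protojson, which is not necessarily vendored in this tree) yields an RFC 3339 string.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// The "2017-01-15T01:30:15.01Z" example from the comment above:
	// 1484443815 whole seconds plus 10 ms expressed in nanoseconds.
	ts := &timestamppb.Timestamp{Seconds: 1484443815, Nanos: 10_000_000}
	b, err := protojson.Marshal(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // a quoted RFC 3339 string such as "2017-01-15T01:30:15.010Z"
}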

// Now constructs a new Timestamp from the current time.
func Now() *Timestamp {
	return New(time.Now())
}

// New constructs a new Timestamp from the provided time.Time.
func New(t time.Time) *Timestamp {
	return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}
}

// AsTime converts x to a time.Time.
func (x *Timestamp) AsTime() time.Time {
	return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()
}
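
A short round-trip sketch using the New and AsTime helpers above (illustrative only, not part of the vendored file; it assumes the package is imported as timestamppb).

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// New splits a time.Time into whole seconds and nanoseconds;
	// AsTime reassembles it, always in UTC.
	t := time.Date(2017, time.January, 15, 1, 30, 15, 10_000_000, time.UTC)
	ts := timestamppb.New(t)
	fmt.Println(ts.GetSeconds(), ts.GetNanos()) // 1484443815 10000000
	fmt.Println(ts.AsTime().Equal(t))           // true
}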

// IsValid reports whether the timestamp is valid.
// It is equivalent to CheckValid == nil.
func (x *Timestamp) IsValid() bool {
	return x.check() == 0
}

// CheckValid returns an error if the timestamp is invalid.
// In particular, it checks whether the value represents a date that is
// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.
// An error is reported for a nil Timestamp.
func (x *Timestamp) CheckValid() error {
	switch x.check() {
	case invalidNil:
		return protoimpl.X.NewError("invalid nil Timestamp")
	case invalidUnderflow:
		return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x)
	case invalidOverflow:
		return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x)
	case invalidNanos:
		return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x)
	default:
		return nil
	}
}

const (
	_ = iota
	invalidNil
	invalidUnderflow
	invalidOverflow
	invalidNanos
)

func (x *Timestamp) check() uint {
	const minTimestamp = -62135596800  // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive
	const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive
	secs := x.GetSeconds()
	nanos := x.GetNanos()
	switch {
	case x == nil:
		return invalidNil
	case secs < minTimestamp:
		return invalidUnderflow
	case secs > maxTimestamp:
		return invalidOverflow
	case nanos < 0 || nanos >= 1e9:
		return invalidNanos
	default:
		return 0
	}
}
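
A brief validation sketch against the range rules enforced by check() above (again an illustrative example, not part of the vendored file).

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Nanos must be in [0, 1e9); a negative value trips the invalidNanos case.
	bad := &timestamppb.Timestamp{Seconds: 0, Nanos: -1}
	fmt.Println(bad.IsValid())    // false
	fmt.Println(bad.CheckValid()) // non-nil error describing the out-of-range nanos

	// A nil Timestamp is reported as invalid rather than panicking.
	var none *timestamppb.Timestamp
	fmt.Println(none.IsValid()) // false
}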

func (x *Timestamp) Reset() {
	*x = Timestamp{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Timestamp) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Timestamp) ProtoMessage() {}

func (x *Timestamp) ProtoReflect() protoreflect.Message {
	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
func (*Timestamp) Descriptor() ([]byte, []int) {
	return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0}
}

func (x *Timestamp) GetSeconds() int64 {
	if x != nil {
		return x.Seconds
	}
	return 0
}

func (x *Timestamp) GetNanos() int32 {
	if x != nil {
		return x.Nanos
	}
	return 0
}

var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor

var file_google_protobuf_timestamp_proto_rawDesc = []byte{
	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
	0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
	file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
)

func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
	file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
		file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
	})
	return file_google_protobuf_timestamp_proto_rawDescData
}

var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
	(*Timestamp)(nil), // 0: google.protobuf.Timestamp
}
var file_google_protobuf_timestamp_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_google_protobuf_timestamp_proto_init() }
func file_google_protobuf_timestamp_proto_init() {
	if File_google_protobuf_timestamp_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Timestamp); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_google_protobuf_timestamp_proto_goTypes,
		DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs,
		MessageInfos:      file_google_protobuf_timestamp_proto_msgTypes,
	}.Build()
	File_google_protobuf_timestamp_proto = out.File
	file_google_protobuf_timestamp_proto_rawDesc = nil
	file_google_protobuf_timestamp_proto_goTypes = nil
	file_google_protobuf_timestamp_proto_depIdxs = nil
}
8
vendor/modules.txt
vendored

@@ -18,6 +18,10 @@ github.com/rivo/uniseg
 github.com/russross/blackfriday/v2
 # github.com/shurcooL/sanitized_anchor_name v1.0.0
 github.com/shurcooL/sanitized_anchor_name
+# github.com/spf13/afero v1.8.0
+## explicit
+github.com/spf13/afero
+github.com/spf13/afero/mem
 # github.com/urfave/cli/v2 v2.3.0
 ## explicit
 github.com/urfave/cli/v2
@@ -30,6 +34,9 @@ golang.org/x/sys/unix
 golang.org/x/sys/windows
 # golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
 golang.org/x/term
+# golang.org/x/text v0.3.4
+golang.org/x/text/transform
+golang.org/x/text/unicode/norm
 # google.golang.org/protobuf v1.27.1
 ## explicit
 google.golang.org/protobuf/encoding/prototext
@@ -57,4 +64,3 @@ google.golang.org/protobuf/reflect/protoreflect
 google.golang.org/protobuf/reflect/protoregistry
 google.golang.org/protobuf/runtime/protoiface
 google.golang.org/protobuf/runtime/protoimpl
-google.golang.org/protobuf/types/known/timestamppb