author    openshift-ci[bot] <75433959+openshift-ci[bot]@users.noreply.github.com>  2021-08-17 19:38:14 +0000
committer GitHub <noreply@github.com>  2021-08-17 19:38:14 +0000
commit    741ef8111f808bfd6e992eabbfc2dccc36f0b1a9 (patch)
tree      7e19bec71b890e1f6edc71b5a5d741700e46c2a5
parent    39cab790cc90c77d4b4a156f5f885077b267cbd9 (diff)
parent    204ac5d464dc1df9abd714f96041a8801229dc31 (diff)
Merge pull request #11250 from TomSweeneyRedHat/dev/tsweeney/bumpcstorage
[v3.3] Bump c/storage to v1.34.1 and c/image to v5.15.1
-rw-r--r--  go.mod | 8
-rw-r--r--  go.sum | 18
-rw-r--r--  vendor/github.com/BurntSushi/toml/.gitignore | 5
-rw-r--r--  vendor/github.com/BurntSushi/toml/.travis.yml | 15
-rw-r--r--  vendor/github.com/BurntSushi/toml/COMPATIBLE | 4
-rw-r--r--  vendor/github.com/BurntSushi/toml/Makefile | 19
-rw-r--r--  vendor/github.com/BurntSushi/toml/README.md | 74
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode.go | 180
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode_go116.go | 18
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode_meta.go | 36
-rw-r--r--  vendor/github.com/BurntSushi/toml/deprecated.go | 33
-rw-r--r--  vendor/github.com/BurntSushi/toml/doc.go | 28
-rw-r--r--  vendor/github.com/BurntSushi/toml/encode.go | 376
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types.go | 19
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types_1.1.go | 18
-rw-r--r--  vendor/github.com/BurntSushi/toml/go.mod | 3
-rw-r--r--  vendor/github.com/BurntSushi/toml/go.sum | 0
-rw-r--r--  vendor/github.com/BurntSushi/toml/internal/tz.go | 36
-rw-r--r--  vendor/github.com/BurntSushi/toml/lex.go | 524
-rw-r--r--  vendor/github.com/BurntSushi/toml/parse.go | 585
-rw-r--r--  vendor/github.com/BurntSushi/toml/session.vim | 1
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_check.go | 21
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/storage/.cirrus.yml | 2
-rw-r--r--  vendor/github.com/containers/storage/Makefile | 4
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/check_115.go | 42
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/check_116.go | 42
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 29
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 6
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 10
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go | 61
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go | 11
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go | 8
-rw-r--r--  vendor/github.com/containers/storage/types/utils.go | 8
-rw-r--r--  vendor/github.com/klauspost/compress/.gitattributes | 2
-rw-r--r--  vendor/github.com/klauspost/compress/.gitignore | 25
-rw-r--r--  vendor/github.com/klauspost/compress/.goreleaser.yml | 137
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 426
-rw-r--r--  vendor/github.com/klauspost/compress/compressible.go | 85
-rw-r--r--  vendor/github.com/klauspost/compress/gen.sh | 4
-rw-r--r--  vendor/github.com/klauspost/compress/go.mod | 5
-rw-r--r--  vendor/github.com/klauspost/compress/go.sum | 2
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/compress.go | 64
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/huff0.go | 62
-rw-r--r--  vendor/github.com/klauspost/compress/s2sx.mod | 5
-rw-r--r--  vendor/github.com/klauspost/compress/s2sx.sum | 2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 32
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_base.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_best.go | 161
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_better.go | 64
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_dfast.go | 61
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go | 37
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go | 31
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder_options.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/hash.go | 60
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go | 9
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go | 22
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go | 21
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/selinux.go | 2
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go | 64
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go | 4
-rw-r--r--  vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md | 6
-rw-r--r--  vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go | 9
-rw-r--r--  vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md | 54
-rw-r--r--  vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go | 103
-rw-r--r--  vendor/modules.txt | 13
70 files changed, 2803 insertions, 1046 deletions
diff --git a/go.mod b/go.mod
index d47622cf3..058eb49f3 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/containers/podman/v3
go 1.13
require (
- github.com/BurntSushi/toml v0.3.1
+ github.com/BurntSushi/toml v0.4.1
github.com/blang/semver v3.5.1+incompatible
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
@@ -14,10 +14,10 @@ require (
github.com/containers/buildah v1.22.0
github.com/containers/common v0.42.1
github.com/containers/conmon v2.0.20+incompatible
- github.com/containers/image/v5 v5.15.0
+ github.com/containers/image/v5 v5.15.1
github.com/containers/ocicrypt v1.1.2
github.com/containers/psgo v1.5.2
- github.com/containers/storage v1.33.1
+ github.com/containers/storage v1.34.1
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cri-o/ocicni v0.2.1-0.20210621164014-d0acc7862283
@@ -49,7 +49,7 @@ require (
github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.8.2
+ github.com/opencontainers/selinux v1.8.4
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
github.com/rootless-containers/rootlesskit v0.14.4
diff --git a/go.sum b/go.sum
index 08f2356e9..aeb54d37c 100644
--- a/go.sum
+++ b/go.sum
@@ -53,8 +53,9 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -244,8 +245,9 @@ github.com/containers/common v0.42.1/go.mod h1:AaF3ipZfgezsctDuhzLkq4Vl+LkEy7J74
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU=
-github.com/containers/image/v5 v5.15.0 h1:NduhN20ptHNlf0uRny5iTJa2OodB9SLMEB4hKKbzBBs=
github.com/containers/image/v5 v5.15.0/go.mod h1:gzdBcooi6AFdiqfzirUqv90hUyHyI0MMdaqKzACKr2s=
+github.com/containers/image/v5 v5.15.1 h1:WteFDi2CFT9bN7icKvO7e8mVqVkL1b/ZqSEgvWC1RjY=
+github.com/containers/image/v5 v5.15.1/go.mod h1:C7WCwe2hDDYrq39mK6MEgpw8mf/lPTdeofQSJcSFl6k=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -258,8 +260,10 @@ github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzP
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/containers/storage v1.32.6/go.mod h1:mdB+b89p+jU8zpzLTVXA0gWMmIo0WrkfGMh1R8O2IQw=
github.com/containers/storage v1.33.0/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
-github.com/containers/storage v1.33.1 h1:RHUPZ7vQxwoeOoMoKUDsVun4f9Wi8BTXmr/wQiruBYU=
github.com/containers/storage v1.33.1/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
+github.com/containers/storage v1.34.0/go.mod h1:t6I+hTgPU0/tVxQ75vw406wDi/TXwYBqZp4QZV9N7b8=
+github.com/containers/storage v1.34.1 h1:PsBGMH7hwuQ3MOr7qTgPznFrE8ebfIbwQbg2gKvg0lE=
+github.com/containers/storage v1.34.1/go.mod h1:FY2TcbfgCLMU4lYoKnlZeZXeH353TOTbpDEA+sAcqAY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -574,8 +578,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -739,8 +745,10 @@ github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pK
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.8.3/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
+github.com/opencontainers/selinux v1.8.4 h1:krlgQ6/j9CkCXT5oW0yVXdQFOME3NjKuuAZXuR6O7P4=
+github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4=
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
index 0cd380037..cd11be965 100644
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -1,5 +1,2 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
toml.test
+/toml-test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 8b8afc4f0..000000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - tip
-install:
- - go install ./...
- - go get github.com/BurntSushi/toml-test
-script:
- - export PATH="$PATH:$HOME/gopath/bin"
- - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
index 6efcfd0ce..f621b0119 100644
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -1,3 +1 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848d3..000000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
- go install ./...
-
-test: install
- go test -v
- toml-test toml-test-decoder
- toml-test -encoder toml-test-encoder
-
-fmt:
- gofmt -w *.go */*.go
- colcheck *.go */*.go
-
-tags:
- find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
- git push origin master
- git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 7c1b37ecc..64410cf75 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -6,27 +6,22 @@ packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
-Spec: https://github.com/toml-lang/toml
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+Documentation: https://godocs.io/github.com/BurntSushi/toml
-Documentation: https://godoc.org/github.com/BurntSushi/toml
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
-Installation:
+This library requires Go 1.13 or newer; install it with:
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
+ $ go get github.com/BurntSushi/toml
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
+It also comes with a TOML validator CLI tool:
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
+ $ go get github.com/BurntSushi/toml/cmd/tomlv
+ $ tomlv some-toml-file.toml
### Testing
@@ -36,8 +31,8 @@ and the encoder.
### Examples
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
+This package works similarly to how the Go standard library handles XML and
+JSON. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
@@ -54,11 +49,11 @@ Which could be defined in Go as:
```go
type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time // requires `import time`
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
}
```
@@ -84,6 +79,9 @@ type TOML struct {
}
```
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
@@ -103,19 +101,19 @@ Which can be decoded with:
```go
type song struct {
- Name string
- Duration duration
+ Name string
+ Duration duration
}
type songs struct {
- Song []song
+ Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
- log.Fatal(err)
+ log.Fatal(err)
}
for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
@@ -134,6 +132,9 @@ func (d *duration) UnmarshalText(text []byte) error {
}
```
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
+
### More complex usage
Here's an example of how to load the example from the official spec page:
@@ -180,23 +181,23 @@ And the corresponding Go types are:
```go
type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
}
type database struct {
- Server string
- Ports []int
+ Server string
+ Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
@@ -207,7 +208,7 @@ type server struct {
}
type clients struct {
- Data [][]interface{}
+ Data [][]interface{}
Hosts []string
}
```
@@ -216,3 +217,4 @@ Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
+
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index b0fd51d5b..d3d3b8397 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -1,19 +1,17 @@
package toml
import (
+ "encoding"
"fmt"
"io"
"io/ioutil"
"math"
+ "os"
"reflect"
"strings"
"time"
)
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
@@ -27,30 +25,21 @@ func Unmarshal(p []byte, v interface{}) error {
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use the PrimitiveDecode() function to "manually" decode these values.
//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
type Primitive struct {
undecoded interface{}
context Key
}
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
@@ -68,43 +57,51 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
return md.unify(primValue.undecoded, rvalue(v))
}
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
+// Decoder decodes TOML data.
//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
+// TOML tables correspond to Go structs or maps (dealer's choice – they can be
+// used interchangeably).
//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
//
-// TOML datetimes correspond to Go `time.Time` values.
+// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
+// in the local timezone.
//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// time duration strings.
//
// Key mapping
//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case insensitive
+// match to struct names will be tried if an exact match can't be found.
//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+ r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// Decode TOML data into the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
@@ -112,7 +109,15 @@ func Decode(data string, v interface{}) (MetaData, error) {
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
- p, err := parse(data)
+
+	// TODO: have the parser read from io.Reader? Or at the very least, make
+ // it read from []byte rather than string
+ data, err := ioutil.ReadAll(dec.r)
+ if err != nil {
+ return MetaData{}, err
+ }
+
+ p, err := parse(string(data))
if err != nil {
return MetaData{}, err
}
@@ -123,24 +128,22 @@ func Decode(data string, v interface{}) (MetaData, error) {
return md, md.unify(p.mapping, indirect(rv))
}
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
+// Decode the TOML data into the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+ return NewDecoder(strings.NewReader(data)).Decode(v)
}
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+ fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
- return Decode(string(bs), v)
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
}
// unify performs a sort of type unification based on the structure of `rv`,
@@ -149,8 +152,8 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
// Special case. Look for a `Primitive` value.
+ // TODO: #76 would make this superfluous after implemented.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
@@ -170,25 +173,17 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
}
}
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
// Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
}
- // BUG(burntsushi)
+ // TODO:
// The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+ // array. In particular, the unmarshaler should only be applied to primitive
+ // TOML values. But at this point, it will be applied to all kinds of values
+ // and produce an incorrect error whenever those values are hashes or arrays
+ // (including arrays of tables).
k := rv.Kind()
@@ -277,6 +272,12 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ if k := rv.Type().Key().Kind(); k != reflect.String {
+ return fmt.Errorf(
+ "toml: cannot decode to a map with non-string key type (%s in %q)",
+ k, rv.Type())
+ }
+
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
@@ -312,10 +313,8 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
}
return badtype("slice", data)
}
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
+ if l := datav.Len(); l != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
@@ -337,11 +336,10 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
+ l := data.Len()
+ for i := 0; i < l; i++ {
+ err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
+ if err != nil {
return err
}
}
@@ -439,7 +437,7 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
@@ -482,7 +480,7 @@ func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
return pv
}
}
@@ -498,12 +496,16 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return true
}
return false
}
+func e(format string, args ...interface{}) error {
+ return fmt.Errorf("toml: "+format, args...)
+}
+
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
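The hunks above replace the old string-only entry points with an io.Reader-backed Decoder. A minimal sketch of how downstream code can use the new API; the `config` struct and TOML literal are illustrative assumptions, not part of this commit:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/BurntSushi/toml"
)

type config struct {
	Age  int
	Cats []string
}

func main() {
	blob := "age = 25\ncats = [\"Cauchy\", \"Plato\"]"

	var conf config
	// NewDecoder wraps any io.Reader; Decode fills the pointer and returns
	// MetaData describing which keys were seen.
	md, err := toml.NewDecoder(strings.NewReader(blob)).Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(conf.Age, conf.Cats, md.Keys())
}
```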
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
new file mode 100644
index 000000000..38aa75fdc
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_go116.go
@@ -0,0 +1,18 @@
+// +build go1.16
+
+package toml
+
+import (
+ "io/fs"
+)
+
+// DecodeFS is just like Decode, except it will automatically read the contents
+// of the file at `path` from a fs.FS instance.
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+ fp, err := fsys.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
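DecodeFS pairs naturally with Go 1.16's embed support. A minimal sketch, assuming a `config.toml` file and an `appConfig` struct that are not part of this commit:

```go
package main

import (
	"embed"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS

type appConfig struct {
	Title string
}

func main() {
	var c appConfig
	// DecodeFS reads the file from any fs.FS, here a compiled-in embed.FS.
	if _, err := toml.DecodeFS(configFS, "config.toml", &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Title)
}
```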
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
index b9914a679..ad8899c6c 100644
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -2,9 +2,9 @@ package toml
import "strings"
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
+// MetaData allows access to meta information about TOML data that may not be
+// inferable via reflection. In particular, whether a key has been defined and
+// the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
@@ -13,10 +13,11 @@ type MetaData struct {
context Key // Used only during decoding.
}
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use:
//
-// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key given. Keys are case sensitive.
@@ -41,8 +42,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type returns a string representation of the type of the key specified.
//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
@@ -51,13 +52,11 @@ func (md *MetaData) Type(key ...string) string {
return ""
}
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// values of this type.
type Key []string
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
+func (k Key) String() string { return strings.Join(k, ".") }
func (k Key) maybeQuotedAll() string {
var ss []string
@@ -68,6 +67,9 @@ func (k Key) maybeQuotedAll() string {
}
func (k Key) maybeQuoted(i int) string {
+ if k[i] == "" {
+ return `""`
+ }
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
@@ -76,7 +78,7 @@ func (k Key) maybeQuoted(i int) string {
}
}
if quote {
- return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ return `"` + quotedReplacer.Replace(k[i]) + `"`
}
return k[i]
}
@@ -89,10 +91,10 @@ func (k Key) add(piece string) Key {
}
// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
//
-// The list will have the same order as the keys appeared in the TOML data.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific. The list will have the same
+// order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
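The MetaData helpers documented above (IsDefined, Type, Keys, plus the existing Undecoded) let callers tighten the otherwise loose TOML-to-Go mapping. An illustrative sketch with made-up keys and a made-up struct:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Server struct{ Port int }
	}
	md, err := toml.Decode("extra = 1\n[server]\nport = 8080", &conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.IsDefined("server", "port")) // true
	fmt.Println(md.Type("server", "port"))      // Integer
	// Keys present in the TOML but absent from the Go value.
	for _, k := range md.Undecoded() {
		fmt.Println("undecoded:", k) // extra
	}
}
```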
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 000000000..db89eac1d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,33 @@
+package toml
+
+import (
+ "encoding"
+ "io"
+)
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextMarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextMarshaler encoding.TextMarshaler
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextUnmarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// DEPRECATED!
+//
+// Use NewDecoder(reader).Decode(&v) instead.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ return NewDecoder(r).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index b371f396e..099c4a77d 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -1,27 +1,13 @@
/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
+Package toml implements decoding and encoding of TOML files.
-The specification implemented: https://github.com/toml-lang/toml
+This package supports TOML v1.0.0, as listed on https://toml.io
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify whether a TOML document is valid. It can also be used
+print the type of each key.
*/
package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index d905c21a2..10d88ac63 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,48 +2,92 @@ package toml
import (
"bufio"
+ "encoding"
"errors"
"fmt"
"io"
+ "math"
"reflect"
"sort"
"strconv"
"strings"
"time"
+
+ "github.com/BurntSushi/toml/internal"
)
type tomlEncodeError struct{ error }
var (
- errArrayMixedElementTypes = errors.New(
- "toml: cannot encode array with mixed element types")
- errArrayNilElement = errors.New(
- "toml: cannot encode array with nil element")
- errNonString = errors.New(
- "toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "toml: cannot encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "toml: TOML array element cannot contain a table")
- errNoKey = errors.New(
- "toml: top-level values must be Go maps or structs")
- errAnything = errors.New("") // used in testing
+ errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+ errNonString = errors.New("toml: cannot encode a map with non-string key type")
+ errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
+ errNoKey = errors.New("toml: top-level values must be Go maps or structs")
+ errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
+ "\x00", `\u0000`,
+ "\x01", `\u0001`,
+ "\x02", `\u0002`,
+ "\x03", `\u0003`,
+ "\x04", `\u0004`,
+ "\x05", `\u0005`,
+ "\x06", `\u0006`,
+ "\x07", `\u0007`,
+ "\b", `\b`,
+ "\t", `\t`,
+ "\n", `\n`,
+ "\x0b", `\u000b`,
+ "\f", `\f`,
+ "\r", `\r`,
+ "\x0e", `\u000e`,
+ "\x0f", `\u000f`,
+ "\x10", `\u0010`,
+ "\x11", `\u0011`,
+ "\x12", `\u0012`,
+ "\x13", `\u0013`,
+ "\x14", `\u0014`,
+ "\x15", `\u0015`,
+ "\x16", `\u0016`,
+ "\x17", `\u0017`,
+ "\x18", `\u0018`,
+ "\x19", `\u0019`,
+ "\x1a", `\u001a`,
+ "\x1b", `\u001b`,
+ "\x1c", `\u001c`,
+ "\x1d", `\u001d`,
+ "\x1e", `\u001e`,
+ "\x1f", `\u001f`,
+ "\x7f", `\u007f`,
)
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
//
-// The indentation level can be controlled with the Indent field.
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this includes maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
type Encoder struct {
- // A single indentation level. By default it is two spaces.
+ // The string to use for a single indentation level. The default is two
+ // spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
@@ -51,8 +95,7 @@ type Encoder struct {
w *bufio.Writer
}
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
+// NewEncoder creates a new Encoder.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
@@ -60,29 +103,10 @@ func NewEncoder(w io.Writer) *Encoder {
}
}
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@@ -110,9 +134,13 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
	// Special case. If we can marshal the type to text, then we use that.
	// Basically, this prevents the encoder from handling these types as
	// generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
+ switch t := rv.Interface().(type) {
+ case time.Time, encoding.TextMarshaler:
+ enc.writeKeyValue(key, rv, false)
+ return
+ // TODO: #76 would make this superfluous after implemented.
+ case Primitive:
+ enc.encode(key, reflect.ValueOf(t.undecoded))
return
}
@@ -123,12 +151,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
+ enc.writeKeyValue(key, rv, false)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
- enc.keyEqElement(key, rv)
+ enc.writeKeyValue(key, rv, false)
}
case reflect.Interface:
if rv.IsNil() {
@@ -148,22 +176,32 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct:
enc.eTable(key, rv)
default:
- panic(e("unsupported type for key '%s': %s", key, k))
+ encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
}
}
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
+// eElement encodes any value that can be an array element.
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+ case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+ format := time.RFC3339Nano
+ switch v.Location() {
+ case internal.LocalDatetime:
+ format = "2006-01-02T15:04:05.999999999"
+ case internal.LocalDate:
+ format = "2006-01-02"
+ case internal.LocalTime:
+ format = "15:04:05.999999999"
+ }
+ switch v.Location() {
+ default:
+ enc.wf(v.Format(format))
+ case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+ enc.wf(v.In(time.UTC).Format(format))
+ }
return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
+ case encoding.TextMarshaler:
+ // Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
@@ -171,32 +209,49 @@ func (enc *Encoder) eElement(rv reflect.Value) {
}
return
}
+
switch rv.Kind() {
+ case reflect.String:
+ enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64:
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ f := rv.Float()
+ if math.IsNaN(f) {
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+ }
case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ f := rv.Float()
+ if math.IsNaN(f) {
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+ }
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
+ case reflect.Struct:
+ enc.eStruct(nil, rv, true)
+ case reflect.Map:
+ enc.eMap(nil, rv, true)
case reflect.Interface:
enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
default:
- panic(e("unexpected primitive type: %s", rv.Kind()))
+ encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
}
}
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
+// By the TOML spec, all floats must have a decimal with at least one number on
+// either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
@@ -230,16 +285,14 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if isNil(trv) {
continue
}
- panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
- enc.eMapOrStruct(key, trv)
+ enc.eMapOrStruct(key, trv, false)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
@@ -249,21 +302,22 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
- enc.eMapOrStruct(key, rv)
+ enc.eMapOrStruct(key, rv, false)
}
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
- enc.eMap(key, rv)
+ enc.eMap(key, rv, inline)
case reflect.Struct:
- enc.eStruct(key, rv)
+ enc.eStruct(key, rv, inline)
default:
+ // Should never happen?
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
@@ -281,57 +335,76 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
}
}
- var writeMapKeys = func(mapKeys []string) {
+ var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
+ for i, mapKey := range mapKeys {
+ val := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(val) {
continue
}
- enc.encode(key.add(mapKey), mrv)
+
+ if inline {
+ enc.writeKeyValue(Key{mapKey}, val, true)
+ if trailC || i != len(mapKeys)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(mapKey), val)
+ }
}
}
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
+
+ if inline {
+ enc.wf("{")
+ }
+ writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+ writeMapKeys(mapKeysSub, false)
+ if inline {
+ enc.wf("}")
+ }
}
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
+ // a field that creates a new table then all keys under it will be in that
// table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ //
+ // Fields is a [][]int: for fieldsDirect this always has one entry (the
+ // struct index). For fieldsSub it contains two entries: the parent field
+ // index from tv, and the field indexes for the fields of the sub.
+ var (
+ rt = rv.Type()
+ fieldsDirect, fieldsSub [][]int
+ addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ )
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
- // skip unexported fields
- if f.PkgPath != "" && !f.Anonymous {
+ if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
continue
}
+
frv := rv.Field(i)
+
+ // Treat anonymous struct fields with tag names as though they are
+ // not anonymous, like encoding/json does.
+ //
+ // Non-struct anonymous fields use the normal encoding logic.
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
- // Treat anonymous struct fields with
- // tag names as though they are not
- // anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
- addFields(t, frv, f.Index)
+ addFields(t, frv, append(start, f.Index...))
continue
}
case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct &&
- getOptions(f.Tag).name == "" {
+ if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), f.Index)
+ addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
}
continue
}
- // Fall through to the normal field encoding logic below
- // for non-struct anonymous fields.
}
}
@@ -344,35 +417,49 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
}
addFields(rt, rv, nil)
- var writeFields = func(fields [][]int) {
+ writeFields := func(fields [][]int) {
for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
+ fieldType := rt.FieldByIndex(fieldIndex)
+ fieldVal := rv.FieldByIndex(fieldIndex)
+
+ if isNil(fieldVal) { /// Don't write anything for nil fields.
continue
}
- opts := getOptions(sft.Tag)
+ opts := getOptions(fieldType.Tag)
if opts.skip {
continue
}
- keyName := sft.Name
+ keyName := fieldType.Name
if opts.name != "" {
keyName = opts.name
}
- if opts.omitempty && isEmpty(sf) {
+ if opts.omitempty && isEmpty(fieldVal) {
continue
}
- if opts.omitzero && isZero(sf) {
+ if opts.omitzero && isZero(fieldVal) {
continue
}
- enc.encode(key.add(keyName), sf)
+ if inline {
+ enc.writeKeyValue(Key{keyName}, fieldVal, true)
+ if fieldIndex[0] != len(fields)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(keyName), fieldVal)
+ }
}
}
+
+ if inline {
+ enc.wf("{")
+ }
writeFields(fieldsDirect)
writeFields(fieldsSub)
+ if inline {
+ enc.wf("}")
+ }
}
// tomlTypeName returns the TOML type name of the Go value's type. It is
@@ -411,13 +498,26 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
- case TextMarshaler:
+ case encoding.TextMarshaler:
return tomlString
default:
+ // Someone used a pointer receiver: we can make it work for pointer
+ // values.
+ if rv.CanAddr() {
+ _, ok := rv.Addr().Interface().(encoding.TextMarshaler)
+ if ok {
+ return tomlString
+ }
+ }
return tomlHash
}
default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
+ _, ok := rv.Interface().(encoding.TextMarshaler)
+ if ok {
+ return tomlString
+ }
+ encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+ panic("") // Need *some* return value
}
}
@@ -430,29 +530,18 @@ func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
- }
+ /// Don't allow nil.
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
+ if tomlTypeOfGo(rv.Index(i)) == nil {
encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
}
}
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
- }
+
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
}
return firstType
}
@@ -511,14 +600,20 @@ func (enc *Encoder) newline() {
}
}
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+// Write a key/value pair:
+//
+// key = <any value>
+//
+// If inline is true it won't add a newline at the end.
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 {
encPanic(errNoKey)
}
- panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
- enc.newline()
+ if !inline {
+ enc.newline()
+ }
}
func (enc *Encoder) wf(format string, v ...interface{}) {
@@ -553,16 +648,3 @@ func isNil(rv reflect.Value) bool {
return false
}
}
-
-func panicIfInvalidKey(key Key) {
- for _, k := range key {
- if len(k) == 0 {
- encPanic(e("Key '%s' is not a valid table name. Key names "+
- "cannot be empty.", key.maybeQuotedAll()))
- }
- }
-}
-
-func isValidKeyName(s string) bool {
- return len(s) != 0
-}
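For reference, a minimal sketch of the Encoder path these hunks rework; the `settings` struct and its values are assumptions for illustration:

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type settings struct {
	Title string `toml:"title"`
	Ports []int  `toml:"ports"`
}

func main() {
	enc := toml.NewEncoder(os.Stdout)
	enc.Indent = "  " // the single-indent string documented above
	if err := enc.Encode(settings{Title: "demo", Ports: []int{8001, 8002}}); err != nil {
		log.Fatal(err)
	}
	// Expected output:
	//   title = "demo"
	//   ports = [8001, 8002]
}
```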
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd60..000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d04..000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/github.com/BurntSushi/toml/go.mod b/vendor/github.com/BurntSushi/toml/go.mod
new file mode 100644
index 000000000..82989481d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/go.mod
@@ -0,0 +1,3 @@
+module github.com/BurntSushi/toml
+
+go 1.16
diff --git a/vendor/github.com/BurntSushi/toml/go.sum b/vendor/github.com/BurntSushi/toml/go.sum
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/go.sum
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000..022f15bc2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// defaults to current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+ localOffset = func() int { _, o := time.Now().Zone(); return o }()
+ LocalDatetime = time.FixedZone("datetime-local", localOffset)
+ LocalDate = time.FixedZone("date-local", localOffset)
+ LocalTime = time.FixedZone("time-local", localOffset)
+)
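
These zone names surface to callers through the Location of decoded time.Time values. A small sketch of what a caller could observe, assuming v0.4.x decoding of a TOML local date (not part of this diff):

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		D time.Time `toml:"d"`
	}
	// A TOML local date has no time-of-day and no timezone offset.
	if _, err := toml.Decode("d = 2021-08-17", &v); err != nil {
		panic(err)
	}
	// The decoder attaches the "date-local" FixedZone so round-tripping
	// can tell a local date apart from a full RFC 3339 datetime.
	fmt.Println(v.D.Location()) // date-local
}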
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index e0a742a88..adc4eb5d5 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -2,6 +2,8 @@ package toml
import (
"fmt"
+ "reflect"
+ "runtime"
"strings"
"unicode"
"unicode/utf8"
@@ -29,6 +31,7 @@ const (
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
+ itemKeyEnd
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
@@ -64,9 +67,9 @@ type lexer struct {
state stateFn
items chan item
- // Allow for backing up up to three runes.
+ // Allow for backing up up to four runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
- prevWidths [3]int
+ prevWidths [4]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
@@ -93,6 +96,7 @@ func (lx *lexer) nextItem() item {
return item
default:
lx.state = lx.state(lx)
+ //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
}
}
}
@@ -137,7 +141,7 @@ func (lx *lexer) emitTrim(typ itemType) {
func (lx *lexer) next() (r rune) {
if lx.atEOF {
- panic("next called after EOF")
+ panic("BUG in lexer: next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
@@ -147,12 +151,19 @@ func (lx *lexer) next() (r rune) {
if lx.input[lx.pos] == '\n' {
lx.line++
}
+ lx.prevWidths[3] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
- if lx.nprev < 3 {
+ if lx.nprev < 4 {
lx.nprev++
}
+
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+ if r == utf8.RuneError {
+ lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
+ return utf8.RuneError
+ }
+
lx.prevWidths[0] = w
lx.pos += w
return r
@@ -163,18 +174,19 @@ func (lx *lexer) ignore() {
lx.start = lx.pos
}
-// backup steps back one rune. Can be called only twice between calls to next.
+// backup steps back one rune. Can be called up to four times between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
- panic("backed up too far")
+ panic("BUG in lexer: backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
+ lx.prevWidths[2] = lx.prevWidths[3]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
@@ -269,8 +281,9 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF)
return nil
}
- return lx.errorf("expected a top-level item to end with a newline, "+
- "comment, or EOF, but got %q instead", r)
+ return lx.errorf(
+ "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+ r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -297,8 +310,9 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("expected end of table array name delimiter %q, "+
- "but got %q instead", arrayTableEnd, r)
+ return lx.errorf(
+ "expected end of table array name delimiter %q, but got %q instead",
+ arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
@@ -308,30 +322,17 @@ func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
- return lx.errorf("unexpected end of table name " +
- "(table names cannot be empty)")
+ return lx.errorf("unexpected end of table name (table names cannot be empty)")
case r == tableSep:
- return lx.errorf("unexpected table separator " +
- "(table names cannot be empty)")
+ return lx.errorf("unexpected table separator (table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
- return lexValue // reuse string lexing
+ return lexQuotedName
default:
- return lexBareTableName
- }
-}
-
-// lexBareTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
- r := lx.next()
- if isBareKeyChar(r) {
- return lexBareTableName
+ lx.push(lexTableNameEnd)
+ return lexBareName
}
- lx.backup()
- lx.emit(itemText)
- return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
@@ -347,63 +348,101 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd:
return lx.pop()
default:
- return lx.errorf("expected '.' or ']' to end table name, "+
- "but got %q instead", r)
+ return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
}
}
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+ r := lx.next()
+ if isBareKeyChar(r) {
+ return lexBareName
+ }
+ lx.backup()
+ lx.emit(itemText)
+ return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+ r := lx.next()
switch {
- case r == keySep:
- return lx.errorf("unexpected key separator %q", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.push(lexKeyEnd)
- return lexValue // reuse string lexing
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case r == stringStart:
+ lx.ignore() // ignore the '"'
+ return lexString
+ case r == rawStringStart:
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected value")
default:
+ return lx.errorf("expected value but found %q instead", r)
+ }
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '=': key name appears blank")
+ case r == '.':
+ return lx.errorf("unexpected '.': keys cannot start with a '.'")
+ case r == stringStart || r == rawStringStart:
lx.ignore()
+ fallthrough
+ default: // Bare key
lx.emit(itemKeyStart)
- return lexBareKey
+ return lexKeyNameStart
}
}
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
- return lexBareKey
- case isWhitespace(r):
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- case r == keySep:
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
+func lexKeyNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '='")
+ case r == '.':
+ return lx.errorf("unexpected '.'")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexKeyEnd)
+ return lexQuotedName
default:
- return lx.errorf("bare keys cannot contain %q", r)
+ lx.push(lexKeyEnd)
+ return lexBareName
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
switch r := lx.next(); {
- case r == keySep:
- return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected key separator %q", keySep)
+ case r == '.':
+ lx.ignore()
+ return lexKeyNameStart
+ case r == '=':
+ lx.emit(itemKeyEnd)
+ return lexSkip(lx, lexValue)
default:
- return lx.errorf("expected key separator %q, but got %q instead",
- keySep, r)
+ return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@@ -450,10 +489,15 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the "'"
return lexRawString
- case '+', '-':
- return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
+ case 'i', 'n':
+ if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+ lx.emit(itemFloat)
+ return lx.pop()
+ }
+ case '-', '+':
+ return lexDecimalNumberStart
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
@@ -463,6 +507,9 @@ func lexValue(lx *lexer) stateFn {
lx.backup()
return lexBool
}
+ if r == eof {
+ return lx.errorf("unexpected EOF; expected value")
+ }
return lx.errorf("expected value but found %q instead", r)
}
@@ -507,9 +554,8 @@ func lexArrayValueEnd(lx *lexer) stateFn {
return lexArrayEnd
}
return lx.errorf(
- "expected a comma or array terminator %q, but got %q instead",
- arrayEnd, r,
- )
+ "expected a comma or array terminator %q, but got %s instead",
+ arrayEnd, runeOrEOF(r))
}
// lexArrayEnd finishes the lexing of an array.
@@ -546,8 +592,7 @@ func lexInlineTableValue(lx *lexer) stateFn {
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
+ switch r := lx.next(); {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
@@ -557,12 +602,25 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
return lexCommentStart
case r == comma:
lx.ignore()
+ lx.skip(isWhitespace)
+ if lx.peek() == '}' {
+ return lx.errorf("trailing comma not allowed in inline tables")
+ }
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
+ default:
+ return lx.errorf(
+ "expected a comma or an inline table terminator %q, but got %s instead",
+ inlineTableEnd, runeOrEOF(r))
+ }
+}
+
+func runeOrEOF(r rune) string {
+ if r == eof {
+ return "end of file"
}
- return lx.errorf("expected a comma or an inline table terminator %q, "+
- "but got %q instead", inlineTableEnd, r)
+ return "'" + string(r) + "'"
}
// lexInlineTableEnd finishes the lexing of an inline table.
@@ -579,7 +637,9 @@ func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected '"'`)
+ case isControl(r) || r == '\r':
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
@@ -598,19 +658,40 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
- switch lx.next() {
+ r := lx.next()
+ switch r {
case eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected '"""'`)
+ case '\r':
+ if lx.peek() != '\n' {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineString
case '\\':
return lexMultilineStringEscape
case stringEnd:
+ /// Found " → try to read two more "".
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
- lx.backup()
+ /// Peek ahead: the string can contain " and "", including at the
+ /// end: """str"""""
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == stringEnd {
+ /// Check if we already lexed five quotes; if so we have six now, and
+ /// that's an error.
+ if strings.HasSuffix(lx.current(), `"""""`) {
+ return lx.errorf(`unexpected '""""""'`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineString
+ }
+
+ lx.backup() /// backup: don't include the """ in the item.
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
- lx.next()
+ lx.next() /// Read over """ again and discard it.
lx.next()
lx.next()
lx.ignore()
@@ -619,6 +700,10 @@ func lexMultilineString(lx *lexer) stateFn {
lx.backup()
}
}
+
+ if isControl(r) {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
return lexMultilineString
}
@@ -628,7 +713,9 @@ func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected "'"`)
+ case isControl(r) || r == '\r':
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
@@ -645,17 +732,38 @@ func lexRawString(lx *lexer) stateFn {
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
- switch lx.next() {
+ r := lx.next()
+ switch r {
case eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected "'''"`)
+ case '\r':
+ if lx.peek() != '\n' {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineRawString
case rawStringEnd:
+ /// Found ' → try to read two more ''.
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
- lx.backup()
+ /// Peek ahead: the string can contain ' and '', including at the
+ /// end: '''str'''''
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == rawStringEnd {
+ /// Check if we already lexed five quotes; if so we have six now, and
+ /// that's an error.
+ if strings.HasSuffix(lx.current(), "'''''") {
+ return lx.errorf(`unexpected "''''''"`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineRawString
+ }
+
+ lx.backup() /// backup: don't include the ''' in the item.
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
- lx.next()
+ lx.next() /// Read over ''' again and discard it.
lx.next()
lx.next()
lx.ignore()
@@ -664,6 +772,10 @@ func lexMultilineRawString(lx *lexer) stateFn {
lx.backup()
}
}
+
+ if isControl(r) {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
return lexMultilineRawString
}
@@ -694,6 +806,10 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough
case '"':
fallthrough
+ case ' ', '\t':
+ // Inside """ .. """ strings you can use \ to escape newlines, and any
+ // amount of whitespace can be between the \ and \n.
+ fallthrough
case '\\':
return lx.pop()
case 'u':
@@ -701,8 +817,7 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
- return lx.errorf("invalid escape character %q; only the following "+
- "escape characters are allowed: "+
+ return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
@@ -711,8 +826,9 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
- return lx.errorf(`expected four hexadecimal digits after '\u', `+
- "but got %q instead", lx.current())
+ return lx.errorf(
+ `expected four hexadecimal digits after '\u', but got %q instead`,
+ lx.current())
}
}
return lx.pop()
@@ -723,28 +839,33 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
- return lx.errorf(`expected eight hexadecimal digits after '\U', `+
- "but got %q instead", lx.current())
+ return lx.errorf(
+ `expected eight hexadecimal digits after '\U', but got %q instead`,
+ lx.current())
}
}
return lx.pop()
}
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
+// lexNumberOrDateStart processes the first character of a value which begins
+// with a digit. It exists to catch values starting with '0', so that
+// lexBaseNumberOrDate can differentiate base prefixed integers from other
+// types.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
switch r {
- case '_':
- return lexNumber
- case 'e', 'E':
- return lexFloat
- case '.':
- return lx.errorf("floats must start with a digit, not '.'")
+ case '0':
+ return lexBaseNumberOrDate
}
- return lx.errorf("expected a digit but got %q", r)
+
+ if !isDigit(r) {
+ // The only way to reach this state is if the value starts
+ // with a digit, so specifically treat anything else as an
+ // error.
+ return lx.errorf("expected a digit but got %q", r)
+ }
+
+ return lexNumberOrDate
}
// lexNumberOrDate consumes either an integer, float or datetime.
@@ -754,10 +875,10 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lexNumberOrDate
}
switch r {
- case '-':
+ case '-', ':':
return lexDatetime
case '_':
- return lexNumber
+ return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
}
@@ -775,41 +896,156 @@ func lexDatetime(lx *lexer) stateFn {
return lexDatetime
}
switch r {
- case '-', 'T', ':', '.', 'Z', '+':
+ case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
return lexDatetime
}
lx.backup()
- lx.emit(itemDatetime)
+ lx.emitTrim(itemDatetime)
return lx.pop()
}
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // We MUST see a digit. Even floats have to start with a digit.
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("floats must start with a digit, not '.'")
+ if isHexadecimal(r) {
+ return lexHexInteger
+ }
+ switch r {
+ case '_':
+ return lexHexInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isOctal(r) {
+ return lexOctalInteger
+ }
+ switch r {
+ case '_':
+ return lexOctalInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isBinary(r) {
+ return lexBinaryInteger
+ }
+ switch r {
+ case '_':
+ return lexBinaryInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDecimalNumber
+ }
+ switch r {
+ case '.', 'e', 'E':
+ return lexFloat
+ case '_':
+ return lexDecimalNumber
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a sign.
+// It assumes the sign has already been consumed. Values which start with a sign
+// are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+ r := lx.next()
+
+ // Special error cases to give users better error messages
+ switch r {
+ case 'i':
+ if !lx.accept('n') || !lx.accept('f') {
+ return lx.errorf("invalid float: '%s'", lx.current())
}
- return lx.errorf("expected a digit but got %q", r)
+ lx.emit(itemFloat)
+ return lx.pop()
+ case 'n':
+ if !lx.accept('a') || !lx.accept('n') {
+ return lx.errorf("invalid float: '%s'", lx.current())
+ }
+ lx.emit(itemFloat)
+ return lx.pop()
+ case '0':
+ p := lx.peek()
+ switch p {
+ case 'b', 'o', 'x':
+ return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+ }
+ case '.':
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+
+ if isDigit(r) {
+ return lexDecimalNumber
}
- return lexNumber
+
+ return lx.errorf("expected a digit but got %q", r)
}
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
r := lx.next()
+ // Note: All datetimes start with at least two digits, so we don't
+ // handle date characters (':', '-', etc.) here.
if isDigit(r) {
- return lexNumber
+ return lexNumberOrDate
}
switch r {
case '_':
- return lexNumber
+ // Can only be decimal, because there can't be an underscore
+ // between the '0' and the base designator, and dates can't
+ // contain underscores.
+ return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
+ case 'b':
+ r = lx.peek()
+ if !isBinary(r) {
+ lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+ }
+ return lexBinaryInteger
+ case 'o':
+ r = lx.peek()
+ if !isOctal(r) {
+ lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+ }
+ return lexOctalInteger
+ case 'x':
+ r = lx.peek()
+ if !isHexadecimal(r) {
+ lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+ }
+ return lexHexInteger
}
lx.backup()
@@ -867,21 +1103,22 @@ func lexCommentStart(lx *lexer) stateFn {
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
+ switch r := lx.next(); {
+ case isNL(r) || r == eof:
+ lx.backup()
lx.emit(itemText)
return lx.pop()
+ case isControl(r):
+ return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
+ default:
+ return lexComment
}
- lx.next()
- return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
+ lx.ignore()
+ return nextState
}
// isWhitespace returns true if `r` is a whitespace character according
@@ -894,6 +1131,16 @@ func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
+// Control characters except \n, \t
+func isControl(r rune) bool {
+ switch r {
+ case '\t', '\r', '\n':
+ return false
+ default:
+ return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+ }
+}
+
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
@@ -904,6 +1151,14 @@ func isHexadecimal(r rune) bool {
(r >= 'A' && r <= 'F')
}
+func isOctal(r rune) bool {
+ return r >= '0' && r <= '7'
+}
+
+func isBinary(r rune) bool {
+ return r == '0' || r == '1'
+}
+
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
@@ -912,6 +1167,17 @@ func isBareKeyChar(r rune) bool {
r == '-'
}
+func (s stateFn) String() string {
+ name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+ if i := strings.LastIndexByte(name, '.'); i > -1 {
+ name = name[i+1:]
+ }
+ if s == nil {
+ name = "<nil>"
+ }
+ return name + "()"
+}
+
func (itype itemType) String() string {
switch itype {
case itemError:
@@ -938,12 +1204,18 @@ func (itype itemType) String() string {
return "TableEnd"
case itemKeyStart:
return "KeyStart"
+ case itemKeyEnd:
+ return "KeyEnd"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
+ case itemInlineTableStart:
+ return "InlineTableStart"
+ case itemInlineTableEnd:
+ return "InlineTableEnd"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
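
The new lexer states above (lexHexInteger, lexOctalInteger, lexBinaryInteger, lexDecimalNumberStart) add the TOML 1.0 number forms: 0x/0o/0b integer prefixes and the inf/nan floats. An illustrative decoding sketch (key and field names are made up), assuming the v0.4.x API:

package main

import (
	"fmt"
	"math"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		A, B, C int64
		D       float64
	}
	doc := "a = 0xDEAD_BEEF\nb = 0o755\nc = 0b1101\nd = -inf"
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	// strconv.ParseInt with base 0 handles the prefixes and underscores.
	fmt.Println(v.A, v.B, v.C, math.IsInf(v.D, -1)) // 3735928559 493 13 true
}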
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index 50869ef92..d9ae5db94 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -1,12 +1,14 @@
package toml
import (
+ "errors"
"fmt"
"strconv"
"strings"
"time"
- "unicode"
"unicode/utf8"
+
+ "github.com/BurntSushi/toml/internal"
)
type parser struct {
@@ -14,39 +16,54 @@ type parser struct {
types map[string]tomlType
lx *lexer
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
-
- // the base key name for everything except hashes
- currentKey string
-
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
+ ordered []Key // List of keys in the order that they appear in the TOML data.
+ context Key // Full key for the current hash in scope.
+ currentKey string // Base key name for everything except hashes.
+ approxLine int // Rough approximation of line number
+ implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
}
-type parseError string
+// ParseError is used when a file can't be parsed: for example, invalid integer
+// literals, duplicate keys, etc.
+type ParseError struct {
+ Message string
+ Line int
+ LastKey string
+}
-func (pe parseError) Error() string {
- return string(pe)
+func (pe ParseError) Error() string {
+ return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ pe.Line, pe.LastKey, pe.Message)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
- if err, ok = r.(parseError); ok {
+ if err, ok = r.(ParseError); ok {
return
}
panic(r)
}
}()
+ // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
+ // which would misinterpret the BOM bytes.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+ data = data[2:]
+ }
+
+ // Examine first few bytes for NULL bytes; this probably means it's a UTF-16
+ // file (for ASCII text every other byte is NULL). Again, do this here to
+ // avoid having to deal with UTF-8/16 stuff in the lexer.
+ ex := 6
+ if len(data) < 6 {
+ ex = len(data)
+ }
+ if strings.ContainsRune(data[:ex], 0) {
+ return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
+ }
+
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
@@ -66,13 +83,17 @@ func parse(data string) (p *parser, err error) {
}
func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
+ msg := fmt.Sprintf(format, v...)
+ panic(ParseError{
+ Message: msg,
+ Line: p.approxLine,
+ LastKey: p.current(),
+ })
}
func (p *parser) next() item {
it := p.lx.nextItem()
+ //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
if it.typ == itemError {
p.panicf("%s", it.val)
}
@@ -97,44 +118,63 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) {
switch item.typ {
- case itemCommentStart:
+ case itemCommentStart: // # ..
p.approxLine = item.line
p.expect(itemText)
- case itemTableStart:
- kg := p.next()
- p.approxLine = kg.line
+ case itemTableStart: // [ .. ]
+ name := p.next()
+ p.approxLine = name.line
var key Key
- for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
+ for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
}
- p.assertEqual(itemTableEnd, kg.typ)
+ p.assertEqual(itemTableEnd, name.typ)
- p.establishContext(key, false)
+ p.addContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.next()
- p.approxLine = kg.line
+ case itemArrayTableStart: // [[ .. ]]
+ name := p.next()
+ p.approxLine = name.line
var key Key
- for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
+ for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
}
- p.assertEqual(itemArrayTableEnd, kg.typ)
+ p.assertEqual(itemArrayTableEnd, name.typ)
- p.establishContext(key, true)
+ p.addContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.next()
- p.approxLine = kname.line
- p.currentKey = p.keyString(kname)
-
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
+ case itemKeyStart: // key = ..
+ outerContext := p.context
+ /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
+ k := p.next()
+ p.approxLine = k.line
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
+
+ /// The current key is the last part.
+ p.currentKey = key[len(key)-1]
+
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key[:len(key)-1]
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
+ }
+
+ /// Set value.
+ val, typ := p.value(p.next(), false)
+ p.set(p.currentKey, val, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
+
+ /// Remove the context we added (preserving any context from [tbl] lines).
+ p.context = outerContext
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
@@ -148,180 +188,253 @@ func (p *parser) keyString(it item) string {
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
- s, _ := p.value(it)
+ s, _ := p.value(it, false)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
- panic("unreachable")
}
+ panic("unreachable")
}
+var datetimeRepl = strings.NewReplacer(
+ "z", "Z",
+ "t", "T",
+ " ", "T")
+
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
+func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
- trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
- return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemInteger:
+ return p.valueInteger(it)
+ case itemFloat:
+ return p.valueFloat(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
+ default:
+ p.bug("Expected boolean value, but got '%s'.", it.val)
}
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits",
- it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- // Distinguish integer values. Normally, it'd be a bug if the lexer
- // provides an invalid integer, but it's possible that the number is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
+ case itemDatetime:
+ return p.valueDatetime(it)
+ case itemArray:
+ return p.valueArray(it)
+ case itemInlineTableStart:
+ return p.valueInlineTable(it, parentIsArray)
+ default:
+ p.bug("Unexpected value type: %s", it.typ)
+ }
+ panic("unreachable")
+}
+
+func (p *parser) valueInteger(it item) (interface{}, tomlType) {
+ if !numUnderscoresOK(it.val) {
+ p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
+ }
+ if numHasLeadingZero(it.val) {
+ p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
+ }
+
+ num, err := strconv.ParseInt(it.val, 0, 64)
+ if err != nil {
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
}
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- parts := strings.FieldsFunc(it.val, func(r rune) bool {
- switch r {
- case '.', 'e', 'E':
- return true
- }
- return false
- })
- for _, part := range parts {
- if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be "+
- "surrounded by digits", it.val)
- }
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
+func (p *parser) valueFloat(it item) (interface{}, tomlType) {
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
}
- if !numPeriodsOK(it.val) {
- // As a special case, numbers like '123.' or '1.e2',
- // which are valid as far as Go/strconv are concerned,
- // must be rejected because TOML says that a fractional
- // part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed "+
- "by one or more digits", it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseFloat(val, 64)
- if err != nil {
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.panicf("Invalid float value: %q", it.val)
- }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
}
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- var t time.Time
- var ok bool
- var err error
- for _, format := range []string{
- "2006-01-02T15:04:05Z07:00",
- "2006-01-02T15:04:05",
- "2006-01-02",
- } {
- t, err = time.ParseInLocation(format, it.val, time.Local)
- if err == nil {
- ok = true
- break
- }
+ }
+ if len(parts) > 0 && numHasLeadingZero(parts[0]) {
+ p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
+ val = "nan"
+ }
+ num, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.panicf("Invalid float value: %q", it.val)
}
- if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
+var dtTypes = []struct {
+ fmt string
+ zone *time.Location
+}{
+ {time.RFC3339Nano, time.Local},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
+ {"2006-01-02", internal.LocalDate},
+ {"15:04:05.999999999", internal.LocalTime},
+}
+
+func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
+ it.val = datetimeRepl.Replace(it.val)
+ var (
+ t time.Time
+ ok bool
+ err error
+ )
+ for _, dt := range dtTypes {
+ t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
+ if err == nil {
+ ok = true
+ break
}
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
+ }
+ if !ok {
+ p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+}
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
+func (p *parser) valueArray(it item) (interface{}, tomlType) {
+ p.setType(p.currentKey, tomlArray)
+
+ var (
+ array []interface{}
+ types []tomlType
+ )
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it, true)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, tomlArray
+}
+
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+ var (
+ hash = make(map[string]interface{})
+ outerContext = p.context
+ outerKey = p.currentKey
+ )
+
+ p.context = append(p.context, p.currentKey)
+ prevContext := p.context
+ p.currentKey = ""
+
+ p.addImplicit(p.context)
+ p.addContext(p.context, parentIsArray)
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
+ /// Loop over all table key/value pairs.
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
}
- return array, p.typeOfArray(types)
- case itemInlineTableStart:
- var (
- hash = make(map[string]interface{})
- outerContext = p.context
- outerKey = p.currentKey
- )
- p.context = append(p.context, p.currentKey)
- p.currentKey = ""
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
- if it.typ != itemKeyStart {
- p.bug("Expected key start but instead found %q, around line %d",
- it.val, p.approxLine)
- }
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
+ /// Read all key parts.
+ k := p.next()
+ p.approxLine = k.line
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
- // retrieve key
- k := p.next()
- p.approxLine = k.line
- kname := p.keyString(k)
+ /// The current key is the last part.
+ p.currentKey = key[len(key)-1]
- // retrieve value
- p.currentKey = kname
- val, typ := p.value(p.next())
- // make sure we keep metadata up to date
- p.setType(kname, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- hash[kname] = val
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key[:len(key)-1]
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
}
- p.context = outerContext
- p.currentKey = outerKey
- return hash, tomlHash
+
+ /// Set the value.
+ val, typ := p.value(p.next(), false)
+ p.set(p.currentKey, val, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ hash[p.currentKey] = val
+
+ /// Restore context.
+ p.context = prevContext
}
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
+ p.context = outerContext
+ p.currentKey = outerKey
+ return hash, tomlHash
+}
+
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+ if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
+ return true
+ }
+ if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+ return true
+ }
+ return false
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
+ switch s {
+ case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+ return true
+ }
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
- accept = false
- continue
}
- accept = true
+
+ // isHexadecimal is a superset of all the permissible characters
+ // surrounding an underscore.
+ accept = isHexadecimal(r)
}
return accept
}
@@ -338,13 +451,12 @@ func numPeriodsOK(s string) bool {
return !period
}
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
+func (p *parser) addContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
@@ -383,7 +495,7 @@ func (p *parser) establishContext(key Key, array bool) {
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
+ hashContext[k] = make([]map[string]interface{}, 0, 4)
}
// Add a new table. But make sure the key hasn't already been used
@@ -391,8 +503,7 @@ func (p *parser) establishContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
+ p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
@@ -400,15 +511,22 @@ func (p *parser) establishContext(key Key, array bool) {
p.context = append(p.context, key[len(key)-1])
}
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+}
+
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
+ var (
+ tmpHash interface{}
+ ok bool
+ hash = p.mapping
+ keyContext Key
+ )
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
@@ -422,24 +540,26 @@ func (p *parser) setValue(key string, value interface{}) {
case map[string]interface{}:
hash = t
default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
+ p.panicf("Key '%s' has already been defined.", keyContext)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ // Normally redefining keys isn't allowed, but the key could have been
+ // defined implicitly and it's allowed to be redefined concretely. (See
+ // the `valid/implicit-and-explicit-after.toml` in toml-test)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
+ if p.isArray(keyContext) {
+ p.removeImplicit(keyContext)
+ hash[key] = value
+ return
+ }
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
@@ -449,6 +569,7 @@ func (p *parser) setValue(key string, value interface{}) {
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
+
hash[key] = value
}
@@ -468,21 +589,15 @@ func (p *parser) setType(key string, typ tomlType) {
p.types[keyContext.String()] = typ
}
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
+func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
+func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
+func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+ p.addImplicit(key)
+ p.addContext(key, false)
}
// current returns the full key name of the current context.
@@ -497,20 +612,54 @@ func (p *parser) current() string {
}
func stripFirstNewline(s string) string {
- if len(s) == 0 || s[0] != '\n' {
- return s
+ if len(s) > 0 && s[0] == '\n' {
+ return s[1:]
+ }
+ if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+ return s[2:]
}
- return s[1:]
+ return s
}
-func stripEscapedWhitespace(s string) string {
- esc := strings.Split(s, "\\\n")
- if len(esc) > 1 {
- for i := 1; i < len(esc); i++ {
- esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func stripEscapedNewlines(s string) string {
+ split := strings.Split(s, "\n")
+ if len(split) < 1 {
+ return s
+ }
+
+ escNL := false // Keep track of whether the last non-blank line was escaped.
+ for i, line := range split {
+ line = strings.TrimRight(line, " \t\r")
+
+ if len(line) == 0 || line[len(line)-1] != '\\' {
+ split[i] = strings.TrimRight(split[i], "\r")
+ if !escNL && i != len(split)-1 {
+ split[i] += "\n"
+ }
+ continue
+ }
+
+ escBS := true
+ for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+ escBS = !escBS
+ }
+ if escNL {
+ line = strings.TrimLeft(line, " \t\r")
+ }
+ escNL = !escBS
+
+ if escBS {
+ split[i] += "\n"
+ continue
+ }
+
+ split[i] = line[:len(line)-1] // Remove \
+ if len(split)-1 > i {
+ split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
}
}
- return strings.Join(esc, "")
+ return strings.Join(split, "")
}
func (p *parser) replaceEscapes(str string) string {
@@ -533,6 +682,9 @@ func (p *parser) replaceEscapes(str string) string {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
+ case ' ', '\t':
+ p.panicf("invalid escape: '\\%c'", s[r])
+ return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
@@ -585,8 +737,3 @@ func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
}
return rune(hex)
}
-
-func isStringType(ty itemType) bool {
- return ty == itemString || ty == itemMultilineString ||
- ty == itemRawString || ty == itemRawMultilineString
-}
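
parse.go now exports ParseError with Message, Line, and LastKey instead of the opaque parseError string. A sketch of how a caller might inspect it, assuming the error is returned unwrapped as in parse() above:

package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	_, err := toml.Decode("a = 1\na = 2", &v) // duplicate key
	var perr toml.ParseError
	if errors.As(err, &perr) {
		fmt.Printf("line %d (last key %q): %s\n", perr.Line, perr.LastKey, perr.Message)
	}
}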
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164be0..000000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
index c73f8afc1..d56aa80fa 100644
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but "+
- "arrays must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
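
With typeOfArray removed, array homogeneity is no longer enforced at parse time; valueArray simply tags the result as tomlArray. An illustrative decode of a heterogeneous array, assuming the v0.4.x API:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Mixed []interface{} `toml:"mixed"`
	}
	// Previously this panicked with "arrays must be homogeneous".
	if _, err := toml.Decode(`mixed = [1, "two", 3.5]`, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Mixed) // [1 two 3.5]
}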
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 8936ec087..7665a4c00 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 15
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 12f6f10c6..20bede452 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -25,7 +25,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
- _BUILT_IMAGE_SUFFIX: "c6032583541653504"
+ _BUILT_IMAGE_SUFFIX: "c6248193773010944"
FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}"
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 581961fed..dbc1f7c99 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -29,7 +29,7 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
NATIVETAGS :=
-AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh)
+AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
GO ?= go
TESTFLAGS := $(shell go test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
@@ -108,7 +108,7 @@ install.docs: docs
install: install.docs
lint: install.tools
- tests/tools/build/golangci-lint run
+ tests/tools/build/golangci-lint run --build-tags="$(AUTOTAGS) $(TAGS)"
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 02261bead..a95a46d9f 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.33.1
+1.34.1
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_115.go b/vendor/github.com/containers/storage/drivers/overlay/check_115.go
new file mode 100644
index 000000000..9ad1b863d
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/check_115.go
@@ -0,0 +1,42 @@
+// +build !go1.16
+
+package overlay
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/system"
+)
+
+func scanForMountProgramIndicators(home string) (detected bool, err error) {
+ err = filepath.Walk(home, func(path string, info os.FileInfo, err error) error {
+ if detected {
+ return filepath.SkipDir
+ }
+ if err != nil {
+ return err
+ }
+ basename := filepath.Base(path)
+ if strings.HasPrefix(basename, archive.WhiteoutPrefix) {
+ detected = true
+ return filepath.SkipDir
+ }
+ if info.IsDir() {
+ xattrs, err := system.Llistxattr(path)
+ if err != nil {
+ return err
+ }
+ for _, xattr := range xattrs {
+ if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") {
+ detected = true
+ return filepath.SkipDir
+ }
+ }
+ }
+ return nil
+ })
+ return detected, err
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
new file mode 100644
index 000000000..6d7913cbf
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
@@ -0,0 +1,42 @@
+// +build go1.16
+
+package overlay
+
+import (
+ "io/fs"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/system"
+)
+
+func scanForMountProgramIndicators(home string) (detected bool, err error) {
+ err = filepath.WalkDir(home, func(path string, d fs.DirEntry, err error) error {
+ if detected {
+ return fs.SkipDir
+ }
+ if err != nil {
+ return err
+ }
+ basename := filepath.Base(path)
+ if strings.HasPrefix(basename, archive.WhiteoutPrefix) {
+ detected = true
+ return fs.SkipDir
+ }
+ if d.IsDir() {
+ xattrs, err := system.Llistxattr(path)
+ if err != nil {
+ return err
+ }
+ for _, xattr := range xattrs {
+ if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") {
+ detected = true
+ return fs.SkipDir
+ }
+ }
+ }
+ return nil
+ })
+ return detected, err
+}
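
check_116.go is the Go 1.16+ twin of check_115.go: filepath.WalkDir hands the callback a cheap fs.DirEntry instead of the stat-backed os.FileInfo that filepath.Walk produces, and fs.SkipDir prunes a subtree. A standalone sketch of the same stdlib pattern (the pruned directory name is made up):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// fs.DirEntry avoids a per-entry lstat unless d.Info() is called;
	// returning fs.SkipDir skips the rest of the directory being walked.
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() && d.Name() == ".git" {
			return fs.SkipDir
		}
		fmt.Println(path)
		return nil
	})
	if err != nil {
		panic(err)
	}
}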
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index ecfbae916..f546f9b10 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -266,9 +266,8 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
if opts.mountProgram != "" {
- f, err := os.Create(getMountProgramFlagFile(home))
- if err == nil {
- f.Close()
+ if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
+ return nil, err
}
} else {
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
@@ -542,9 +541,29 @@ func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
home := filepath.Join(graphroot, "overlay")
runhome := filepath.Join(rundir, "overlay")
- if _, err := os.Stat(getMountProgramFlagFile(home)); err == nil {
+ var contents string
+ flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
+ if err == nil {
+ contents = strings.TrimSpace(string(flagContent))
+ }
+ switch contents {
+ case "true":
logrus.Debugf("overlay storage already configured with a mount-program")
return false, nil
+ default:
+ needsMountProgram, err := scanForMountProgramIndicators(home)
+ if err != nil && !os.IsNotExist(err) {
+ return false, err
+ }
+ if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
+ return false, err
+ }
+ if needsMountProgram {
+ return false, nil
+ }
+ // fall through to check if we find ourselves needing to use a
+ // mount program now
+ case "false":
}
for _, dir := range []string{home, runhome} {
@@ -1922,7 +1941,7 @@ func (al *additionalLayer) Info() (io.ReadCloser, error) {
return os.Open(filepath.Join(al.path, "info"))
}
-// Blob returns a reader of the raw contents of this leyer.
+// Blob returns a reader of the raw contents of this layer.
func (al *additionalLayer) Blob() (io.ReadCloser, error) {
return os.Open(filepath.Join(al.path, "blob"))
}
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index e4f484d6b..d2d438d93 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -3,14 +3,14 @@ go 1.14
module github.com/containers/storage
require (
- github.com/BurntSushi/toml v0.3.1
+ github.com/BurntSushi/toml v0.4.1
github.com/Microsoft/go-winio v0.5.0
github.com/Microsoft/hcsshim v0.8.20
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.11
- github.com/klauspost/compress v1.13.1
+ github.com/klauspost/compress v1.13.4
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
@@ -18,7 +18,7 @@ require (
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
- github.com/opencontainers/selinux v1.8.2
+ github.com/opencontainers/selinux v1.8.4
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.7.0
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 2607dbc9b..da7a8f53e 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -34,8 +34,9 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -387,8 +388,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
-github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -480,8 +481,9 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.m
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.8.4 h1:krlgQ6/j9CkCXT5oW0yVXdQFOME3NjKuuAZXuR6O7P4=
+github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index fa0dce033..b85ff7e70 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -27,6 +27,7 @@ import (
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
@@ -1407,7 +1408,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok {
if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil {
- // This is an additional layer. We leverage blob API for aquiring the reproduced raw blob.
+ // This is an additional layer. We leverage blob API for acquiring the reproduced raw blob.
info, err := aLayer.Info()
if err != nil {
aLayer.Release()
@@ -1529,6 +1530,9 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil {
compressor = pgzip.NewWriter(&tsdata)
}
+ if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
+ logrus.Infof("error setting compression concurrency threads to 1: %v; ignoring", err)
+ }
metadata := storage.NewJSONPacker(compressor)
uncompressed, err := archive.DecompressStream(defragmented)
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 34345d145..83bc8c34f 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -146,11 +146,11 @@ type IDMappings struct {
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func NewIDMappings(username, groupname string) (*IDMappings, error) {
- subuidRanges, err := parseSubuid(username)
+ subuidRanges, err := readSubuid(username)
if err != nil {
return nil, err
}
- subgidRanges, err := parseSubgid(groupname)
+ subgidRanges, err := readSubgid(groupname)
if err != nil {
return nil, err
}
@@ -244,14 +244,6 @@ func createIDMap(subidRanges ranges) []IDMap {
return idMap
}
-func parseSubuid(username string) (ranges, error) {
- return parseSubidFile(subuidFileName, username)
-}
-
-func parseSubgid(username string) (ranges, error) {
- return parseSubidFile(subgidFileName, username)
-}
-
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
new file mode 100644
index 000000000..db50a62e4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -0,0 +1,61 @@
+// +build linux,cgo,libsubid
+
+package idtools
+
+import (
+ "unsafe"
+
+ "github.com/pkg/errors"
+)
+
+/*
+#cgo LDFLAGS: -l subid
+#include <shadow/subid.h>
+#include <stdlib.h>
+const char *Prog = "storage";
+struct subid_range get_range(struct subid_range *ranges, int i)
+{
+ return ranges[i];
+}
+*/
+import "C"
+
+func readSubid(username string, isUser bool) (ranges, error) {
+ var ret ranges
+ if username == "ALL" {
+ return nil, errors.New("username ALL not supported")
+ }
+
+ cUsername := C.CString(username)
+ defer C.free(unsafe.Pointer(cUsername))
+
+ var nRanges C.int
+ var cRanges *C.struct_subid_range
+ if isUser {
+ nRanges = C.get_subuid_ranges(cUsername, &cRanges)
+ } else {
+ nRanges = C.get_subgid_ranges(cUsername, &cRanges)
+ }
+ if nRanges < 0 {
+ return nil, errors.New("cannot read subids")
+ }
+ defer C.free(unsafe.Pointer(cRanges))
+
+ for i := 0; i < int(nRanges); i++ {
+ r := C.get_range(cRanges, C.int(i))
+ newRange := subIDRange{
+ Start: int(r.start),
+ Length: int(r.count),
+ }
+ ret = append(ret, newRange)
+ }
+ return ret, nil
+}
+
+func readSubuid(username string) (ranges, error) {
+ return readSubid(username, true)
+}
+
+func readSubgid(username string) (ranges, error) {
+ return readSubid(username, false)
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
new file mode 100644
index 000000000..84da1b764
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux !libsubid !cgo
+
+package idtools
+
+func readSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func readSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
index 9da7975e2..3dd7bf210 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
@@ -91,7 +91,7 @@ func createSubordinateRanges(name string) error {
// first, we should verify that ranges weren't automatically created
// by the distro tooling
- ranges, err := parseSubuid(name)
+ ranges, err := readSubuid(name)
if err != nil {
return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
}
@@ -107,7 +107,7 @@ func createSubordinateRanges(name string) error {
}
}
- ranges, err = parseSubgid(name)
+ ranges, err = readSubgid(name)
if err != nil {
return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
}
@@ -126,7 +126,7 @@ func createSubordinateRanges(name string) error {
}
func findNextUIDRange() (int, error) {
- ranges, err := parseSubuid("ALL")
+ ranges, err := readSubuid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
}
@@ -135,7 +135,7 @@ func findNextUIDRange() (int, error) {
}
func findNextGIDRange() (int, error) {
- ranges, err := parseSubgid("ALL")
+ ranges, err := readSubgid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
}
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index 4d62b151a..b7ab07342 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -155,8 +155,14 @@ func getRootlessUID() int {
}
func expandEnvPath(path string, rootlessUID int) (string, error) {
+ var err error
path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
- return filepath.Clean(os.ExpandEnv(path)), nil
+ path = os.ExpandEnv(path)
+ newpath, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ newpath = filepath.Clean(path)
+ }
+ return newpath, nil
}
func DefaultConfigFile(rootless bool) (string, error) {
diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes
new file mode 100644
index 000000000..402433593
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.gitattributes
@@ -0,0 +1,2 @@
+* -text
+*.bin -text -diff
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
new file mode 100644
index 000000000..b35f8449b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+/s2/cmd/_s2sx/sfx-exe
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
new file mode 100644
index 000000000..c9014ce1d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -0,0 +1,137 @@
+# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com
+before:
+ hooks:
+ - ./gen.sh
+
+builds:
+ -
+ id: "s2c"
+ binary: s2c
+ main: ./s2/cmd/s2c/main.go
+ flags:
+ - -trimpath
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm
+ - arm64
+ - ppc64
+ - ppc64le
+ - mips64
+ - mips64le
+ goarm:
+ - 7
+ -
+ id: "s2d"
+ binary: s2d
+ main: ./s2/cmd/s2d/main.go
+ flags:
+ - -trimpath
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm
+ - arm64
+ - ppc64
+ - ppc64le
+ - mips64
+ - mips64le
+ goarm:
+ - 7
+ -
+ id: "s2sx"
+ binary: s2sx
+ main: ./s2/cmd/_s2sx/main.go
+ flags:
+ - -modfile=s2sx.mod
+ - -trimpath
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm
+ - arm64
+ - ppc64
+ - ppc64le
+ - mips64
+ - mips64le
+ goarm:
+ - 7
+
+archives:
+ -
+ id: s2-binaries
+ name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
+ replacements:
+ aix: AIX
+ darwin: OSX
+ linux: Linux
+ windows: Windows
+ 386: i386
+ amd64: x86_64
+ freebsd: FreeBSD
+ netbsd: NetBSD
+ format_overrides:
+ - goos: windows
+ format: zip
+ files:
+ - unpack/*
+ - s2/LICENSE
+ - s2/README.md
+checksum:
+ name_template: 'checksums.txt'
+snapshot:
+ name_template: "{{ .Tag }}-next"
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^doc:'
+ - '^docs:'
+ - '^test:'
+ - '^tests:'
+ - '^Update\sREADME.md'
+
+nfpms:
+ -
+ file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+ vendor: Klaus Post
+ homepage: https://github.com/klauspost/compress
+ maintainer: Klaus Post <klauspost@gmail.com>
+ description: S2 Compression Tool
+ license: BSD 3-Clause
+ formats:
+ - deb
+ - rpm
+ replacements:
+ darwin: Darwin
+ linux: Linux
+ freebsd: FreeBSD
+ amd64: x86_64
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 000000000..d6a26466c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,426 @@
+# compress
+
+This package provides various compression algorithms.
+
+* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
+* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
+* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
+* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
+* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
+
+[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
+[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
+
+# changelog
+
+* Aug 3, 2021 (v1.13.3)
+
+ * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
+ * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
+ * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
+ * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
+ * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
+ * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
+
+* Jun 14, 2021 (v1.13.1)
+
+ * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
+ * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
+ * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
+ * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395)
+
+* Jun 3, 2021 (v1.13.0)
+ * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
+ * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
+ * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+
+* May 25, 2021 (v1.12.3)
+ * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+ * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+ * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+ * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+ * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+ * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+ * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+ * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+ * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+ * snappy package removed. Upstream added as dependency.
+ * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+ * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+ * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+ * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+ * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+ * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+* Mar 26, 2021 (v1.11.13)
+ * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+ * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+ * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+ * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+ * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+ * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+ * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+ * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+ * s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+ * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+ * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+ * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+
+* Jan 14, 2021 (v1.11.7)
+ * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+ * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
+ * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+ * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+ * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+ * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+ * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+ * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+ * Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+ * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+ * Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
+ * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
+
+* Nov 15, 2020 (v1.11.3)
+ * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
+ * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
+
+* Oct 11, 2020 (v1.11.2)
+ * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
+
+* Oct 1, 2020 (v1.11.1)
+ * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
+
+* Sept 8, 2020 (v1.11.0)
+ * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
+ * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
+ * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
+
+<details>
+ <summary>See changes prior to v1.11.0</summary>
+
+* July 8, 2020 (v1.10.11)
+ * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
+ * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
+
+* June 23, 2020 (v1.10.10)
+ * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
+
+* June 16, 2020 (v1.10.9):
+ * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
+ * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
+ * Fuzzit tests removed. The service has been purchased and is no longer available.
+
+* June 5, 2020 (v1.10.8):
+ * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
+
+* June 1, 2020 (v1.10.7):
+ * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
+ * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
+ * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
+
+* May 21, 2020: (v1.10.6)
+ * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
+ * zstd: Stricter decompression checks.
+
+* April 12, 2020: (v1.10.5)
+ * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
+
+* Apr 8, 2020: (v1.10.4)
+ * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
+* Mar 11, 2020: (v1.10.3)
+ * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
+ * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
+ * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
+ * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
+ * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
+
+* Feb 27, 2020: (v1.10.2)
+ * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
+ * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
+
+* Feb 18, 2020: (v1.10.1)
+ * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
+ * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
+ * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
+
+* Feb 4, 2020: (v1.10.0)
+ * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
+ * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
+ * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
+ * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
+
+</details>
+
+<details>
+ <summary>See changes prior to v1.10.0</summary>
+
+* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Fixed a regression from v1.9.5 that caused corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Fewer allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+</details>
+
+<details>
+ <summary>See changes prior to v1.9.0</summary>
+
+* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
+* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
+* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
+* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
+* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
+* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
+* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
+* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
+* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
+* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
+* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
+* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
+* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
+* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
+* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
+* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
+* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
+* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
+* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
+* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
+* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
+* June 17, 2019: zstd decompression bugfix.
+* June 17, 2019: fix 32 bit builds.
+* June 17, 2019: Easier use in modules (less dependencies).
+* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
+* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
+* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
+* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
+* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
+* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
+* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
+* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
+
+</details>
+
+# deflate usage
+
+* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
+* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import | new import | Documentation
+|--------------------|-----------------------------------------|--------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
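+
+As a minimal sketch of the swap (the program below is illustrative, not part of the package; only the import path differs from standard-library code):
+
+```
+	package main
+
+	import (
+		"bytes"
+		"fmt"
+
+		gzip "github.com/klauspost/compress/gzip" // was: "compress/gzip"
+	)
+
+	func main() {
+		var buf bytes.Buffer
+		zw := gzip.NewWriter(&buf) // identical API to the standard library
+		if _, err := zw.Write([]byte("hello, drop-in gzip")); err != nil {
+			panic(err)
+		}
+		if err := zw.Close(); err != nil {
+			panic(err)
+		}
+		fmt.Println("compressed size:", buf.Len())
+	}
+```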
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use its godoc as reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter)
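+
+A minimal sketch of the direct form (assumes `data []byte` is in scope and `flate` is `github.com/klauspost/compress/flate`; error handling abbreviated):
+
+```
+	// Each Write to a stateless writer is compressed independently;
+	// no dictionary or window is kept between calls.
+	w := flate.NewStatelessWriter(ioutil.Discard) // replace 'ioutil.Discard' with your output
+	defer w.Close()
+	if _, err := w.Write(data); err != nil {
+		return err
+	}
+```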
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```
+ // replace 'ioutil.Discard' with your output.
+ gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+ if err != nil {
+ return err
+ }
+ defer gzw.Close()
+
+ w := bufio.NewWriterSize(gzw, 4096)
+ defer w.Flush()
+
+ // Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
+
+# Performance Update 2018
+
+It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* is the throughput. *Reduction* is the data size reduction as a percentage of the original. *Rel Speed* is the relative speed compared to the standard library at the same level. *Smaller* is how many percent smaller the compressed output is compared to stdlib; negative means the output was bigger. *Loss* is the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences.
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" gives the best compression, and the levels in between fill out that range. The standard library has big differences between levels 1-4, while levels 5-9 show no significant gains, often spending far more time than the achieved compression can justify.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code in both packages, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4, offering both worse compression and lower speed than levels 6 and 7 of this package, respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data.
+
+It is clearly visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
+
+We see a picture similar to "Web Content" here. On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that frequently used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. Since every input byte must still be represented by at least one bit, the compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
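+
+A minimal sketch via the flate level constant (the program is illustrative, not taken from the package docs):
+
+```
+	package main
+
+	import (
+		"bytes"
+		"fmt"
+
+		"github.com/klauspost/compress/flate"
+	)
+
+	func main() {
+		var buf bytes.Buffer
+		// HuffmanOnly never searches for matches, so compression runs in
+		// near linear time; the output is still a valid deflate stream.
+		w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
+		if err != nil {
+			panic(err)
+		}
+		in := bytes.Repeat([]byte("hello huffman "), 1000)
+		if _, err := w.Write(in); err != nil {
+			panic(err)
+		}
+		if err := w.Close(); err != nil {
+			panic(err)
+		}
+		fmt.Printf("in=%d out=%d\n", len(in), buf.Len())
+	}
+```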
+
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 000000000..ea5a692d5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+ if len(b) < 16 {
+ return 0
+ }
+
+ // Correctly predicted order 1
+ hits := 0
+ lastMatch := false
+ var o1 [256]byte
+ var hist [256]int
+ c1 := byte(0)
+ for _, c := range b {
+ if c == o1[c1] {
+ // We only count a hit if there was two correct predictions in a row.
+ if lastMatch {
+ hits++
+ }
+ lastMatch = true
+ } else {
+ lastMatch = false
+ }
+ o1[c1] = c
+ c1 = c
+ hist[c]++
+ }
+
+ // Use x^0.6 to give better spread
+ prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+ // Calculate histogram distribution
+ variance := float64(0)
+ avg := float64(len(b)) / 256
+
+ for _, v := range hist {
+ Δ := float64(v) - avg
+ variance += Δ * Δ
+ }
+
+ stddev := math.Sqrt(float64(variance)) / float64(len(b))
+ exp := math.Sqrt(1 / float64(len(b)))
+
+ // Subtract expected stddev
+ stddev -= exp
+ if stddev < 0 {
+ stddev = 0
+ }
+ stddev *= 1 + exp
+
+ // Use x^0.4 to give better spread
+ entropy := math.Pow(stddev, 0.4)
+
+ // 50/50 weight between prediction and histogram distribution
+ return math.Pow((prediction+entropy)/2, 0.9)
+}
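+
+// Usage sketch: Estimate can gate a more expensive compressor. The 0.1
+// threshold below is only an assumption taken from the doc comment above,
+// not a tuned value:
+//
+//	if compress.Estimate(block) < 0.1 {
+//		// likely uncompressible: store the block raw
+//	} else {
+//		// worth running the real compressor
+//	}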
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+ if len(b) == 0 {
+ return 0
+ }
+ var hist [256]int
+ for _, c := range b {
+ hist[c]++
+ }
+ shannon := float64(0)
+ invTotal := 1.0 / float64(len(b))
+ for _, v := range hist[:] {
+ if v > 0 {
+ n := float64(v)
+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+ }
+ }
+ return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 000000000..aff942205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/go.mod b/vendor/github.com/klauspost/compress/go.mod
new file mode 100644
index 000000000..2a4f0a41f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/go.mod
@@ -0,0 +1,5 @@
+module github.com/klauspost/compress
+
+go 1.13
+
+require github.com/golang/snappy v0.0.3 // indirect
diff --git a/vendor/github.com/klauspost/compress/go.sum b/vendor/github.com/klauspost/compress/go.sum
new file mode 100644
index 000000000..73204cafa
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/go.sum
@@ -0,0 +1,2 @@
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 0823c928c..8323dc053 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -161,6 +161,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
return s.Out, false, nil
}
+// EstimateSizes will estimate the table size, the compressed data size using
+// a new table, and the compressed data size when reusing the previous table.
+// Sizes that could not be computed are returned as -1.
+func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
+ s, err = s.prepare(in)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ // Create histogram, if none was provided.
+ tableSz, dataSz, reuseSz = -1, -1, -1
+ maxCount := s.maxCount
+ var canReuse = false
+ if maxCount == 0 {
+ maxCount, canReuse = s.countSimple(in)
+ } else {
+ canReuse = s.canUseTable(s.prevTable)
+ }
+
+ // We want the output size to be less than this:
+ wantSize := len(in)
+ if s.WantLogLess > 0 {
+ wantSize -= wantSize >> s.WantLogLess
+ }
+
+ // Reset for next run.
+ s.clearCount = true
+ s.maxCount = 0
+ if maxCount >= len(in) {
+ if maxCount > len(in) {
+ return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+ }
+ if len(in) == 1 {
+ return 0, 0, 0, ErrIncompressible
+ }
+ // One symbol, use RLE
+ return 0, 0, 0, ErrUseRLE
+ }
+ if maxCount == 1 || maxCount < (len(in)>>7) {
+ // Each symbol present maximum once or too well distributed.
+ return 0, 0, 0, ErrIncompressible
+ }
+
+ // Calculate new table.
+ err = s.buildCTable()
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ if false && !s.canUseTable(s.cTable) {
+ panic("invalid table generated")
+ }
+
+ tableSz, err = s.cTable.estTableSize(s)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ if canReuse {
+ reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
+ }
+ dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])
+
+ // Restore
+ return tableSz, dataSz, reuseSz, nil
+}
+
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
return s.compress1xDo(s.Out, src)
}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 7ec2022b6..3ee00ecb4 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -245,6 +245,68 @@ func (c cTable) write(s *Scratch) error {
return nil
}
+func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
+ var (
+ // precomputed conversion table
+ bitsToWeight [tableLogMax + 1]byte
+ huffLog = s.actualTableLog
+ // last weight is not saved.
+ maxSymbolValue = uint8(s.symbolLen - 1)
+ huffWeight = s.huffWeight[:256]
+ )
+ const (
+ maxFSETableLog = 6
+ )
+ // convert to weight
+ bitsToWeight[0] = 0
+ for n := uint8(1); n < huffLog+1; n++ {
+ bitsToWeight[n] = huffLog + 1 - n
+ }
+
+ // Acquire histogram for FSE.
+ hist := s.fse.Histogram()
+ hist = hist[:256]
+ for i := range hist[:16] {
+ hist[i] = 0
+ }
+ for n := uint8(0); n < maxSymbolValue; n++ {
+ v := bitsToWeight[c[n].nBits] & 15
+ huffWeight[n] = v
+ hist[v]++
+ }
+
+ // FSE compress if feasible.
+ if maxSymbolValue >= 2 {
+ huffMaxCnt := uint32(0)
+ huffMax := uint8(0)
+ for i, v := range hist[:16] {
+ if v == 0 {
+ continue
+ }
+ huffMax = byte(i)
+ if v > huffMaxCnt {
+ huffMaxCnt = v
+ }
+ }
+ s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+ s.fse.TableLog = maxFSETableLog
+ b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+ if err == nil && len(b) < int(s.symbolLen>>1) {
+ sz += 1 + len(b)
+ return sz, nil
+ }
+ // Unable to compress (RLE/incompressible).
+ }
+ // Write raw values as 4 bits each (max: 15).
+ if maxSymbolValue > (256 - 128) {
+ // Should not happen: likely means the source cannot be compressed.
+ return 0, ErrIncompressible
+ }
+ // special case, pack weights 4 bits/weight.
+ sz += 1 + int(maxSymbolValue/2)
+ return sz, nil
+}
+
// estimateSize returns the estimated size in bytes of the input represented in the
// histogram supplied.
func (c cTable) estimateSize(hist []uint32) int {
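
For the raw fallback in `estTableSize` above, weights are 4 bits each and packed two per byte, with the last weight implicit, so the estimated header cost is one descriptor byte plus `maxSymbolValue/2`. A quick standalone check of that arithmetic (the helper name is ours; values are illustrative):

```go
package main

import "fmt"

// rawWeightHeaderSize mirrors the raw-packing estimate in estTableSize:
// one descriptor byte plus two 4-bit weights per byte.
func rawWeightHeaderSize(maxSymbolValue uint8) int {
	return 1 + int(maxSymbolValue/2)
}

func main() {
	// 100 symbols -> maxSymbolValue 99 -> 1 + 49 = 50 bytes.
	fmt.Println(rawWeightHeaderSize(99))
}
```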
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
new file mode 100644
index 000000000..a9faf7a02
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -0,0 +1,5 @@
+module github.com/klauspost/compress
+
+go 1.16
+
+require github.com/golang/snappy v0.0.3 // indirect
diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum
new file mode 100644
index 000000000..73204cafa
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2sx.sum
@@ -0,0 +1,2 @@
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index 787813fa9..c8f0f16fc 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -152,7 +152,7 @@ file out level insize outsize millis mb/s
silesia.tar zskp 1 211947520 73101992 643 313.87
silesia.tar zskp 2 211947520 67504318 969 208.38
silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 7691 26.28
+silesia.tar zskp 4 211947520 60995370 8825 22.90
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -162,7 +162,7 @@ silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80369488 1168 173.06
+silesia.tar gzkp 1 211947520 80136201 1152 175.45
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
@@ -171,13 +171,15 @@ file out level insize outsize millis mb/s
gob-stream zskp 1 1911399616 235022249 3088 590.30
gob-stream zskp 2 1911399616 205669791 3786 481.34
gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 167273881 29337 62.13
+gob-stream zskp 4 1911399616 165609838 50369 36.19
+
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
+
gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 362156523 5695 320.08
+gob-stream gzkp 1 1911399616 359753026 5438 335.20
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
@@ -187,11 +189,13 @@ file out level insize outsize millis mb/s
enwik9 zskp 1 1000000000 343848582 3609 264.18
enwik9 zskp 2 1000000000 317276632 5746 165.97
enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 275241169 36430 26.18
+enwik9 zskp 4 1000000000 262183768 82837 11.51
+
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
+
enwik9 gzstd 1 1000000000 382578136 9604 99.30
enwik9 gzkp 1 1000000000 383825945 6544 145.73
@@ -202,13 +206,15 @@ file out level insize outsize millis mb/s
github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 503314661 93811 63.78
+github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
+
github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
@@ -217,13 +223,15 @@ file out level insize outsize millis mb/s
rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 3020370044 404956 20.16
+rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+
rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
@@ -232,13 +240,15 @@ file out level insize outsize millis mb/s
nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 490907191 65939 48.10
+nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+
nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
```
## Decompressor
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 4d984c3b2..f430f58b5 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -260,9 +260,10 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
if len(d.current.b) > 0 {
n2, err2 := w.Write(d.current.b)
n += int64(n2)
- if err2 != nil && d.current.err == nil {
+ if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) {
d.current.err = err2
- break
+ } else if n2 != len(d.current.b) {
+ d.current.err = io.ErrShortWrite
}
}
if d.current.err != nil {
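
The `WriteTo` fix above tightens `io.Writer` semantics: a write error now takes precedence over a pending `io.EOF`, and a writer that accepts fewer bytes without erroring surfaces as `io.ErrShortWrite`. A hedged sketch of the consuming side; the file name is illustrative:

```go
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	in, err := os.Open("payload.zst") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	dec, err := zstd.NewReader(in)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	// WriteTo drains the stream; with this fix a short write is reported
	// as io.ErrShortWrite instead of being silently dropped.
	n, err := dec.WriteTo(os.Stdout)
	if err != nil {
		log.Fatalf("wrote %d bytes: %v", n, err)
	}
}
```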
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 60f298648..295cd602a 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -38,8 +38,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
// WindowSize returns the window size of the encoder,
// or a window size small enough to contain the input size, if > 0.
-func (e *fastBase) WindowSize(size int) int32 {
- if size > 0 && size < int(e.maxMatchOff) {
+func (e *fastBase) WindowSize(size int64) int32 {
+ if size > 0 && size < int64(e.maxMatchOff) {
b := int32(1) << uint(bits.Len(uint(size)))
// Keep minimum window.
if b < 1024 {
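
`WindowSize` switching from `int` to `int64` lets callers pass content sizes above 2 GiB on 32-bit platforms; for small known inputs the window is rounded up to the next power of two with a 1 KiB floor. A standalone sketch of that rounding rule (the helper is ours; for sizes outside the range the encoder falls back to its maximum match offset, which is not shown in the hunk):

```go
package main

import (
	"fmt"
	"math/bits"
)

// windowFor mirrors the rounding in fastBase.WindowSize for sizes below
// the encoder's maximum match offset: next power of two, at least 1 KiB.
func windowFor(size, maxMatchOff int64) int64 {
	if size <= 0 || size >= maxMatchOff {
		return maxMatchOff
	}
	b := int64(1) << uint(bits.Len(uint(size)))
	if b < 1024 {
		b = 1024
	}
	return b
}

func main() {
	fmt.Println(windowFor(100, 1<<23))  // 1024 (floor)
	fmt.Println(windowFor(5000, 1<<23)) // 8192
}
```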
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index b7d4b9004..96028ecd8 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -5,22 +5,61 @@
package zstd
import (
+ "bytes"
"fmt"
- "math/bits"
+
+ "github.com/klauspost/compress"
)
const (
- bestLongTableBits = 20 // Bits used in the long match table
+ bestLongTableBits = 22 // Bits used in the long match table
bestLongTableSize = 1 << bestLongTableBits // Size of the table
+ bestLongLen = 8 // Bytes used for table hash
// Note: Increasing the short table bits or making the hash shorter
// can actually lead to compression degradation since it will 'steal' more from the
// long match table and match offsets are quite big.
// This greatly depends on the type of input.
- bestShortTableBits = 16 // Bits used in the short match table
+ bestShortTableBits = 18 // Bits used in the short match table
bestShortTableSize = 1 << bestShortTableBits // Size of the table
+ bestShortLen = 4 // Bytes used for table hash
+
)
+type match struct {
+ offset int32
+ s int32
+ length int32
+ rep int32
+ est int32
+}
+
+const highScore = 25000
+
+// estBits will estimate output bits from predefined tables.
+func (m *match) estBits(bitsPerByte int32) {
+ mlc := mlCode(uint32(m.length - zstdMinMatch))
+ var ofc uint8
+ if m.rep < 0 {
+ ofc = ofCode(uint32(m.s-m.offset) + 3)
+ } else {
+ ofc = ofCode(uint32(m.rep))
+ }
+ // Cost of the match encoding, excluding literals (handled below).
+ ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
+
+ // Add cost of match encoding...
+ m.est = int32(ofTT.outBits + mlTT.outBits)
+ m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
+ // Subtract savings compared to literal encoding...
+ m.est -= (m.length * bitsPerByte) >> 10
+ if m.est > 0 {
+ // Unlikely to be a gain; discard the match.
+ m.length = 0
+ m.est = highScore
+ }
+}
+
// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2.
@@ -109,6 +148,14 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
return
}
+ // Use this to estimate literal cost.
+ // Fixed point: scaled by 2^10 (10 fractional bits).
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
+ // Huffman can never go < 1 bit/byte
+ if bitsPerByte < 1024 {
+ bitsPerByte = 1024
+ }
+
// Override src
src = e.hist
sLimit := int32(len(src)) - inputMargin
@@ -145,51 +192,49 @@ encodeLoop:
panic("offset0 was 0")
}
- type match struct {
- offset int32
- s int32
- length int32
- rep int32
- }
- matchAt := func(offset int32, s int32, first uint32, rep int32) match {
- if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
- return match{offset: offset, s: s}
- }
- return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
- }
-
bestOf := func(a, b match) match {
- aScore := b.s - a.s + a.length
- bScore := a.s - b.s + b.length
- if a.rep < 0 {
- aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8
- }
- if b.rep < 0 {
- bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8
- }
- if aScore >= bScore {
+ if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
return a
}
return b
}
const goodEnough = 100
- nextHashL := hash8(cv, bestLongTableBits)
- nextHashS := hash4x64(cv, bestShortTableBits)
+ nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
+ nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
+ matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+ if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+ return match{s: s, est: highScore}
+ }
+ if debugAsserts {
+ if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
+ panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+ }
+ }
+ m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+ m.estBits(bitsPerByte)
+ return m
+ }
+
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+
if canRepeat && best.length < goodEnough {
- best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1))
- best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2))
- best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3))
+ cv32 := uint32(cv >> 8)
+ spp := s + 1
+ best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
+ best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
+ best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
if best.length > 0 {
- best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1))
- best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2))
- best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3))
+ cv32 = uint32(cv >> 24)
+ spp += 2
+ best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
+ best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
+ best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
}
}
// Load next and check...
@@ -209,22 +254,28 @@ encodeLoop:
}
s++
- candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)]
+ candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
cv = load6432(src, s)
cv2 := load6432(src, s+1)
- candidateL = e.longTable[hash8(cv, bestLongTableBits)]
- candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)]
+ candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
+ candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
+ // Short at s+1
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+ // Long at s+1, s+2
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
-
+ if false {
+ // Short at s+3.
+ // Too often worse...
+ best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
+ }
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
- nextHashL := hash8(load6432(src, sAt), bestLongTableBits)
+ nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
@@ -236,6 +287,12 @@ encodeLoop:
}
}
+ if debugAsserts {
+ if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
+ panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
+ }
+ }
+
// We have a match, we can store the forward value
if best.rep > 0 {
s = best.s
@@ -284,8 +341,8 @@ encodeLoop:
off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
- h0 := hash8(cv0, bestLongTableBits)
- h1 := hash4x64(cv0, bestShortTableBits)
+ h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+ h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
off++
@@ -311,7 +368,7 @@ encodeLoop:
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
- if debugAsserts && canRepeat && int(offset1) > len(src) {
+ if debugAsserts && int(offset1) > len(src) {
panic("invalid offset")
}
@@ -352,8 +409,8 @@ encodeLoop:
// every entry
for index0 < s-1 {
cv0 := load6432(src, index0)
- h0 := hash8(cv0, bestLongTableBits)
- h1 := hash4x64(cv0, bestShortTableBits)
+ h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+ h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
@@ -374,8 +431,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hash4x64(cv, bestShortTableBits)
- nextHashL := hash8(cv, bestLongTableBits)
+ nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
+ nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -425,7 +482,7 @@ func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
e.Encode(blk, src)
}
-// ResetDict will reset and set a dictionary if not nil
+// Reset will reset and set a dictionary if not nil
func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
e.resetBase(d, singleBlock)
if d == nil {
@@ -441,10 +498,10 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
const hashLog = bestShortTableBits
cv := load6432(d.content, i-e.maxMatchOff)
- nextHash := hash4x64(cv, hashLog) // 0 -> 4
- nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5
- nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6
- nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7
+ nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4
+ nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5
+ nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
+ nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
e.dictTable[nextHash] = prevEntry{
prev: e.dictTable[nextHash].offset,
offset: i,
@@ -472,7 +529,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
}
if len(d.content) >= 8 {
cv := load6432(d.content, 0)
- h := hash8(cv, bestLongTableBits)
+ h := hashLen(cv, bestLongTableBits, bestLongLen)
e.dictLongTable[h] = prevEntry{
offset: e.maxMatchOff,
prev: e.dictLongTable[h].offset,
@@ -482,7 +539,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
off := 8 // First to read
for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[off]) << 56)
- h := hash8(cv, bestLongTableBits)
+ h := hashLen(cv, bestLongTableBits, bestLongLen)
e.dictLongTable[h] = prevEntry{
offset: i,
prev: e.dictLongTable[h].offset,
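
The rewritten best encoder scores candidates by estimated output bits rather than raw length: `bitsPerByte` is the Shannon entropy of the block in fixed point with 10 fractional bits, floored at 1 bit/byte since Huffman cannot beat that. A small sketch of that cost estimate, assuming `compress.ShannonEntropyBits` as imported by this hunk:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress"
)

// literalCostBitsPerByte estimates the cost of emitting data as literals,
// in fixed point with 10 fractional bits, as the best encoder now does.
func literalCostBitsPerByte(src []byte) int32 {
	bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
	if bitsPerByte < 1024 { // Huffman can never go below 1 bit/byte.
		bitsPerByte = 1024
	}
	return bitsPerByte
}

func main() {
	src := []byte("aaaaaaaaaabbbbbbbbbb") // low-entropy sample
	bpb := literalCostBitsPerByte(src)
	fmt.Printf("%.2f bits/byte\n", float64(bpb)/1024)
}
```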
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index eab7b5083..602c05ee0 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -9,6 +9,7 @@ import "fmt"
const (
betterLongTableBits = 19 // Bits used in the long match table
betterLongTableSize = 1 << betterLongTableBits // Size of the table
+ betterLongLen = 8 // Bytes used for table hash
// Note: Increasing the short table bits or making the hash shorter
// can actually lead to compression degradation since it will 'steal' more from the
@@ -16,6 +17,7 @@ const (
// This greatly depends on the type of input.
betterShortTableBits = 13 // Bits used in the short match table
betterShortTableSize = 1 << betterShortTableBits // Size of the table
+ betterShortLen = 5 // Bytes used for table hash
betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table
betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
@@ -154,8 +156,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hash5(cv, betterShortTableBits)
- nextHashL := hash8(cv, betterLongTableBits)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -214,10 +216,10 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
- h0 := hash8(cv0, betterLongTableBits)
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
- e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
cv = load6432(src, s)
@@ -275,10 +277,10 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
- h0 := hash8(cv0, betterLongTableBits)
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
- e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
cv = load6432(src, s)
@@ -353,7 +355,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, betterLongTableBits)
+ nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur
@@ -413,8 +415,8 @@ encodeLoop:
}
// Try to find a better match by searching for a long match at the end of the current best match
- if true && s+matched < sLimit {
- nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+ if s+matched < sLimit {
+ nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
cv := load3232(src, s)
candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched
@@ -495,10 +497,10 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
- h0 := hash8(cv0, betterLongTableBits)
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
- e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
@@ -516,8 +518,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hash5(cv, betterShortTableBits)
- nextHashL := hash8(cv, betterLongTableBits)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -672,8 +674,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hash5(cv, betterShortTableBits)
- nextHashL := hash8(cv, betterLongTableBits)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -734,11 +736,11 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
- h0 := hash8(cv0, betterLongTableBits)
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
- h1 := hash5(cv1, betterShortTableBits)
+ h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
@@ -798,11 +800,11 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
- h0 := hash8(cv0, betterLongTableBits)
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
- h1 := hash5(cv1, betterShortTableBits)
+ h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
@@ -879,7 +881,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, betterLongTableBits)
+ nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur
@@ -940,7 +942,7 @@ encodeLoop:
}
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
- nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+ nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
cv := load3232(src, s)
candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched
@@ -1021,11 +1023,11 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
- h0 := hash8(cv0, betterLongTableBits)
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
- h1 := hash5(cv1, betterShortTableBits)
+ h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
@@ -1045,8 +1047,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hash5(cv, betterShortTableBits)
- nextHashL := hash8(cv, betterLongTableBits)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -1113,10 +1115,10 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
const hashLog = betterShortTableBits
cv := load6432(d.content, i-e.maxMatchOff)
- nextHash := hash5(cv, hashLog) // 0 -> 4
- nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5
- nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
- nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+ nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4
+ nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5
+ nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+ nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
e.dictTable[nextHash] = tableEntry{
val: uint32(cv),
offset: i,
@@ -1145,7 +1147,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if len(d.content) >= 8 {
cv := load6432(d.content, 0)
- h := hash8(cv, betterLongTableBits)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
e.dictLongTable[h] = prevEntry{
offset: e.maxMatchOff,
prev: e.dictLongTable[h].offset,
@@ -1155,7 +1157,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
off := 8 // First to read
for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[off]) << 56)
- h := hash8(cv, betterLongTableBits)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
e.dictLongTable[h] = prevEntry{
offset: i,
prev: e.dictLongTable[h].offset,
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 96b21b90e..d6b310424 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -10,6 +10,7 @@ const (
dFastLongTableBits = 17 // Bits used in the long match table
dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastLongLen = 8 // Bytes used for table hash
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
@@ -17,6 +18,8 @@ const (
dFastShortTableBits = tableBits // Bits used in the short match table
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastShortLen = 5 // Bytes used for table hash
+
)
type doubleFastEncoder struct {
@@ -124,8 +127,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -208,7 +211,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, dFastLongTableBits)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@@ -304,16 +307,16 @@ encodeLoop:
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
- e.longTable[hash8(cv0, dFastLongTableBits)] = te0
- e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+ e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8
cv1 >>= 8
te0.offset++
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
- e.table[hash5(cv0, dFastShortTableBits)] = te0
- e.table[hash5(cv1, dFastShortTableBits)] = te1
+ e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+ e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s)
@@ -330,8 +333,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -436,8 +439,8 @@ encodeLoop:
var t int32
for {
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -521,7 +524,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, dFastLongTableBits)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@@ -614,16 +617,16 @@ encodeLoop:
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
- e.longTable[hash8(cv0, dFastLongTableBits)] = te0
- e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+ e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8
cv1 >>= 8
te0.offset++
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
- e.table[hash5(cv0, dFastShortTableBits)] = te0
- e.table[hash5(cv1, dFastShortTableBits)] = te1
+ e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+ e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s)
@@ -640,8 +643,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hash5(cv1>>8, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -782,8 +785,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -868,7 +871,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, dFastLongTableBits)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@@ -965,8 +968,8 @@ encodeLoop:
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
- longHash1 := hash8(cv0, dFastLongTableBits)
- longHash2 := hash8(cv0, dFastLongTableBits)
+ longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@@ -977,8 +980,8 @@ encodeLoop:
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
- hashVal1 := hash5(cv0, dFastShortTableBits)
- hashVal2 := hash5(cv1, dFastShortTableBits)
+ hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
+ hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
e.table[hashVal1] = te0
e.markShardDirty(hashVal1)
e.table[hashVal2] = te1
@@ -999,8 +1002,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -1071,14 +1074,14 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if len(d.content) >= 8 {
cv := load6432(d.content, 0)
- e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv),
offset: e.maxMatchOff,
}
end := int32(len(d.content)) - 8 + e.maxMatchOff
for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
- e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv),
offset: i,
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 2246d286d..f2502629b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -11,12 +11,13 @@ import (
)
const (
- tableBits = 15 // Bits used in the table
- tableSize = 1 << tableBits // Size of the table
- tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
- tableShardSize = tableSize / tableShardCnt // Size of an individual shard
- tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
- maxMatchLength = 131074
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
+ tableShardSize = tableSize / tableShardCnt // Size of an individual shard
+ tableFastHashLen = 6
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ maxMatchLength = 131074
)
type tableEntry struct {
@@ -122,8 +123,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHash := hash6(cv, hashLog)
- nextHash2 := hash6(cv>>8, hashLog)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2
@@ -301,7 +302,7 @@ encodeLoop:
}
// Store this, since we have it.
- nextHash := hash6(cv, hashLog)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
@@ -405,8 +406,8 @@ encodeLoop:
// By not using them for the first 3 matches
for {
- nextHash := hash6(cv, hashLog)
- nextHash2 := hash6(cv>>8, hashLog)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2
@@ -589,7 +590,7 @@ encodeLoop:
}
// Store this, since we have it.
- nextHash := hash6(cv, hashLog)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
@@ -715,8 +716,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHash := hash6(cv, hashLog)
- nextHash2 := hash6(cv>>8, hashLog)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2
@@ -896,7 +897,7 @@ encodeLoop:
}
// Store this, since we have it.
- nextHash := hash6(cv, hashLog)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
e.markShardDirty(nextHash)
seq.matchLen = uint32(l) - zstdMinMatch
@@ -957,9 +958,9 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const hashLog = tableBits
cv := load6432(d.content, i-e.maxMatchOff)
- nextHash := hash6(cv, hashLog) // 0 -> 5
- nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6
- nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
+ nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5
+ nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6
+ nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
e.dictTable[nextHash] = tableEntry{
val: uint32(cv),
offset: i,
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index ea85548fc..e6e315969 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -33,7 +33,7 @@ type encoder interface {
Block() *blockEnc
CRC() *xxhash.Digest
AppendCRC([]byte) []byte
- WindowSize(size int) int32
+ WindowSize(size int64) int32
UseBlock(*blockEnc)
Reset(d *dict, singleBlock bool)
}
@@ -48,6 +48,8 @@ type encoderState struct {
err error
writeErr error
nWritten int64
+ nInput int64
+ frameContentSize int64
headerWritten bool
eofWritten bool
fullFrameWritten bool
@@ -120,7 +122,21 @@ func (e *Encoder) Reset(w io.Writer) {
s.w = w
s.err = nil
s.nWritten = 0
+ s.nInput = 0
s.writeErr = nil
+ s.frameContentSize = 0
+}
+
+// ResetContentSize will reset and set a content size for the next stream.
+// If the number of bytes written does not match the given size, an error
+// will be returned when calling Close().
+// The content size is cleared again when Reset is called.
+// Sizes <= 0 result in no content size being set.
+func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
+ e.Reset(w)
+ if size >= 0 {
+ e.state.frameContentSize = size
+ }
}
// Write data to the encoder.
@@ -190,6 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
return s.err
}
s.nWritten += int64(n2)
+ s.nInput += int64(len(s.filling))
s.current = s.current[:0]
s.filling = s.filling[:0]
s.headerWritten = true
@@ -200,8 +217,8 @@ func (e *Encoder) nextBlock(final bool) error {
var tmp [maxHeaderSize]byte
fh := frameHeader{
- ContentSize: 0,
- WindowSize: uint32(s.encoder.WindowSize(0)),
+ ContentSize: uint64(s.frameContentSize),
+ WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)),
SingleSegment: false,
Checksum: e.o.crc,
DictID: e.o.dict.ID(),
@@ -243,6 +260,7 @@ func (e *Encoder) nextBlock(final bool) error {
// Move blocks forward.
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+ s.nInput += int64(len(s.current))
s.wg.Add(1)
go func(src []byte) {
if debugEncoder {
@@ -394,6 +412,11 @@ func (e *Encoder) Close() error {
if err != nil {
return err
}
+ if s.frameContentSize > 0 {
+ if s.nInput != s.frameContentSize {
+ return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput)
+ }
+ }
if e.state.fullFrameWritten {
return s.err
}
@@ -470,7 +493,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
fh := frameHeader{
ContentSize: uint64(len(src)),
- WindowSize: uint32(enc.WindowSize(len(src))),
+ WindowSize: uint32(enc.WindowSize(int64(len(src)))),
SingleSegment: single,
Checksum: e.o.crc,
DictID: e.o.dict.ID(),
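
`ResetContentSize` gives streaming callers what `EncodeAll` already had: a content size in the frame header, verified on `Close`. A minimal sketch; the buffer and payload are illustrative:

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := []byte("hello zstd content size")

	var buf bytes.Buffer
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Declare the exact size up front; Close() errors if the bytes differ.
	enc.ResetContentSize(&buf, int64(len(payload)))

	if _, err := enc.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("frame: %d bytes", buf.Len())
}
```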
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 16d4ab63c..7d29e1d68 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -189,7 +189,7 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
case level >= 6 && level < 10:
return SpeedBetterCompression
case level >= 10:
- return SpeedBetterCompression
+ return SpeedBestCompression
}
return SpeedDefault
}
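
The one-line fix above maps zstd CLI levels >= 10 to `SpeedBestCompression`; previously they fell through to `SpeedBetterCompression`. A quick sketch of how the mapping is typically consumed:

```go
package main

import (
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// With the fix, zstd level 19 now selects SpeedBestCompression.
	lvl := zstd.EncoderLevelFromZstd(19)
	fmt.Println(lvl) // "best"

	enc, err := zstd.NewWriter(io.Discard, zstd.WithEncoderLevel(lvl))
	if err != nil {
		panic(err)
	}
	enc.Close()
}
```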
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
index 4a752067f..cf33f29a1 100644
--- a/vendor/github.com/klauspost/compress/zstd/hash.go
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -13,24 +13,24 @@ const (
prime8bytes = 0xcf1bbcdcb7a56463
)
-// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes.
-// l must be >=4 and <=8. Any other value will return hash for 4 bytes.
-// h should always be <32.
-// Preferably h and l should be a constant.
-// FIXME: This does NOT get resolved, if 'mls' is constant,
-// so this cannot be used.
-func hashLen(u uint64, hashLog, mls uint8) uint32 {
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return a hash of the lowest 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be constants for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
switch mls {
+ case 3:
+ return (uint32(u<<8) * prime3bytes) >> (32 - length)
case 5:
- return hash5(u, hashLog)
+ return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
case 6:
- return hash6(u, hashLog)
+ return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
case 7:
- return hash7(u, hashLog)
+ return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
case 8:
- return hash8(u, hashLog)
+ return uint32((u * prime8bytes) >> (64 - length))
default:
- return hash4x64(u, hashLog)
+ return (uint32(u) * prime4bytes) >> (32 - length)
}
}
@@ -39,39 +39,3 @@ func hashLen(u uint64, hashLog, mls uint8) uint32 {
func hash3(u uint32, h uint8) uint32 {
return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}
-
-// hash4 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4(u uint32, h uint8) uint32 {
- return (u * prime4bytes) >> ((32 - h) & 31)
-}
-
-// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4x64(u uint64, h uint8) uint32 {
- return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
-}
-
-// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash5(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
-}
-
-// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash6(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
-}
-
-// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash7(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
-}
-
-// hash8 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash8(u uint64, h uint8) uint32 {
- return uint32((u * prime8bytes) >> ((64 - h) & 63))
-}
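
The consolidated `hashLen` keeps the usual multiplicative scheme: shift the interesting low `mls` bytes to the top of the word, multiply by a per-width odd prime, and keep the top `length` bits. A standalone sketch of the 8-byte case, using the `prime8bytes` constant visible in the context above:

```go
package main

import "fmt"

const prime8bytes = 0xcf1bbcdcb7a56463 // from zstd/hash.go

// hash8 mirrors the mls==8 branch of hashLen: multiply the full 64-bit
// word by an odd prime and keep the top 'length' bits as the table index.
func hash8(u uint64, length uint8) uint32 {
	return uint32((u * prime8bytes) >> (64 - length))
}

func main() {
	// A 22-bit index, matching the new bestLongTableBits.
	fmt.Printf("%#x\n", hash8(0xdeadbeefcafebabe, 22))
}
```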
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
index b3d142d8c..14e1e38c2 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
@@ -1,12 +1,13 @@
package label
import (
+ "errors"
+ "fmt"
"os"
"os/user"
"strings"
"github.com/opencontainers/selinux/go-selinux"
- "github.com/pkg/errors"
)
// Valid Label Options
@@ -53,11 +54,11 @@ func InitLabels(options []string) (plabel string, mlabel string, retErr error) {
return "", selinux.PrivContainerMountLabel(), nil
}
if i := strings.Index(opt, ":"); i == -1 {
- return "", "", errors.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt)
+ return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt)
}
con := strings.SplitN(opt, ":", 2)
if !validOptions[con[0]] {
- return "", "", errors.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0])
+ return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0])
}
if con[0] == "filetype" {
mcon["type"] = con[1]
@@ -151,7 +152,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
path = strings.TrimSuffix(path, "/")
}
if exclude_paths[path] {
- return errors.Errorf("SELinux relabeling of %s is not allowed", path)
+ return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
}
if shared {
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
new file mode 100644
index 000000000..897ecbac4
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
@@ -0,0 +1,22 @@
+// +build linux,go1.16
+
+package selinux
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+
+ "github.com/opencontainers/selinux/pkg/pwalkdir"
+)
+
+func rchcon(fpath, label string) error {
+ return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
+ e := setFileLabel(p, label)
+ // Walking a file tree can race with removal, so ignore ENOENT.
+ if errors.Is(e, os.ErrNotExist) {
+ return nil
+ }
+ return e
+ })
+}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
new file mode 100644
index 000000000..2c8b033ce
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
@@ -0,0 +1,21 @@
+// +build linux,!go1.16
+
+package selinux
+
+import (
+ "errors"
+ "os"
+
+ "github.com/opencontainers/selinux/pkg/pwalk"
+)
+
+func rchcon(fpath, label string) error {
+ return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error {
+ e := setFileLabel(p, label)
+ // Walking a file tree can race with removal, so ignore ENOENT.
+ if errors.Is(e, os.ErrNotExist) {
+ return nil
+ }
+ return e
+ })
+}
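
The two new `rchcon` files split the recursive relabel walk by Go version via build tags: Go 1.16+ gets the faster `pwalkdir`, older toolchains keep `pwalk`. Callers are unaffected; a hedged sketch of the exported entry point that ends up calling `rchcon` (the path and label are illustrative):

```go
package main

import (
	"log"

	selinux "github.com/opencontainers/selinux/go-selinux"
)

func main() {
	// Recursively relabel a tree; which walker backs the call is decided
	// at build time by the go1.16 build tag.
	const label = "system_u:object_r:container_file_t:s0" // illustrative
	if err := selinux.Chcon("/var/lib/mydata", label, true); err != nil {
		log.Fatal(err)
	}
}
```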
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
index b336ebad3..9ffd77afa 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
@@ -1,7 +1,7 @@
package selinux
import (
- "github.com/pkg/errors"
+ "errors"
)
const (
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index a91a116f8..a804473e4 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -5,6 +5,7 @@ import (
"bytes"
"crypto/rand"
"encoding/binary"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -17,8 +18,6 @@ import (
"sync"
"github.com/bits-and-blooms/bitset"
- "github.com/opencontainers/selinux/pkg/pwalk"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -120,7 +119,7 @@ func verifySELinuxfsMount(mnt string) bool {
if err == nil {
break
}
- if err == unix.EAGAIN || err == unix.EINTR {
+ if err == unix.EAGAIN || err == unix.EINTR { //nolint:errorlint // unix errors are bare
continue
}
return false
@@ -250,12 +249,12 @@ func isProcHandle(fh *os.File) error {
if err == nil {
break
}
- if err != unix.EINTR {
- return errors.Wrapf(err, "statfs(%q) failed", fh.Name())
+ if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+ return &os.PathError{Op: "fstatfs", Path: fh.Name(), Err: err}
}
}
if buf.Type != unix.PROC_SUPER_MAGIC {
- return errors.Errorf("file %q is not on procfs", fh.Name())
+ return fmt.Errorf("file %q is not on procfs", fh.Name())
}
return nil
@@ -311,8 +310,8 @@ func setFileLabel(fpath string, label string) error {
if err == nil {
break
}
- if err != unix.EINTR {
- return errors.Wrapf(err, "failed to set file label on %s", fpath)
+ if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+ return &os.PathError{Op: "lsetxattr", Path: fpath, Err: err}
}
}
@@ -327,7 +326,7 @@ func fileLabel(fpath string) (string, error) {
label, err := lgetxattr(fpath, xattrNameSelinux)
if err != nil {
- return "", err
+ return "", &os.PathError{Op: "lgetxattr", Path: fpath, Err: err}
}
// Trim the NUL byte at the end of the byte buffer, if present.
if len(label) > 0 && label[len(label)-1] == '\x00' {
@@ -390,7 +389,7 @@ func writeCon(fpath, val string) error {
_, err = out.Write(nil)
}
if err != nil {
- return errors.Wrapf(err, "failed to set %s on procfs", fpath)
+ return &os.PathError{Op: "write", Path: fpath, Err: err}
}
return nil
}
@@ -489,13 +488,13 @@ func (l *level) parseLevel(levelStr string) error {
lvl := strings.SplitN(levelStr, ":", 2)
sens, err := parseLevelItem(lvl[0], sensitivity)
if err != nil {
- return errors.Wrap(err, "failed to parse sensitivity")
+ return fmt.Errorf("failed to parse sensitivity: %w", err)
}
l.sens = sens
if len(lvl) > 1 {
cats, err := catsToBitset(lvl[1])
if err != nil {
- return errors.Wrap(err, "failed to parse categories")
+ return fmt.Errorf("failed to parse categories: %w", err)
}
l.cats = cats
}
@@ -513,14 +512,14 @@ func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {
case 2:
mlsRange.high = &level{}
if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil {
- return nil, errors.Wrapf(err, "failed to parse high level %q", levelSlice[1])
+ return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err)
}
fallthrough
// rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023
case 1:
mlsRange.low = &level{}
if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil {
- return nil, errors.Wrapf(err, "failed to parse low level %q", levelSlice[0])
+ return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err)
}
}
@@ -697,17 +696,21 @@ func socketLabel() (string, error) {
// peerLabel retrieves the label of the client on the other side of a socket
func peerLabel(fd uintptr) (string, error) {
- return unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
+ label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
+ if err != nil {
+ return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err}
+ }
+ return label, nil
}
// setKeyLabel takes a process label and tells the kernel to assign the
// label to the next kernel keyring that gets created
func setKeyLabel(label string) error {
err := writeCon("/proc/self/attr/keycreate", label)
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
return nil
}
- if label == "" && os.IsPermission(errors.Cause(err)) {
+ if label == "" && errors.Is(err, os.ErrPermission) {
return nil
}
return err
@@ -784,7 +787,7 @@ func enforceMode() int {
// setEnforceMode sets the current SELinux mode Enforcing, Permissive.
// Disabled is not valid, since this needs to be set at boot time.
func setEnforceMode(mode int) error {
- return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0644)
+ return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644)
}
// defaultEnforceMode returns the systems default SELinux mode Enforcing,
@@ -985,7 +988,7 @@ func addMcs(processLabel, fileLabel string) (string, string) {
// securityCheckContext validates that the SELinux label is understood by the kernel
func securityCheckContext(val string) error {
- return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0644)
+ return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644)
}
// copyLevel returns a label with the MLS/MCS level from src label replaced on
@@ -1023,7 +1026,7 @@ func badPrefix(fpath string) error {
badPrefixes := []string{"/usr"}
for _, prefix := range badPrefixes {
if strings.HasPrefix(fpath, prefix) {
- return errors.Errorf("relabeling content in %s is not allowed", prefix)
+ return fmt.Errorf("relabeling content in %s is not allowed", prefix)
}
}
return nil
@@ -1044,17 +1047,10 @@ func chcon(fpath string, label string, recurse bool) error {
}
if !recurse {
- return SetFileLabel(fpath, label)
+ return setFileLabel(fpath, label)
}
- return pwalk.Walk(fpath, func(p string, info os.FileInfo, err error) error {
- e := SetFileLabel(p, label)
- // Walk a file tree can race with removal, so ignore ENOENT
- if os.IsNotExist(errors.Cause(e)) {
- return nil
- }
- return e
- })
+ return rchcon(fpath, label)
}
// dupSecOpt takes an SELinux process label and returns security options that
@@ -1072,7 +1068,8 @@ func dupSecOpt(src string) ([]string, error) {
con["type"] == "" {
return nil, nil
}
- dup := []string{"user:" + con["user"],
+ dup := []string{
+ "user:" + con["user"],
"role:" + con["role"],
"type:" + con["type"],
}
@@ -1140,9 +1137,8 @@ func findUserInContext(context Context, r io.Reader, verifier func(string) error
return outConn, nil
}
}
-
if err := scanner.Err(); err != nil {
- return "", errors.Wrap(err, "failed to scan for context")
+ return "", fmt.Errorf("failed to scan for context: %w", err)
}
return "", nil
@@ -1155,7 +1151,7 @@ func getDefaultContextFromReaders(c *defaultSECtx) (string, error) {
context, err := newContext(c.scon)
if err != nil {
- return "", errors.Wrapf(err, "failed to create label for %s", c.scon)
+ return "", fmt.Errorf("failed to create label for %s: %w", c.scon, err)
}
// set so the verifier validates the matched context with the provided user and level.
@@ -1180,7 +1176,7 @@ func getDefaultContextFromReaders(c *defaultSECtx) (string, error) {
return conn, nil
}
- return "", errors.Wrapf(ErrContextMissing, "context not found: %q", c.scon)
+ return "", fmt.Errorf("context %q not found: %w", c.scon, ErrContextMissing)
}
func getDefaultContextWithLevel(user, level, scon string) (string, error) {
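
This file's migration drops `github.com/pkg/errors` for the standard library: `errors.Wrap` becomes `fmt.Errorf` with `%w`, and `os.IsNotExist(errors.Cause(err))` becomes `errors.Is(err, os.ErrNotExist)`. A small sketch showing why callers see no behavioural difference (names illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	base := os.ErrNotExist

	// Standard-library wrapping preserves the error chain via %w...
	wrapped := fmt.Errorf("failed to parse sensitivity: %w", base)

	// ...so errors.Is still finds the sentinel, no errors.Cause required.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}
```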
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
index 117c255be..c6b0a7f26 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
@@ -10,7 +10,7 @@ func lgetxattr(path, attr string) ([]byte, error) {
// Start with a 128 length byte array
dest := make([]byte, 128)
sz, errno := doLgetxattr(path, attr, dest)
- for errno == unix.ERANGE {
+ for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare
// Buffer too small, use zero-sized buffer to get the actual size
sz, errno = doLgetxattr(path, attr, []byte{})
if errno != nil {
@@ -31,7 +31,7 @@ func lgetxattr(path, attr string) ([]byte, error) {
func doLgetxattr(path, attr string, dest []byte) (int, error) {
for {
sz, err := unix.Lgetxattr(path, attr, dest)
- if err != unix.EINTR {
+ if err != unix.EINTR { //nolint:errorlint // unix errors are bare
return sz, err
}
}
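
The //nolint annotations above mark two raw-syscall retry loops: grow the buffer on ERANGE, retry the call on EINTR. A self-contained sketch of the same pair of patterns (function names are illustrative, not the package's API):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// getxattrRetry retries Lgetxattr while the call is interrupted by a signal.
func getxattrRetry(path, attr string, dest []byte) (int, error) {
	for {
		sz, err := unix.Lgetxattr(path, attr, dest)
		if err != unix.EINTR { //nolint:errorlint // unix errors are bare
			return sz, err
		}
	}
}

// lgetxattrAll fetches an xattr value, growing the buffer on ERANGE.
func lgetxattrAll(path, attr string) ([]byte, error) {
	dest := make([]byte, 128)
	for {
		sz, err := getxattrRetry(path, attr, dest)
		if err == unix.ERANGE { //nolint:errorlint // unix errors are bare
			// Buffer too small: a zero-sized buffer asks for the needed size.
			sz, err = getxattrRetry(path, attr, []byte{})
			if err != nil {
				return nil, err
			}
			dest = make([]byte, sz)
			continue
		}
		if err != nil {
			return nil, err
		}
		return dest[:sz], nil
	}
}

func main() {
	val, err := lgetxattrAll("/etc/hostname", "security.selinux")
	fmt.Println(string(val), err)
}
```
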
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
index 16c4dfd3e..7e78dce01 100644
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
@@ -8,6 +8,12 @@ By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
This can be changed by using WalkN function which has the additional
parameter, specifying the number of goroutines (concurrency).
+### pwalk vs pwalkdir
+
+This package is deprecated in favor of
+[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir),
+which is faster, but requires at least Go 1.16.
+
### Caveats
Please note the following limitations of this code:
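
For callers, the deprecation above is mostly a one-line change plus a new callback signature (os.FileInfo becomes fs.DirEntry). A hedged migration sketch, assuming Go 1.16+ and a throwaway /tmp walk:

```go
package main

import (
	"fmt"
	"io/fs"

	"github.com/opencontainers/selinux/pkg/pwalkdir"
)

func main() {
	// pwalk.Walk took a filepath.WalkFunc (path, os.FileInfo, error);
	// pwalkdir.Walk takes an fs.WalkDirFunc (path, fs.DirEntry, error),
	// which avoids a stat(2) per entry unless the callback asks for one.
	err := pwalkdir.Walk("/tmp", func(p string, d fs.DirEntry, err error) error {
		if err != nil { // per the docs, no errors are ever passed to walkFn
			return err
		}
		fmt.Println(p, d.IsDir())
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
```
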
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
index 437b12b3e..011fe862a 100644
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
@@ -1,12 +1,11 @@
package pwalk
import (
+ "fmt"
"os"
"path/filepath"
"runtime"
"sync"
-
- "github.com/pkg/errors"
)
type WalkFunc = filepath.WalkFunc
@@ -20,7 +19,7 @@ type WalkFunc = filepath.WalkFunc
//
// Note that this implementation only supports primitive error handling:
//
-// - no errors are ever passed to WalkFn;
+// - no errors are ever passed to walkFn;
//
// - once a walkFn returns any error, all further processing stops
// and the error is returned to the caller of Walk;
@@ -42,7 +41,7 @@ func Walk(root string, walkFn WalkFunc) error {
func WalkN(root string, walkFn WalkFunc, num int) error {
// make sure limit is sensible
if num < 1 {
- return errors.Errorf("walk(%q): num must be > 0", root)
+ return fmt.Errorf("walk(%q): num must be > 0", root)
}
files := make(chan *walkArgs, 2*num)
@@ -96,7 +95,7 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
return err
}
-// walkArgs holds the arguments that were passed to the Walk or WalkLimit
+// walkArgs holds the arguments that were passed to the Walk or WalkN
// functions.
type walkArgs struct {
path string
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
new file mode 100644
index 000000000..068ac4005
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
@@ -0,0 +1,54 @@
+## pwalkdir: parallel implementation of filepath.WalkDir
+
+This is a wrapper for [filepath.WalkDir](https://pkg.go.dev/path/filepath#WalkDir)
+which may speed it up by calling multiple callback functions (WalkDirFunc)
+in parallel, utilizing goroutines.
+
+By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
+This can be changed by using the WalkN function, which has an additional
+parameter specifying the number of goroutines (concurrency).
+
+### pwalk vs pwalkdir
+
+This package is very similar to
+[pwalk](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk),
+but utilizes `filepath.WalkDir` (added in Go 1.16), which does not call stat(2)
+on every entry and is therefore faster (up to 3x, depending on the usage scenario).
+
+Users who are OK with requiring Go 1.16+ should switch to this
+implementation.
+
+### Caveats
+
+Please note the following limitations of this code:
+
+* Unlike filepath.WalkDir, the order of calls is non-deterministic;
+
+* Only primitive error handling is supported:
+
+ * fs.SkipDir is not supported;
+
+ * no errors are ever passed to WalkDirFunc;
+
+ * once any error is returned from any WalkDirFunc instance, no more calls
+ to WalkDirFunc are made, and the error is returned to the caller of Walk;
+
+ * if more than one WalkDirFunc instance returns an error, only one of
+ those errors is propagated to and returned by Walk; the others are
+ silently discarded.
+
+### Documentation
+
+For the official documentation, see
+https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir
+
+### Benchmarks
+
+For a WalkDirFunc that consists solely of the return statement, this
+implementation is about 15% slower than the standard library's
+filepath.WalkDir.
+
+Otherwise (if a WalkDirFunc is actually doing something) this is usually
+faster, except when WalkN(..., 1) is used. Run `go test -bench .`
+to see how different operations can benefit from it, as well as how the
+level of parallelism affects the speed.
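
As a concrete illustration of the concurrency knob discussed above, a minimal WalkN caller; the path and the per-entry work are made up:

```go
package main

import (
	"fmt"
	"io/fs"
	"sync/atomic"

	"github.com/opencontainers/selinux/pkg/pwalkdir"
)

func main() {
	var count int64
	// Four concurrent callbacks; WalkN(..., 1) degenerates to a serial
	// walk and, per the benchmarks above, is usually slower.
	err := pwalkdir.WalkN("/usr/share", func(p string, d fs.DirEntry, err error) error {
		atomic.AddInt64(&count, 1) // callbacks run in parallel, so stay race-free
		return nil
	}, 4)
	fmt.Println(count, err)
}
```
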
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
new file mode 100644
index 000000000..222820750
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
@@ -0,0 +1,103 @@
+// +build go1.16
+
+package pwalkdir
+
+import (
+ "fmt"
+ "io/fs"
+ "path/filepath"
+ "runtime"
+ "sync"
+)
+
+// Walk is a wrapper for filepath.WalkDir which can call multiple walkFn
+// in parallel, allowing each item to be handled concurrently. A maximum
+// of 2*runtime.NumCPU() walkFn calls will be in flight at any one time.
+// If you want to change the maximum, use WalkN instead.
+//
+// The order of calls is non-deterministic.
+//
+// Note that this implementation only supports primitive error handling:
+//
+// - no errors are ever passed to walkFn;
+//
+// - once a walkFn returns any error, all further processing stops
+// and the error is returned to the caller of Walk;
+//
+// - filepath.SkipDir is not supported;
+//
+// - if more than one walkFn instance returns an error, only one of
+// those errors is propagated and returned by Walk; the others are
+// silently discarded.
+func Walk(root string, walkFn fs.WalkDirFunc) error {
+ return WalkN(root, walkFn, runtime.NumCPU()*2)
+}
+
+// WalkN is a wrapper for filepath.WalkDir which can call multiple walkFn
+// in parallel, allowing each item to be handled concurrently. A maximum
+// of num walkFn calls will be in flight at any one time.
+//
+// Please see the Walk documentation for caveats of using this function.
+func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
+ // make sure limit is sensible
+ if num < 1 {
+ return fmt.Errorf("walk(%q): num must be > 0", root)
+ }
+
+ files := make(chan *walkArgs, 2*num)
+ errCh := make(chan error, 1) // Get the first error, ignore others.
+
+ // Start walking a tree asap.
+ var (
+ err error
+ wg sync.WaitGroup
+ )
+ wg.Add(1)
+ go func() {
+ err = filepath.WalkDir(root, func(p string, entry fs.DirEntry, err error) error {
+ if err != nil {
+ close(files)
+ return err
+ }
+ // Add a file to the queue unless a callback sent an error.
+ select {
+ case e := <-errCh:
+ close(files)
+ return e
+ default:
+ files <- &walkArgs{path: p, entry: entry}
+ return nil
+ }
+ })
+ if err == nil {
+ close(files)
+ }
+ wg.Done()
+ }()
+
+ wg.Add(num)
+ for i := 0; i < num; i++ {
+ go func() {
+ for file := range files {
+ if e := walkFn(file.path, file.entry, nil); e != nil {
+ select {
+ case errCh <- e: // sent ok
+ default: // buffer full
+ }
+ }
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+
+ return err
+}
+
+// walkArgs holds the arguments that were passed to the Walk or WalkN
+// functions.
+type walkArgs struct {
+ path string
+ entry fs.DirEntry
+}
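
A note on the design choice in WalkN above: errCh has capacity 1 and sends go through a select with a default branch, so the first callback error wins and later errors are dropped without ever blocking a worker. The same pattern in isolation (illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	errCh := make(chan error, 1) // room for exactly one error
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			select {
			case errCh <- fmt.Errorf("worker %d: %w", i, errors.New("boom")):
				// first error recorded
			default:
				// buffer full: later errors are silently discarded,
				// exactly as documented for Walk above
			}
		}(i)
	}
	wg.Wait()
	fmt.Println(<-errCh) // prints whichever error arrived first
}
```
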
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8a4664eed..bdaf1e64e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,8 +1,9 @@
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
-# github.com/BurntSushi/toml v0.3.1
+# github.com/BurntSushi/toml v0.4.1
github.com/BurntSushi/toml
+github.com/BurntSushi/toml/internal
# github.com/Microsoft/go-winio v0.5.0
github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/backuptar
@@ -125,7 +126,7 @@ github.com/containers/common/pkg/umask
github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.15.0
+# github.com/containers/image/v5 v5.15.1
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@@ -197,7 +198,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.33.1
+# github.com/containers/storage v1.34.1
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -402,7 +403,8 @@ github.com/json-iterator/go
# github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a
github.com/juju/ansiterm
github.com/juju/ansiterm/tabwriter
-# github.com/klauspost/compress v1.13.1
+# github.com/klauspost/compress v1.13.4
+github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -523,10 +525,11 @@ github.com/opencontainers/runtime-tools/generate
github.com/opencontainers/runtime-tools/generate/seccomp
github.com/opencontainers/runtime-tools/specerror
github.com/opencontainers/runtime-tools/validate
-# github.com/opencontainers/selinux v1.8.2
+# github.com/opencontainers/selinux v1.8.4
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
+github.com/opencontainers/selinux/pkg/pwalkdir
# github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656
github.com/openshift/imagebuilder
github.com/openshift/imagebuilder/dockerfile/command