author    renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>  2023-06-09 10:58:13 +0000
committer Paul Holzinger <pholzing@redhat.com>  2023-06-12 10:31:58 +0200
commit    444f19cb2a3d056a5ffca96f4b0a8af69ecde5ab (patch)
tree      4f0e51d420c2e3b7ef648dd1c65be0ab2660244d /vendor
parent    32d96f40c3ab46a4c918a9099266748a54cb0772 (diff)
Update common, image, and storage deps
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/Azure/go-ansiterm/SECURITY.md | 41
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/Makefile | 18
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/log/format.go | 85
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/log/hook.go | 145
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go | 22
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go | 69
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go | 77
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/oc/span.go | 14
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go | 14
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/network.go | 38
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/validate.go | 37
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/config.go | 31
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/network.go | 32
-rw-r--r--  vendor/github.com/containers/common/libnetwork/types/const.go | 12
-rw-r--r--  vendor/github.com/containers/common/libnetwork/types/network.go | 33
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 20
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 14
-rw-r--r--  vendor/github.com/containers/common/pkg/util/util.go | 90
-rw-r--r--  vendor/github.com/containers/image/v5/copy/multiple.go | 85
-rw-r--r--  vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go | 74
-rw-r--r--  vendor/github.com/containers/image/v5/internal/manifest/list.go | 30
-rw-r--r--  vendor/github.com/containers/image/v5/internal/manifest/oci_index.go | 96
-rw-r--r--  vendor/github.com/containers/image/v5/internal/set/set.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 124
-rw-r--r--  vendor/github.com/containers/image/v5/types/types.go | 4
-rw-r--r--  vendor/github.com/containers/storage/.cirrus.yml | 15
-rw-r--r--  vendor/github.com/containers/storage/.golangci.yml | 64
-rw-r--r--  vendor/github.com/containers/storage/Makefile | 47
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/check.go | 1086
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 24
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/aufs.go | 25
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 39
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/copy/copy_linux.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/counter.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/device_setup.go | 6
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 60
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/driver.go | 15
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 13
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_darwin.go | 10
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_freebsd.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_linux.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_solaris.go | 7
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_unsupported.go | 10
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_windows.go | 10
-rw-r--r--  vendor/github.com/containers/storage/drivers/fsdiff.go | 10
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/check.go | 47
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/mount.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 241
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota.go | 19
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/template.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/driver.go | 36
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 9
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs.go | 13
-rw-r--r--  vendor/github.com/containers/storage/images.go | 16
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 108
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 32
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_linux.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_unix.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_windows.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes.go | 14
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_linux.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_windows.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/copy.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/diff.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/cache_linux.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/internal/compression.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/config/config.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go | 15
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/utils_unix.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/regexp/regexp.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/stringid/stringid.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/errors.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/init_windows.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/path.go | 1
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/rm.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_common.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_darwin.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_freebsd.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_linux.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_openbsd.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_solaris.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/truncindex/truncindex.go | 4
-rw-r--r--  vendor/github.com/containers/storage/storage.conf | 11
-rw-r--r--  vendor/github.com/containers/storage/store.go | 948
-rw-r--r--  vendor/github.com/containers/storage/types/errors.go | 37
-rw-r--r--  vendor/github.com/containers/storage/types/options.go | 51
-rw-r--r--  vendor/github.com/containers/storage/types/options_darwin.go | 9
-rw-r--r--  vendor/github.com/containers/storage/types/options_freebsd.go | 5
-rw-r--r--  vendor/github.com/containers/storage/types/options_linux.go | 38
-rw-r--r--  vendor/github.com/containers/storage/types/options_windows.go | 5
-rw-r--r--  vendor/github.com/containers/storage/types/storage_test.conf | 10
-rw-r--r--  vendor/github.com/containers/storage/types/utils.go | 11
-rw-r--r--  vendor/github.com/coreos/go-oidc/v3/oidc/jose.go | 1
-rw-r--r--  vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go | 4
-rw-r--r--  vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go | 133
-rw-r--r--  vendor/github.com/coreos/go-oidc/v3/oidc/verify.go | 46
-rw-r--r--  vendor/github.com/google/go-containerregistry/pkg/name/registry.go | 6
-rw-r--r--  vendor/github.com/google/trillian/.gitignore | 25
-rw-r--r--  vendor/github.com/google/trillian/.golangci.yaml | 36
-rw-r--r--  vendor/github.com/google/trillian/AUTHORS | 14
-rw-r--r--  vendor/github.com/google/trillian/BUILD.bazel | 55
-rw-r--r--  vendor/github.com/google/trillian/CHANGELOG.md | 1139
-rw-r--r--  vendor/github.com/google/trillian/CODEOWNERS | 21
-rw-r--r--  vendor/github.com/google/trillian/CONTRIBUTING.md | 58
-rw-r--r--  vendor/github.com/google/trillian/CONTRIBUTORS | 39
-rw-r--r--  vendor/github.com/google/trillian/LICENSE | 202
-rw-r--r--  vendor/github.com/google/trillian/PULL_REQUEST_TEMPLATE.md | 15
-rw-r--r--  vendor/github.com/google/trillian/README.md | 318
-rw-r--r--  vendor/github.com/google/trillian/cloudbuild.yaml | 187
-rw-r--r--  vendor/github.com/google/trillian/cloudbuild_master.yaml | 165
-rw-r--r--  vendor/github.com/google/trillian/cloudbuild_pr.yaml | 175
-rw-r--r--  vendor/github.com/google/trillian/cloudbuild_tag.yaml | 51
-rw-r--r--  vendor/github.com/google/trillian/codecov.yml | 22
-rw-r--r--  vendor/github.com/google/trillian/gen.go | 22
-rw-r--r--  vendor/github.com/google/trillian/trillian.pb.go | 806
-rw-r--r--  vendor/github.com/google/trillian/trillian.proto | 241
-rw-r--r--  vendor/github.com/google/trillian/trillian_admin_api.pb.go | 621
-rw-r--r--  vendor/github.com/google/trillian/trillian_admin_api.proto | 107
-rw-r--r--  vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go | 311
-rw-r--r--  vendor/github.com/google/trillian/trillian_log_api.pb.go | 2070
-rw-r--r--  vendor/github.com/google/trillian/trillian_log_api.proto | 363
-rw-r--r--  vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go | 461
-rw-r--r--  vendor/github.com/google/trillian/types/internal/tls/tls.go | 713
-rw-r--r--  vendor/github.com/google/trillian/types/logroot.go | 102
-rw-r--r--  vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md | 9
-rw-r--r--  vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS | 1
-rw-r--r--  vendor/github.com/hashicorp/go-retryablehttp/LICENSE | 2
-rw-r--r--  vendor/github.com/hashicorp/go-retryablehttp/client.go | 16
-rw-r--r--  vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go | 3
-rw-r--r--  vendor/github.com/imdario/mergo/README.md | 20
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/utils.go | 9
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go | 2
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go | 2
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/zpool.go | 3
-rw-r--r--  vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 12
-rw-r--r--  vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go | 5
-rw-r--r--  vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go | 6
-rw-r--r--  vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go | 12
-rw-r--r--  vendor/github.com/onsi/ginkgo/v2/types/version.go | 2
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go | 49
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go | 210
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go | 29
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go | 665
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go | 36
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go | 6
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go | 21
-rw-r--r--  vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go | 7
-rw-r--r--  vendor/golang.org/x/oauth2/internal/oauth2.go | 2
-rw-r--r--  vendor/golang.org/x/oauth2/internal/token.go | 60
-rw-r--r--  vendor/golang.org/x/oauth2/token.go | 19
-rw-r--r--  vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go | 11
-rw-r--r--  vendor/golang.org/x/tools/go/packages/golist.go | 23
-rw-r--r--  vendor/golang.org/x/tools/go/packages/packages.go | 3
-rw-r--r--  vendor/golang.org/x/tools/go/types/objectpath/objectpath.go | 764
-rw-r--r--  vendor/golang.org/x/tools/internal/event/tag/tag.go | 59
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/bexport.go | 852
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/bimport.go | 907
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go | 15
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iexport.go | 19
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iimport.go | 9
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go | 9
-rw-r--r--  vendor/golang.org/x/tools/internal/gocommand/invoke.go | 18
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/types.go | 9
-rw-r--r--  vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go | 588
-rw-r--r--  vendor/modules.txt | 54
189 files changed, 4750 insertions, 12864 deletions
diff --git a/vendor/github.com/Azure/go-ansiterm/SECURITY.md b/vendor/github.com/Azure/go-ansiterm/SECURITY.md
new file mode 100644
index 000000000..e138ec5d6
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/SECURITY.md
@@ -0,0 +1,41 @@
+<!-- BEGIN MICROSOFT SECURITY.MD V0.0.8 BLOCK -->
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+<!-- END MICROSOFT SECURITY.MD BLOCK -->
diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile
index 742c76d84..d8eb30b86 100644
--- a/vendor/github.com/Microsoft/hcsshim/Makefile
+++ b/vendor/github.com/Microsoft/hcsshim/Makefile
@@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
tar -zcf $@ -C rootfs .
rm -rf rootfs
--include deps/cmd/gcs.gomake
--include deps/cmd/gcstools.gomake
--include deps/cmd/hooks/wait-paths.gomake
--include deps/cmd/tar2ext4.gomake
--include deps/internal/tools/snp-report.gomake
-
-# Implicit rule for includes that define Go targets.
-%.gomake: $(SRCROOT)/Makefile
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
@mkdir -p $(dir $@)
- @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
- @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
- @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
- @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
- @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
- @/bin/echo -e '\tmv $$@.new $$@' >> $@.new
- @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
- mv $@.new $@
+ GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
@mkdir -p bin
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go
new file mode 100644
index 000000000..4b6500333
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go
@@ -0,0 +1,85 @@
+package log
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net"
+ "reflect"
+ "time"
+
+ "github.com/containerd/containerd/log"
+)
+
+const TimeFormat = log.RFC3339NanoFixed
+
+func FormatTime(t time.Time) string {
+ return t.Format(TimeFormat)
+}
+
+// DurationFormat formats a [time.Duration] log entry.
+//
+// A nil value signals an error with the formatting.
+type DurationFormat func(time.Duration) interface{}
+
+func DurationFormatString(d time.Duration) interface{} { return d.String() }
+func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() }
+func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() }
+
+// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`.
+//
+// See FormatEnabled for more information.
+func FormatIO(ctx context.Context, v interface{}) string {
+ m := make(map[string]string)
+ m["type"] = reflect.TypeOf(v).String()
+
+ switch t := v.(type) {
+ case net.Conn:
+ m["localAddress"] = formatAddr(t.LocalAddr())
+ m["remoteAddress"] = formatAddr(t.RemoteAddr())
+ case interface{ Addr() net.Addr }:
+ m["address"] = formatAddr(t.Addr())
+ default:
+ return Format(ctx, t)
+ }
+
+ return Format(ctx, m)
+}
+
+func formatAddr(a net.Addr) string {
+ return a.Network() + "://" + a.String()
+}
+
+// Format formats an object into a JSON string, without any indentation or
+// HTML escapes.
+// Context is used to output a log warning if the conversion fails.
+//
+// This is intended primarily for `trace.StringAttribute()`
+func Format(ctx context.Context, v interface{}) string {
+ b, err := encode(v)
+ if err != nil {
+ G(ctx).WithError(err).Warning("could not format value")
+ return ""
+ }
+
+ return string(b)
+}
+
+func encode(v interface{}) ([]byte, error) {
+ return encodeBuffer(&bytes.Buffer{}, v)
+}
+
+func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) {
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", "")
+
+ if err := enc.Encode(v); err != nil {
+		err = fmt.Errorf("could not marshal %T to JSON for logging: %w", v, err)
+ return nil, err
+ }
+
+ // encoder.Encode appends a newline to the end
+ return bytes.TrimSpace(buf.Bytes()), nil
+}
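The new format.go centralizes JSON encoding for log output: a json.Encoder with HTML escaping disabled, with the trailing newline that Encoder.Encode appends trimmed off. A minimal standalone sketch of that approach (names here are illustrative, not the vendored package's API):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// encodeNoHTMLEscape mirrors the encode/encodeBuffer pair above:
// HTML escaping off, and Encoder.Encode's trailing newline trimmed.
func encodeNoHTMLEscape(v interface{}) ([]byte, error) {
	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false) // keep "<", ">", "&" literal in log fields
	if err := enc.Encode(v); err != nil {
		return nil, err
	}
	return bytes.TrimSpace(buf.Bytes()), nil
}

func main() {
	b, _ := encodeNoHTMLEscape(map[string]string{"cmd": "a < b && c > d"})
	fmt.Println(string(b)) // {"cmd":"a < b && c > d"}
}
```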
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
index 8f8940592..94c6d0918 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
@@ -1,23 +1,58 @@
package log
import (
+ "bytes"
+ "reflect"
+ "time"
+
"github.com/Microsoft/hcsshim/internal/logfields"
+ "github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
-// Hook serves to intercept and format `logrus.Entry`s before they are passed
-// to the ETW hook.
+const nullString = "null"
+
+// Hook intercepts and formats a [logrus.Entry] before it is logged.
//
-// The containerd shim discards the (formatted) logrus output, and outputs only via ETW.
-// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and
-// then re-output via the ETW hook.
-type Hook struct{}
+// The shim either outputs the logs through an ETW hook, discarding the (formatted)
+// logrus output, or writes them to a pipe for logging binaries to consume.
+// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output
+// by the shim.
+type Hook struct {
+ // EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON.
+ // Variables of [bytes.Buffer] will be converted to []byte.
+ //
+ // Default is false.
+ EncodeAsJSON bool
+
+	// TimeFormat specifies the format for [time.Time] variables.
+	// An empty string disables formatting.
+	// When disabled, the fallback is the JSON encoding, if enabled.
+ //
+ // Default is [github.com/containerd/containerd/log.RFC3339NanoFixed].
+ TimeFormat string
+
+	// DurationFormat converts [time.Duration] fields to an appropriate encoding.
+	// nil disables formatting.
+	// When disabled, the fallback is the JSON encoding, if enabled.
+ //
+ // Default is [DurationFormatString], which appends a duration unit after the value.
+ DurationFormat DurationFormat
+
+ // AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to
+ // the entry from the span context stored in [logrus.Entry.Context], if it exists.
+ AddSpanContext bool
+}
var _ logrus.Hook = &Hook{}
func NewHook() *Hook {
- return &Hook{}
+ return &Hook{
+ TimeFormat: log.RFC3339NanoFixed,
+ DurationFormat: DurationFormatString,
+ AddSpanContext: true,
+ }
}
func (h *Hook) Levels() []logrus.Level {
@@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level {
}
func (h *Hook) Fire(e *logrus.Entry) (err error) {
+ // JSON encode, if necessary, then add span information
+ h.encode(e)
h.addSpanContext(e)
return nil
}
+// encode loops through all the fields in the [logrus.Entry] and encodes them according to
+// the settings in [Hook].
+// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for
+// fields of type [time.Time].
+//
+// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or
+// errors will be encoded via a [json.Marshal] (with HTML escaping disabled).
+// Channel- and function-typed fields, as well as unsafe pointers, are left alone and not encoded.
+//
+// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false,
+// then this is a no-op.
+func (h *Hook) encode(e *logrus.Entry) {
+ d := e.Data
+
+ formatTime := h.TimeFormat != ""
+ formatDuration := h.DurationFormat != nil
+ if !(h.EncodeAsJSON || formatTime || formatDuration) {
+ return
+ }
+
+ for k, v := range d {
+ // encode types with dedicated formatting options first
+
+ if vv, ok := v.(time.Time); formatTime && ok {
+ d[k] = vv.Format(h.TimeFormat)
+ continue
+ }
+
+ if vv, ok := v.(time.Duration); formatDuration && ok {
+ d[k] = h.DurationFormat(vv)
+ continue
+ }
+
+ // general case JSON encoding
+
+ if !h.EncodeAsJSON {
+ continue
+ }
+
+ switch vv := v.(type) {
+ // built in types
+ // "json" marshals errors as "{}", so leave alone here
+ case bool, string, error, uintptr,
+ int8, int16, int32, int64, int,
+ uint8, uint32, uint64, uint,
+ float32, float64:
+ continue
+
+ // Rather than setting d[k] = vv.String(), JSON encode []byte value, since it
+ // may be a binary payload and not representable as a string.
+ // `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`,
+ // so cannot use `vv.Bytes`.
+ // Could move to below the `reflect.Indirect()` call below, but
+ // that would require additional typematching and dereferencing.
+ // Easier to keep these duplicate branches here.
+ case bytes.Buffer:
+ v = vv.Bytes()
+ case *bytes.Buffer:
+ v = vv.Bytes()
+ }
+
+ // dereference pointer or interface variables
+ rv := reflect.Indirect(reflect.ValueOf(v))
+ // check if `v` is a null pointer
+ if !rv.IsValid() {
+ d[k] = nullString
+ continue
+ }
+
+ switch rv.Kind() {
+ case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice:
+ default:
+ // Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal
+ // Chan, Func: not supported by json
+ // Interface, Pointer: dereferenced above
+ // UnsafePointer: not supported by json, not safe to de-reference; leave alone
+ continue
+ }
+
+ b, err := encode(v)
+ if err != nil {
+			// A returned error would be written to stderr (ie, to `panic.log`) and would
+			// stop the remaining hooks (ie, exporting to ETW) from firing. So add encoding
+			// errors to the entry data to be written out, but keep on processing.
+ d[k+"-"+logrus.ErrorKey] = err.Error()
+ // keep the original `v` as the value,
+ continue
+ }
+ d[k] = string(b)
+ }
+}
+
func (h *Hook) addSpanContext(e *logrus.Entry) {
ctx := e.Context
- if ctx == nil {
+ if !h.AddSpanContext || ctx == nil {
return
}
span := trace.FromContext(ctx)
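The reworked Hook rewrites entry fields in place before logrus formatters and later hooks see them. A hedged sketch of that idea, reduced to the DurationFormat case (the type below is illustrative; the vendored Hook lives in an internal package):

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

// durationHook is an illustrative stand-in for the vendored Hook's
// DurationFormat handling: rewrite entry fields before formatting.
type durationHook struct{}

func (durationHook) Levels() []logrus.Level { return logrus.AllLevels }

func (durationHook) Fire(e *logrus.Entry) error {
	for k, v := range e.Data {
		if d, ok := v.(time.Duration); ok {
			e.Data[k] = d.String() // "1.5s" instead of the raw nanosecond count
		}
	}
	return nil
}

func main() {
	logrus.AddHook(durationHook{})
	logrus.WithField("elapsed", 1500*time.Millisecond).Info("done")
}
```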
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go
index d51e0fd89..d1ef15096 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"errors"
- "strings"
"sync/atomic"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
@@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) {
}
pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
- buf := bytes.NewBuffer(b[:0])
- if err := encode(buf, pp); err != nil {
+ b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp)
+ if err != nil {
return "", err
}
- return strings.TrimSpace(buf.String()), nil
+ return string(b), nil
}
// ScrubBridgeCreate scrubs requests sent over the bridge of type
@@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) {
return nil, err
}
- buf := &bytes.Buffer{}
- if err := encode(buf, m); err != nil {
+ b, err := encode(m)
+ if err != nil {
return nil, err
}
- return bytes.TrimSpace(buf.Bytes()), nil
-}
-
-func encode(buf *bytes.Buffer, v interface{}) error {
- enc := json.NewEncoder(buf)
- enc.SetEscapeHTML(false)
- if err := enc.Encode(v); err != nil {
- return err
- }
- return nil
+ return b, nil
}
func isRequestBase(m genMap) bool {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
new file mode 100644
index 000000000..71df25b8d
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
@@ -0,0 +1,69 @@
+package oc
+
+import (
+ "errors"
+ "io"
+ "net"
+ "os"
+
+ "github.com/containerd/containerd/errdefs"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there
+// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error)
+
+func toStatusCode(err error) codes.Code {
+ // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
+ // wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
+ // context timeout or cancelled error
+ if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
+ return s.Code()
+ }
+
+ switch {
+ // case isAny(err):
+ // return codes.Cancelled
+ case isAny(err, os.ErrInvalid):
+ return codes.InvalidArgument
+ case isAny(err, os.ErrDeadlineExceeded):
+ return codes.DeadlineExceeded
+ case isAny(err, os.ErrNotExist):
+ return codes.NotFound
+ case isAny(err, os.ErrExist):
+ return codes.AlreadyExists
+ case isAny(err, os.ErrPermission):
+ return codes.PermissionDenied
+ // case isAny(err):
+ // return codes.ResourceExhausted
+ case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer):
+ return codes.FailedPrecondition
+ // case isAny(err):
+ // return codes.Aborted
+ // case isAny(err):
+ // return codes.OutOfRange
+ // case isAny(err):
+ // return codes.Unimplemented
+ case isAny(err, io.ErrNoProgress):
+ return codes.Internal
+ // case isAny(err):
+ // return codes.Unavailable
+ case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF):
+ return codes.DataLoss
+ // case isAny(err):
+ // return codes.Unauthenticated
+ default:
+ return codes.Unknown
+ }
+}
+
+// isAny returns true if errors.Is is true for any of the provided errors, errs.
+func isAny(err error, errs ...error) bool {
+ for _, e := range errs {
+ if errors.Is(err, e) {
+ return true
+ }
+ }
+ return false
+}
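toStatusCode works on wrapped errors because isAny is built on errors.Is, which walks the %w chain. A small runnable illustration of that property (isAny is copied from the hunk above; the rest is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"google.golang.org/grpc/codes"
)

// isAny returns true if errors.Is is true for any of the provided errors.
func isAny(err error, errs ...error) bool {
	for _, e := range errs {
		if errors.Is(err, e) {
			return true
		}
	}
	return false
}

func main() {
	// A wrapped error still matches, because errors.Is unwraps the %w chain.
	wrapped := fmt.Errorf("opening layer: %w", os.ErrNotExist)
	if isAny(wrapped, os.ErrNotExist) {
		fmt.Println(codes.NotFound) // prints "NotFound"
	}
}
```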
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
index f428bdaf7..28f8f43a9 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
@@ -3,19 +3,26 @@ package oc
import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
+ "google.golang.org/grpc/codes"
+
+ "github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/logfields"
)
-var _ = (trace.Exporter)(&LogrusExporter{})
+const spanMessage = "Span"
+
+var _errorCodeKey = logrus.ErrorKey + "Code"
// LogrusExporter is an OpenCensus `trace.Exporter` that exports
// `trace.SpanData` to logrus output.
-type LogrusExporter struct {
-}
+type LogrusExporter struct{}
+
+var _ trace.Exporter = &LogrusExporter{}
// ExportSpan exports `s` based on the following rules:
//
-// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
-// `s.ParentSpanID` for correlation
+// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`,
+// `s.SpanID`, and `s.ParentSpanID` for correlation
//
// 2. Any calls to .Annotate will not be supported.
//
@@ -23,21 +30,57 @@ type LogrusExporter struct {
// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
// providing `s.Status.Message` as the error value.
func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
- // Combine all span annotations with traceID, spanID, parentSpanID
- baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
- baseEntry.Data["traceID"] = s.TraceID.String()
- baseEntry.Data["spanID"] = s.SpanID.String()
- baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
- baseEntry.Data["startTime"] = s.StartTime
- baseEntry.Data["endTime"] = s.EndTime
- baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
- baseEntry.Data["name"] = s.Name
- baseEntry.Time = s.StartTime
+ if s.DroppedAnnotationCount > 0 {
+ logrus.WithFields(logrus.Fields{
+ "name": s.Name,
+ logfields.TraceID: s.TraceID.String(),
+ logfields.SpanID: s.SpanID.String(),
+ "dropped": s.DroppedAttributeCount,
+ "maxAttributes": len(s.Attributes),
+ }).Warning("span had dropped attributes")
+ }
+
+ entry := log.L.Dup()
+ // Combine all span annotations with span data (eg, trace ID, span ID, parent span ID,
+ // error, status code)
+	// (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we
+	// can skip overhead in entry.WithFields() and add them directly to entry.Data.
+ // Preallocate ahead of time, since we should add, at most, 10 additional entries
+ data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10)
+
+	// Default log entry may have preexisting/application-wide data
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range s.Attributes {
+ data[k] = v
+ }
+
+ data[logfields.Name] = s.Name
+ data[logfields.TraceID] = s.TraceID.String()
+ data[logfields.SpanID] = s.SpanID.String()
+ data[logfields.ParentSpanID] = s.ParentSpanID.String()
+ data[logfields.StartTime] = s.StartTime
+ data[logfields.EndTime] = s.EndTime
+ data[logfields.Duration] = s.EndTime.Sub(s.StartTime)
+ if sk := spanKindToString(s.SpanKind); sk != "" {
+ data["spanKind"] = sk
+ }
level := logrus.InfoLevel
if s.Status.Code != 0 {
level = logrus.ErrorLevel
- baseEntry.Data[logrus.ErrorKey] = s.Status.Message
+
+		// don't overwrite existing "error" or "errorCode" attributes
+ if _, ok := data[logrus.ErrorKey]; !ok {
+ data[logrus.ErrorKey] = s.Status.Message
+ }
+ if _, ok := data[_errorCodeKey]; !ok {
+ data[_errorCodeKey] = codes.Code(s.Status.Code).String()
+ }
}
- baseEntry.Log(level, "Span")
+
+ entry.Data = data
+ entry.Time = s.StartTime
+ entry.Log(level, spanMessage)
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
index 0e2b7e9bf..726078432 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
@@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample()
func SetSpanStatus(span *trace.Span, err error) {
status := trace.Status{}
if err != nil {
- // TODO: JTERRY75 - Handle errors in a non-generic way
- status.Code = trace.StatusCodeUnknown
+ status.Code = int32(toStatusCode(err))
status.Message = err.Error()
}
span.SetStatus(status)
@@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) {
var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer)
var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
+
+func spanKindToString(sk int) string {
+ switch sk {
+ case trace.SpanKindClient:
+ return "client"
+ case trace.SpanKindServer:
+ return "server"
+ default:
+ return ""
+ }
+}
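With this change, SetSpanStatus records a specific gRPC code instead of the generic Unknown. A hedged usage sketch; since internal/oc cannot be imported from outside hcsshim, the equivalent logic is inlined below with a hard-coded mapping for the one error used:

```go
package main

import (
	"context"
	"os"

	"go.opencensus.io/trace"
	"google.golang.org/grpc/codes"
)

// setSpanStatus inlines the patched oc.SetSpanStatus; toStatusCode would
// return codes.NotFound for the os.ErrNotExist used below.
func setSpanStatus(span *trace.Span, err error) {
	status := trace.Status{}
	if err != nil {
		status.Code = int32(codes.NotFound)
		status.Message = err.Error()
	}
	span.SetStatus(status)
}

func doWork(ctx context.Context) (err error) {
	_, span := trace.StartSpan(ctx, "example::doWork")
	defer func() { setSpanStatus(span, err); span.End() }()
	return os.ErrNotExist // exporters now see a specific code, not Unknown
}

func main() { _ = doWork(context.Background()) }
```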
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go
index bfcc15769..7dfa1e594 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go
@@ -23,20 +23,14 @@ type (
)
type explicitAccess struct {
- //nolint:structcheck
accessPermissions accessMask
- //nolint:structcheck
- accessMode accessMode
- //nolint:structcheck
- inheritance inheritMode
- //nolint:structcheck
- trustee trustee
+ accessMode accessMode
+ inheritance inheritMode
+ trustee trustee
}
type trustee struct {
- //nolint:unused,structcheck
- multipleTrustee *trustee
- //nolint:unused,structcheck
+ multipleTrustee *trustee
multipleTrusteeOperation int32
trusteeForm trusteeForm
trusteeType trusteeType
diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go
index e9121252a..8180b49b2 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/network.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/network.go
@@ -17,6 +17,7 @@ import (
"github.com/containernetworking/cni/libcni"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
@@ -295,6 +296,43 @@ func (n *cniNetwork) DefaultInterfaceName() string {
return cniDeviceName
}
+// NetworkInfo returns the network information about binary path,
+// package version and program version.
+func (n *cniNetwork) NetworkInfo() types.NetworkInfo {
+ path := ""
+ packageVersion := ""
+ for _, p := range n.cniPluginDirs {
+ ver := cutil.PackageVersion(p)
+ if ver != cutil.UnknownPackage {
+ path = p
+ packageVersion = ver
+ break
+ }
+ }
+
+ info := types.NetworkInfo{
+ Backend: types.CNI,
+ Package: packageVersion,
+ Path: path,
+ }
+
+ dnsPath := filepath.Join(path, "dnsname")
+ dnsPackage := cutil.PackageVersion(dnsPath)
+ dnsProgram, err := cutil.ProgramVersionDnsname(dnsPath)
+ if err != nil {
+ logrus.Infof("Failed to get the dnsname plugin version: %v", err)
+ }
+ if _, err := os.Stat(dnsPath); err == nil {
+ info.DNS = types.DNSNetworkInfo{
+ Path: dnsPath,
+ Package: dnsPackage,
+ Version: dnsProgram,
+ }
+ }
+
+ return info
+}
+
func (n *cniNetwork) Network(nameOrID string) (*types.Network, error) {
network, err := n.getNetwork(nameOrID)
if err != nil {
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
index 14f4052d8..adf615552 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
@@ -81,6 +81,43 @@ func ValidateSubnets(network *types.Network, addGateway bool, usedNetworks []*ne
return nil
}
+func ValidateRoutes(routes []types.Route) error {
+ for _, route := range routes {
+ err := ValidateRoute(route)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ValidateRoute(route types.Route) error {
+ if route.Destination.IP == nil {
+ return fmt.Errorf("route destination ip nil")
+ }
+
+ if route.Destination.Mask == nil {
+ return fmt.Errorf("route destination mask nil")
+ }
+
+ if route.Gateway == nil {
+ return fmt.Errorf("route gateway nil")
+ }
+
+ // Reparse to ensure destination is valid.
+ ip, ipNet, err := net.ParseCIDR(route.Destination.String())
+ if err != nil {
+ return fmt.Errorf("route destination invalid: %w", err)
+ }
+
+ // check that destination is a network and not an address
+ if !ip.Equal(ipNet.IP) {
+ return fmt.Errorf("route destination invalid")
+ }
+
+ return nil
+}
+
func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOptions) error {
if namespacePath == "" {
return errors.New("namespacePath is empty")
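The destination check relies on net.ParseCIDR returning both the given IP and the masked network: the two only match when the destination is a network address, not a host address. A runnable sketch (ValidateRoute lives in an internal package, so an equivalent is inlined here for illustration):

```go
package main

import (
	"fmt"
	"net"

	"github.com/containers/common/libnetwork/types"
)

// validateRoute inlines the ValidateRoute check added above.
func validateRoute(r types.Route) error {
	if r.Destination.IP == nil || r.Destination.Mask == nil || r.Gateway == nil {
		return fmt.Errorf("route destination or gateway missing")
	}
	ip, ipNet, err := net.ParseCIDR(r.Destination.String())
	if err != nil {
		return fmt.Errorf("route destination invalid: %w", err)
	}
	if !ip.Equal(ipNet.IP) { // 10.89.0.5/24 is an address, not a network
		return fmt.Errorf("route destination invalid")
	}
	return nil
}

func main() {
	_, dst, _ := net.ParseCIDR("10.89.0.0/24")
	r := types.Route{Destination: types.IPNet{IPNet: *dst}, Gateway: net.ParseIP("10.0.0.1")}
	fmt.Println(validateRoute(r)) // <nil>
}
```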
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
index 297338ffb..aaf7843be 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/config.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -198,6 +198,13 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
if err != nil {
return nil, err
}
+ case types.NoDefaultRoute:
+ val, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, err
+ }
+			// Rust only supports "true" or "false", while Go can parse 1 and 0 as well, so we need to normalize it
+ newNetwork.Options[types.NoDefaultRoute] = strconv.FormatBool(val)
default:
return nil, fmt.Errorf("unsupported bridge network option %s", key)
@@ -237,6 +244,12 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
return nil, err
}
+	// validate routes
+ err = internalutil.ValidateRoutes(newNetwork.Routes)
+ if err != nil {
+ return nil, err
+ }
+
newNetwork.Created = time.Now()
if !defaultNet {
@@ -317,6 +330,24 @@ func createIpvlanOrMacvlan(network *types.Network) error {
if err != nil {
return err
}
+ case types.NoDefaultRoute:
+ val, err := strconv.ParseBool(value)
+ if err != nil {
+ return err
+ }
+		// Rust only supports "true" or "false", while Go can parse 1 and 0 as well, so we need to normalize it
+ network.Options[types.NoDefaultRoute] = strconv.FormatBool(val)
+ case types.BclimOption:
+ if isMacVlan {
+ _, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return fmt.Errorf("failed to parse %q option: %w", key, err)
+ }
+ // do not fallthrough for macvlan
+ break
+ }
+		// bclim is only valid for macvlan, not ipvlan, so fall through to the error case
+ fallthrough
default:
return fmt.Errorf("unsupported %s network option %s", driver, key)
}
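The normalization trick used for no_default_route (and bclim's neighbor cases) is plain strconv round-tripping: parse with Go's permissive ParseBool, re-serialize with FormatBool so the Rust side only ever sees "true" or "false". A minimal runnable demonstration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Go accepts "1", "t", "TRUE", ... but netavark (Rust) only understands
	// "true"/"false", so the option value is re-serialized with FormatBool.
	for _, in := range []string{"1", "T", "false"} {
		v, err := strconv.ParseBool(in)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q -> %q\n", in, strconv.FormatBool(v))
	}
}
```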
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
index 77dda2483..cadf5e718 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/network.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
@@ -336,6 +337,37 @@ func (n *netavarkNetwork) DefaultInterfaceName() string {
return defaultBridgeName
}
+// NetworkInfo returns the network information about binary path,
+// package version and program version.
+func (n *netavarkNetwork) NetworkInfo() types.NetworkInfo {
+ path := n.netavarkBinary
+ packageVersion := cutil.PackageVersion(path)
+ programVersion, err := cutil.ProgramVersion(path)
+ if err != nil {
+ logrus.Infof("Failed to get the netavark version: %v", err)
+ }
+ info := types.NetworkInfo{
+ Backend: types.Netavark,
+ Version: programVersion,
+ Package: packageVersion,
+ Path: path,
+ }
+
+ dnsPath := n.aardvarkBinary
+ dnsPackage := cutil.PackageVersion(dnsPath)
+ dnsProgram, err := cutil.ProgramVersion(dnsPath)
+ if err != nil {
+ logrus.Infof("Failed to get the aardvark version: %v", err)
+ }
+ info.DNS = types.DNSNetworkInfo{
+ Package: dnsPackage,
+ Path: dnsPath,
+ Version: dnsProgram,
+ }
+
+ return info
+}
+
func (n *netavarkNetwork) Network(nameOrID string) (*types.Network, error) {
network, err := n.getNetwork(nameOrID)
if err != nil {
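What a caller sees from the new NetworkInfo() method, using the public types added in this update; all field values below are illustrative, not real output:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/common/libnetwork/types"
)

func main() {
	// Illustrative values only; real data comes from ContainerNetwork.NetworkInfo().
	info := types.NetworkInfo{
		Backend: types.Netavark,
		Version: "netavark 1.6.0",
		Package: "netavark-1.6.0-1.fc38.x86_64",
		Path:    "/usr/libexec/podman/netavark",
		DNS: types.DNSNetworkInfo{
			Path:    "/usr/libexec/podman/aardvark-dns",
			Version: "aardvark-dns 1.6.0",
		},
	}
	b, _ := json.MarshalIndent(info, "", "  ")
	fmt.Println(string(b))
}
```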
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
index e367f9ad3..83103ef6e 100644
--- a/vendor/github.com/containers/common/libnetwork/types/const.go
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -36,11 +36,13 @@ const (
IPVLANModeL3s = "l3s"
// valid network options
- VLANOption = "vlan"
- MTUOption = "mtu"
- ModeOption = "mode"
- IsolateOption = "isolate"
- MetricOption = "metric"
+ VLANOption = "vlan"
+ MTUOption = "mtu"
+ ModeOption = "mode"
+ IsolateOption = "isolate"
+ MetricOption = "metric"
+ NoDefaultRoute = "no_default_route"
+ BclimOption = "bclim"
)
type NetworkBackend string
diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go
index b8804bf6b..94087fd37 100644
--- a/vendor/github.com/containers/common/libnetwork/types/network.go
+++ b/vendor/github.com/containers/common/libnetwork/types/network.go
@@ -34,6 +34,10 @@ type ContainerNetwork interface {
// DefaultNetworkName will return the default network name
// for this interface.
DefaultNetworkName() string
+
+	// NetworkInfo returns the network information about backend type,
+	// binary path, package version and so on.
+ NetworkInfo() NetworkInfo
}
// Network describes the Network attributes.
@@ -50,6 +54,8 @@ type Network struct {
Created time.Time `json:"created,omitempty"`
// Subnets to use for this network.
Subnets []Subnet `json:"subnets,omitempty"`
+ // Routes to use for this network.
+ Routes []Route `json:"routes,omitempty"`
// IPv6Enabled if set to true an ipv6 subnet should be created for this net.
IPv6Enabled bool `json:"ipv6_enabled"`
// Internal is whether the Network should not have external routes
@@ -80,6 +86,22 @@ type NetworkUpdateOptions struct {
RemoveDNSServers []string `json:"remove_dns_servers,omitempty"`
}
+// NetworkInfo contains the network information.
+type NetworkInfo struct {
+ Backend NetworkBackend `json:"backend"`
+ Version string `json:"version,omitempty"`
+ Package string `json:"package,omitempty"`
+ Path string `json:"path,omitempty"`
+ DNS DNSNetworkInfo `json:"dns,omitempty"`
+}
+
+// DNSNetworkInfo contains the DNS information.
+type DNSNetworkInfo struct {
+ Version string `json:"version,omitempty"`
+ Package string `json:"package,omitempty"`
+ Path string `json:"path,omitempty"`
+}
+
// IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods.
type IPNet struct {
net.IPNet
@@ -169,6 +191,17 @@ type Subnet struct {
LeaseRange *LeaseRange `json:"lease_range,omitempty"`
}
+type Route struct {
+ // Destination for this route in CIDR form.
+ // swagger:strfmt string
+ Destination IPNet `json:"destination"`
+ // Gateway IP for this route.
+ // swagger:strfmt string
+ Gateway net.IP `json:"gateway"`
+ // Metric for this route. Optional.
+ Metric *uint32 `json:"metric,omitempty"`
+}
+
// LeaseRange contains the range where IP are leased.
type LeaseRange struct {
// StartIP first IP in the subnet which should be used to assign ips.
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index dbf32a80c..3ed71f662 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -584,6 +584,10 @@ type NetworkConfig struct {
// are always assigned randomly.
DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"`
+	// DefaultRootlessNetworkCmd is used to set the default rootless network
+	// program, either "slirp4netns" (default) or "pasta".
+ DefaultRootlessNetworkCmd string `toml:"default_rootless_network_cmd,omitempty"`
+
// NetworkConfigDir is where network configuration files are stored.
NetworkConfigDir string `toml:"network_config_dir,omitempty"`
@@ -591,6 +595,10 @@ type NetworkConfig struct {
// for netavark rootful bridges with dns enabled. This can be necessary
// when other dns forwarders run on the machine. 53 is used if unset.
DNSBindPort uint16 `toml:"dns_bind_port,omitempty,omitzero"`
+
+ // PastaOptions contains a default list of pasta(1) options that should
+ // be used when running pasta.
+ PastaOptions []string `toml:"pasta_options,omitempty"`
}
type SubnetPool struct {
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 71526bdeb..4e5a3d009 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -314,9 +314,9 @@ default_sysctls = [
#
#netavark_plugin_dirs = [
# "/usr/local/libexec/netavark",
-# "/usr/libexec/netavark",
-# "/usr/local/lib/netavark",
-# "/usr/lib/netavark",
+# "/usr/libexec/netavark",
+# "/usr/local/lib/netavark",
+# "/usr/lib/netavark",
#]
# The network name of the default network to attach pods to.
@@ -344,6 +344,13 @@ default_sysctls = [
# {"base" = "10.128.0.0/9", "size" = 24},
#]
+
+
+# Configure which rootless network program to use by default. Valid options are
+# `slirp4netns` (default) and `pasta`.
+#
+#default_rootless_network_cmd = "slirp4netns"
+
# Path to the directory where network configuration files are located.
# For the CNI backend the default is "/etc/cni/net.d" as root
# and "$HOME/.config/cni/net.d" as rootless.
@@ -359,6 +366,11 @@ default_sysctls = [
#
#dns_bind_port = 53
+# A list of default pasta options that should be used when running pasta.
+# It accepts the pasta CLI options; see pasta(1) for the full list.
+#
+#pasta_options = []
+
[engine]
# Index to the active service
#
@@ -407,7 +419,7 @@ default_sysctls = [
# Format is a single character [a-Z] or a comma separated sequence of
# `ctrl-<value>`, where `<value>` is one of:
# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
-#
+# Specifying "" disables this feature.
#detach_keys = "ctrl-p,ctrl-q"
# Determines whether engine will reserve ports on the host when they are
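Taken together, the two new keys let containers.conf switch rootless networking to pasta and pass it extra flags. A hedged example; the flag values are illustrative only:

```toml
[network]
# switch rootless networking from the slirp4netns default to pasta
default_rootless_network_cmd = "pasta"
# extra pasta(1) flags; --mtu is shown only as an example
pasta_options = ["--mtu", "1400"]
```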
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 28249e80e..b7aa5f292 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -215,12 +215,13 @@ func DefaultConfig() (*Config, error) {
UserNSSize: DefaultUserNSSize, // Deprecated
},
Network: NetworkConfig{
- DefaultNetwork: "podman",
- DefaultSubnet: DefaultSubnet,
- DefaultSubnetPools: DefaultSubnetPools,
- DNSBindPort: 0,
- CNIPluginDirs: DefaultCNIPluginDirs,
- NetavarkPluginDirs: DefaultNetavarkPluginDirs,
+ DefaultNetwork: "podman",
+ DefaultSubnet: DefaultSubnet,
+ DefaultSubnetPools: DefaultSubnetPools,
+ DefaultRootlessNetworkCmd: "slirp4netns",
+ DNSBindPort: 0,
+ CNIPluginDirs: DefaultCNIPluginDirs,
+ NetavarkPluginDirs: DefaultNetavarkPluginDirs,
},
Engine: *defaultEngineConfig,
Secrets: defaultSecretConfig(),
@@ -283,6 +284,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes")
c.VolumePluginTimeout = DefaultVolumePluginTimeout
+ c.CompressionFormat = "gzip"
c.HelperBinariesDir = defaultHelperBinariesDir
if additionalHelperBinariesDir != "" {
diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go
index 98890a686..44d4cda6e 100644
--- a/vendor/github.com/containers/common/pkg/util/util.go
+++ b/vendor/github.com/containers/common/pkg/util/util.go
@@ -1,6 +1,94 @@
package util
-import "regexp"
+import (
+ "bytes"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strings"
+)
+
+const (
+ UnknownPackage = "Unknown"
+)
+
+// Note: This function is copied from containers/podman libpod/util.go
+// Please see https://github.com/containers/common/pull/1460
+func queryPackageVersion(cmdArg ...string) string {
+ output := UnknownPackage
+ if 1 < len(cmdArg) {
+ cmd := exec.Command(cmdArg[0], cmdArg[1:]...)
+ if outp, err := cmd.Output(); err == nil {
+ output = string(outp)
+ if cmdArg[0] == "/usr/bin/dpkg" {
+ r := strings.Split(output, ": ")
+ queryFormat := `${Package}_${Version}_${Architecture}`
+ cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0])
+ if outp, err := cmd.Output(); err == nil {
+ output = string(outp)
+ }
+ }
+ }
+ if cmdArg[0] == "/sbin/apk" {
+ prefix := cmdArg[len(cmdArg)-1] + " is owned by "
+ output = strings.Replace(output, prefix, "", 1)
+ }
+ }
+ return strings.Trim(output, "\n")
+}
+
+// Note: This function is copied from containers/podman libpod/util.go
+// Please see https://github.com/containers/common/pull/1460
+func PackageVersion(program string) string { // program is full path
+ packagers := [][]string{
+ {"/usr/bin/rpm", "-q", "-f"},
+ {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu
+ {"/usr/bin/pacman", "-Qo"}, // Arch
+ {"/usr/bin/qfile", "-qv"}, // Gentoo (quick)
+ {"/usr/bin/equery", "b"}, // Gentoo (slow)
+ {"/sbin/apk", "info", "-W"}, // Alpine
+ {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD
+ }
+
+ for _, cmd := range packagers {
+ cmd = append(cmd, program)
+ if out := queryPackageVersion(cmd...); out != UnknownPackage {
+ return out
+ }
+ }
+ return UnknownPackage
+}
+
+// Note: This function is copied from containers/podman libpod/util.go
+// Please see https://github.com/containers/common/pull/1460
+func ProgramVersion(program string) (string, error) {
+ return programVersion(program, false)
+}
+
+func ProgramVersionDnsname(program string) (string, error) {
+ return programVersion(program, true)
+}
+
+func programVersion(program string, dnsname bool) (string, error) {
+ cmd := exec.Command(program, "--version")
+ var stdout bytes.Buffer
+ var stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ err := cmd.Run()
+ if err != nil {
+ return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err)
+ }
+
+ output := strings.TrimSuffix(stdout.String(), "\n")
+ // dnsname --version returns the information to stderr
+ if dnsname {
+ output = strings.TrimSuffix(stderr.String(), "\n")
+ }
+
+ return output, nil
+}
// StringInSlice determines if a string is in a string slice, returns bool
func StringInSlice(s string, sl []string) bool {
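The two helpers promoted into containers/common are public and straightforward to call; a hedged sketch (the binary path is illustrative, and PackageVersion falls back to "Unknown" when no packager owns the file):

```go
package main

import (
	"fmt"

	cutil "github.com/containers/common/pkg/util"
)

func main() {
	bin := "/usr/libexec/podman/netavark" // illustrative path

	// Asks each known packager (rpm, dpkg, pacman, ...) which package owns
	// the binary; returns "Unknown" if none does.
	fmt.Println(cutil.PackageVersion(bin))

	// Runs "<bin> --version" and returns the trimmed stdout.
	if v, err := cutil.ProgramVersion(bin); err == nil {
		fmt.Println(v)
	}
}
```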
diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go
index 097a18855..41ea1b11b 100644
--- a/vendor/github.com/containers/image/v5/copy/multiple.go
+++ b/vendor/github.com/containers/image/v5/copy/multiple.go
@@ -12,11 +12,41 @@ import (
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
+ digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
+type instanceCopyKind int
+
+const (
+ instanceCopyCopy instanceCopyKind = iota
+ instanceCopyClone
+)
+
+type instanceCopy struct {
+ op instanceCopyKind
+ sourceDigest digest.Digest
+}
+
+// prepareInstanceCopies prepares the list of instances which need to be copied to the manifest list.
+func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy {
+ res := []instanceCopy{}
+ for i, instanceDigest := range instanceDigests {
+ if options.ImageListSelection == CopySpecificImages &&
+ !slices.Contains(options.Instances, instanceDigest) {
+ logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
+ continue
+ }
+ res = append(res, instanceCopy{
+ op: instanceCopyCopy,
+ sourceDigest: instanceDigest,
+ })
+ }
+ return res
+}
+
// copyMultipleImages copies some or all of an image list's instances, using
// policyContext to validate source image admissibility.
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
@@ -88,44 +118,35 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Copy each image, or just the ones we want to copy, in turn.
instanceDigests := updatedList.Instances()
- imagesToCopy := len(instanceDigests)
- if options.ImageListSelection == CopySpecificImages {
- imagesToCopy = len(options.Instances)
- }
- c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests))
- updates := make([]manifest.ListUpdate, len(instanceDigests))
- instancesCopied := 0
- for i, instanceDigest := range instanceDigests {
- if options.ImageListSelection == CopySpecificImages &&
- !slices.Contains(options.Instances, instanceDigest) {
- update, err := updatedList.Instance(instanceDigest)
+ instanceEdits := []internalManifest.ListEdit{}
+ instanceCopyList := prepareInstanceCopies(instanceDigests, options)
+ c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests))
+ for i, instance := range instanceCopyList {
+ // Update instances to be edited by their `ListOperation` and
+ // populate necessary fields.
+ switch instance.op {
+ case instanceCopyCopy:
+ logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
+ c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
+ unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
+ updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
}
- logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
- // Record the digest/size/type of the manifest that we didn't copy.
- updates[i] = update
- continue
- }
- logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
- c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
- unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
- updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
- if err != nil {
- return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err)
- }
- instancesCopied++
- // Record the result of a possible conversion here.
- update := manifest.ListUpdate{
- Digest: updatedManifestDigest,
- Size: int64(len(updatedManifest)),
- MediaType: updatedManifestType,
+ // Record the result of a possible conversion here.
+ instanceEdits = append(instanceEdits, internalManifest.ListEdit{
+ ListOperation: internalManifest.ListOpUpdate,
+ UpdateOldDigest: instance.sourceDigest,
+ UpdateDigest: updatedManifestDigest,
+ UpdateSize: int64(len(updatedManifest)),
+ UpdateMediaType: updatedManifestType})
+ default:
+ return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
}
- updates[i] = update
}
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
- if err = updatedList.UpdateInstances(updates); err != nil {
+ if err = updatedList.EditInstances(instanceEdits); err != nil {
return nil, fmt.Errorf("updating manifest list: %w", err)
}
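
The hunk above replaces the positional `updates` array with digest-keyed edits: prepareInstanceCopies first decides which instances to copy, and each successful copy is recorded as a ListOpUpdate edit. A self-contained sketch of the selection rule it applies, not part of the vendored code (plain strings stand in for digest.Digest; requires golang.org/x/exp):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    // selectInstances mirrors the filter in prepareInstanceCopies: when only
    // specific images are requested, instances not in `wanted` are skipped.
    func selectInstances(all, wanted []string, specificOnly bool) []string {
        res := []string{}
        for _, d := range all {
            if specificOnly && !slices.Contains(wanted, d) {
                continue
            }
            res = append(res, d)
        }
        return res
    }

    func main() {
        all := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}
        fmt.Println(selectInstances(all, []string{"sha256:bbb"}, true)) // [sha256:bbb]
    }
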
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
index e98c5c99e..516ca7ac9 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
@@ -69,27 +69,71 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
-func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
- if len(updates) != len(list.Manifests) {
- return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
+func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
+ editInstances := []ListEdit{}
+ for i, instance := range updates {
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: index.Manifests[i].Digest,
+ UpdateDigest: instance.Digest,
+ UpdateSize: instance.Size,
+ UpdateMediaType: instance.MediaType,
+ ListOperation: ListOpUpdate})
}
- for i := range updates {
- if err := updates[i].Digest.Validate(); err != nil {
- return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
- }
- list.Manifests[i].Digest = updates[i].Digest
- if updates[i].Size < 0 {
- return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
- }
- list.Manifests[i].Size = updates[i].Size
- if updates[i].MediaType == "" {
- return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
+ return index.editInstances(editInstances)
+}
+
+func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
+ addedEntries := []Schema2ManifestDescriptor{}
+ for i, editInstance := range editInstances {
+ switch editInstance.ListOperation {
+ case ListOpUpdate:
+ if err := editInstance.UpdateOldDigest.Validate(); err != nil {
+ return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
+ }
+ if err := editInstance.UpdateDigest.Validate(); err != nil {
+ return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
+ }
+ targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
+ return m.Digest == editInstance.UpdateOldDigest
+ })
+ if targetIndex == -1 {
+ return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
+ }
+ index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+ if editInstance.UpdateSize < 0 {
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
+ }
+ index.Manifests[targetIndex].Size = editInstance.UpdateSize
+ if editInstance.UpdateMediaType == "" {
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+ }
+ index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+ case ListOpAdd:
+ addInstance := Schema2ManifestDescriptor{
+ Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType},
+ Schema2PlatformSpec{
+ OS: editInstance.AddPlatform.OS,
+ Architecture: editInstance.AddPlatform.Architecture,
+ OSVersion: editInstance.AddPlatform.OSVersion,
+ OSFeatures: editInstance.AddPlatform.OSFeatures,
+ Variant: editInstance.AddPlatform.Variant,
+ },
+ }
+ addedEntries = append(addedEntries, addInstance)
+ default:
+ return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
}
- list.Manifests[i].MediaType = updates[i].MediaType
+ }
+ if len(addedEntries) != 0 {
+ index.Manifests = append(index.Manifests, addedEntries...)
}
return nil
}
+func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
+ return index.editInstances(editInstances)
+}
+
func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
// ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list.
return list.ChooseInstance(ctx)
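
Schema2ListPublic.editInstances locates the descriptor to rewrite by its old digest rather than by position, which is what lets UpdateInstances delegate to it and lets callers pass sparse edit lists. A runnable sketch of that lookup, not part of the vendored code (`desc` stands in for Schema2ManifestDescriptor):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    type desc struct{ Digest string }

    func main() {
        manifests := []desc{{"sha256:aaa"}, {"sha256:bbb"}}
        old := "sha256:bbb"
        // The ListOpUpdate path: find the entry whose digest matches UpdateOldDigest.
        i := slices.IndexFunc(manifests, func(m desc) bool { return m.Digest == old })
        if i == -1 {
            fmt.Println("digest not found") // editInstances returns an error here
            return
        }
        manifests[i].Digest = "sha256:ccc" // rewrite the matched entry in place
        fmt.Println(manifests)             // [{sha256:aaa} {sha256:ccc}]
    }
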
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go
index 07c7d85f4..3eae3a304 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/list.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go
@@ -55,6 +55,10 @@ type List interface {
// SystemContext ( or for the current platform if the SystemContext doesn't specify any detail ) and preferGzip for compression which
// when configured to OptionalBoolTrue and chooses best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined.
ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
+ // EditInstances edits the list's instances. It takes a slice of ListEdit values, where each
+ // element either modifies an existing instance in the manifest or adds a new one; the
+ // operation is selected by each edit's ListOperation field.
+ EditInstances([]ListEdit) error
}
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
@@ -65,6 +69,32 @@ type ListUpdate struct {
MediaType string
}
+type ListOp int
+
+const (
+ listOpInvalid ListOp = iota
+ ListOpAdd
+ ListOpUpdate
+)
+
+// ListEdit includes the fields which a List's EditInstances() method will modify.
+type ListEdit struct {
+ ListOperation ListOp
+
+ // If ListOperation == ListOpUpdate (essentially the previous UpdateInstances), all fields must be set.
+ UpdateOldDigest digest.Digest
+ UpdateDigest digest.Digest
+ UpdateSize int64
+ UpdateMediaType string
+
+ // If ListOperation == ListOpAdd, all fields must be set.
+ AddDigest digest.Digest
+ AddSize int64
+ AddMediaType string
+ AddPlatform *imgspecv1.Platform
+ AddAnnotations map[string]string
+}
+
// ListPublicFromBlob parses a list of manifests.
// This is publicly visible as c/image/manifest.ListFromBlob.
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
index 8e911678e..6a16c5929 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
@@ -64,26 +64,68 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
- if len(updates) != len(index.Manifests) {
- return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
+ editInstances := []ListEdit{}
+ for i, instance := range updates {
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: index.Manifests[i].Digest,
+ UpdateDigest: instance.Digest,
+ UpdateSize: instance.Size,
+ UpdateMediaType: instance.MediaType,
+ ListOperation: ListOpUpdate})
}
- for i := range updates {
- if err := updates[i].Digest.Validate(); err != nil {
- return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
- }
- index.Manifests[i].Digest = updates[i].Digest
- if updates[i].Size < 0 {
- return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
- }
- index.Manifests[i].Size = updates[i].Size
- if updates[i].MediaType == "" {
- return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
+ return index.editInstances(editInstances)
+}
+
+func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
+ addedEntries := []imgspecv1.Descriptor{}
+ for i, editInstance := range editInstances {
+ switch editInstance.ListOperation {
+ case ListOpUpdate:
+ if err := editInstance.UpdateOldDigest.Validate(); err != nil {
+ return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
+ }
+ if err := editInstance.UpdateDigest.Validate(); err != nil {
+ return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
+ }
+ targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool {
+ return m.Digest == editInstance.UpdateOldDigest
+ })
+ if targetIndex == -1 {
+ return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
+ }
+ index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+ if editInstance.UpdateSize < 0 {
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
+ }
+ index.Manifests[targetIndex].Size = editInstance.UpdateSize
+ if editInstance.UpdateMediaType == "" {
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+ }
+ index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+ case ListOpAdd:
+ addedEntries = append(addedEntries, imgspecv1.Descriptor{
+ MediaType: editInstance.AddMediaType,
+ Size: editInstance.AddSize,
+ Digest: editInstance.AddDigest,
+ Platform: editInstance.AddPlatform,
+ Annotations: editInstance.AddAnnotations})
+ default:
+ return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
}
- index.Manifests[i].MediaType = updates[i].MediaType
+ }
+ if len(addedEntries) != 0 {
+ index.Manifests = append(index.Manifests, addedEntries...)
+ slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool {
+ return !instanceIsZstd(a) && instanceIsZstd(b)
+ })
}
return nil
}
+func (index *OCI1Index) EditInstances(editInstances []ListEdit) error {
+ return index.editInstances(editInstances)
+}
+
// instanceIsZstd returns true if instance is a zstd instance otherwise false.
func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
@@ -131,24 +173,20 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
for manifestIndex, d := range index.Manifests {
candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
if d.Platform != nil {
- foundPlatform := false
- for platformIndex, wantedPlatform := range wantedPlatforms {
- imagePlatform := imgspecv1.Platform{
- Architecture: d.Platform.Architecture,
- OS: d.Platform.OS,
- OSVersion: d.Platform.OSVersion,
- OSFeatures: slices.Clone(d.Platform.OSFeatures),
- Variant: d.Platform.Variant,
- }
- if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
- foundPlatform = true
- candidate.platformIndex = platformIndex
- break
- }
+ imagePlatform := imgspecv1.Platform{
+ Architecture: d.Platform.Architecture,
+ OS: d.Platform.OS,
+ OSVersion: d.Platform.OSVersion,
+ OSFeatures: slices.Clone(d.Platform.OSFeatures),
+ Variant: d.Platform.Variant,
}
- if !foundPlatform {
+ platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
+ return platform.MatchesPlatform(imagePlatform, wantedPlatform)
+ })
+ if platformIndex == -1 {
continue
}
+ candidate.platformIndex = platformIndex
}
if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
bestMatch = &candidate
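
When ListOpAdd entries are appended, OCI1IndexPublic.editInstances re-sorts the index so that zstd-compressed instances come last. A sketch of that ordering, not part of the vendored code, using the less-based SortStableFunc from the golang.org/x/exp/slices version vendored here (newer x/exp releases take a cmp function instead):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    type inst struct {
        digest string
        isZstd bool
    }

    func main() {
        list := []inst{{"sha256:a", true}, {"sha256:b", false}, {"sha256:c", true}}
        slices.SortStableFunc(list, func(a, b inst) bool {
            return !a.isZstd && b.isZstd // gzip/uncompressed instances sort before zstd ones
        })
        fmt.Println(list) // [{sha256:b false} {sha256:a true} {sha256:c true}]
    }
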
diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go
index 5c7bcabef..3e777fe12 100644
--- a/vendor/github.com/containers/image/v5/internal/set/set.go
+++ b/vendor/github.com/containers/image/v5/internal/set/set.go
@@ -24,11 +24,11 @@ func NewWithValues[E comparable](values ...E) *Set[E] {
return s
}
-func (s Set[E]) Add(v E) {
+func (s *Set[E]) Add(v E) {
s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}
-func (s Set[E]) Delete(v E) {
+func (s *Set[E]) Delete(v E) {
delete(s.m, v)
}
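
The receiver change above is a consistency fix rather than a behavior change: the struct only wraps a map header, so Add and Delete mutate the same underlying map with either receiver kind. A minimal mirror of the internal type, not the vendored code itself:

    package main

    import "fmt"

    type Set[E comparable] struct{ m map[E]struct{} }

    func New[E comparable]() *Set[E] { return &Set[E]{m: map[E]struct{}{}} }

    func (s *Set[E]) Add(v E)           { s.m[v] = struct{}{} }
    func (s *Set[E]) Delete(v E)        { delete(s.m, v) }
    func (s *Set[E]) Contains(v E) bool { _, ok := s.m[v]; return ok }

    func main() {
        s := New[string]()
        s.Add("docker.io")
        s.Delete("docker.io")
        fmt.Println(s.Contains("docker.io")) // false
    }
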
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 0e3003cec..2e79d0ffb 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -48,9 +48,9 @@ var (
ErrNotSupported = errors.New("not supported")
)
-// authPath combines a path to a file with container registry access keys,
-// along with expected properties of that path (currently just whether it's)
-// legacy format or not.
+// authPath combines a path to a file with container registry credentials,
+// along with expected properties of that path (currently just whether it's
+// legacy format or not).
type authPath struct {
path string
legacyFormat bool
@@ -87,12 +87,12 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
switch helper {
// Special-case the built-in helpers for auth files.
case sysregistriesv2.AuthenticationFileHelper:
- desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
- if ch, exists := auths.CredHelpers[key]; exists {
+ desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ if ch, exists := fileContents.CredHelpers[key]; exists {
if isNamespaced {
return false, "", unsupportedNamespaceErr(ch)
}
- desc, err := setAuthToCredHelper(ch, key, username, password)
+ desc, err := setCredsInCredHelper(ch, key, username, password)
if err != nil {
return false, "", err
}
@@ -100,7 +100,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
newCreds := dockerAuthConfig{Auth: creds}
- auths.AuthConfigs[key] = newCreds
+ fileContents.AuthConfigs[key] = newCreds
return true, "", nil
})
// External helpers.
@@ -108,7 +108,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
if isNamespaced {
err = unsupportedNamespaceErr(helper)
} else {
- desc, err = setAuthToCredHelper(helper, key, username, password)
+ desc, err = setCredsInCredHelper(helper, key, username, password)
}
}
if err != nil {
@@ -156,17 +156,17 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
case sysregistriesv2.AuthenticationFileHelper:
for _, path := range getAuthFilePaths(sys, homedir.Get()) {
// parse returns an empty map in case the path doesn't exist.
- auths, err := path.parse()
+ fileContents, err := path.parse()
if err != nil {
return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
// Credential helpers in the auth file have a
// direct mapping to a registry, so we can just
// walk the map.
- for registry := range auths.CredHelpers {
+ for registry := range fileContents.CredHelpers {
allKeys.Add(registry)
}
- for key := range auths.AuthConfigs {
+ for key := range fileContents.AuthConfigs {
key := normalizeAuthFileKey(key, path.legacyFormat)
if key == normalizedDockerIORegistry {
key = "docker.io"
@@ -176,7 +176,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
}
// External helpers.
default:
- creds, err := listAuthsFromCredHelper(helper)
+ creds, err := listCredsInCredHelper(helper)
if err != nil {
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
if errors.Is(err, exec.ErrNotFound) {
@@ -193,19 +193,19 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Now use `GetCredentials` to the specific auth configs for each
// previously listed registry.
- authConfigs := make(map[string]types.DockerAuthConfig)
+ allCreds := make(map[string]types.DockerAuthConfig)
for _, key := range allKeys.Values() {
- authConf, err := GetCredentials(sys, key)
+ creds, err := GetCredentials(sys, key)
if err != nil {
// Note: we rely on the logging in `GetCredentials`.
return nil, err
}
- if authConf != (types.DockerAuthConfig{}) {
- authConfigs[key] = authConf
+ if creds != (types.DockerAuthConfig{}) {
+ allCreds[key] = creds
}
}
- return authConfigs, nil
+ return allCreds, nil
}
// getAuthFilePaths returns a slice of authPaths based on the system context
@@ -285,13 +285,13 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
// Anonymous function to query credentials from auth files.
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
for _, path := range getAuthFilePaths(sys, homeDir) {
- authConfig, err := findCredentialsInFile(key, registry, path)
+ creds, err := findCredentialsInFile(key, registry, path)
if err != nil {
return types.DockerAuthConfig{}, "", err
}
- if authConfig != (types.DockerAuthConfig{}) {
- return authConfig, path.path, nil
+ if creds != (types.DockerAuthConfig{}) {
+ return creds, path.path, nil
}
}
return types.DockerAuthConfig{}, "", nil
@@ -320,7 +320,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
// This intentionally uses "registry", not "key"; we don't support namespaced
// credentials in helpers, but a "registry" is a valid parent of "key".
helperKey = registry
- creds, err = getAuthFromCredHelper(helper, registry)
+ creds, err = getCredsFromCredHelper(helper, registry)
}
if err != nil {
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
@@ -360,14 +360,14 @@ func GetAuthentication(sys *types.SystemContext, key string) (string, string, er
// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
// it exists only to allow testing it with an artificial home directory.
func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
- auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
+ creds, err := getCredentialsWithHomeDir(sys, key, homeDir)
if err != nil {
return "", "", err
}
- if auth.IdentityToken != "" {
+ if creds.IdentityToken != "" {
return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
}
- return auth.Username, auth.Password, nil
+ return creds.Username, creds.Password, nil
}
// RemoveAuthentication removes credentials for `key` from all possible
@@ -393,7 +393,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
return
}
- err := deleteAuthFromCredHelper(helper, key)
+ err := deleteCredsFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
@@ -411,13 +411,13 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
- _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
- if innerHelper, exists := auths.CredHelpers[key]; exists {
+ _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ if innerHelper, exists := fileContents.CredHelpers[key]; exists {
removeFromCredHelper(innerHelper)
}
- if _, ok := auths.AuthConfigs[key]; ok {
+ if _, ok := fileContents.AuthConfigs[key]; ok {
isLoggedIn = true
- delete(auths.AuthConfigs, key)
+ delete(fileContents.AuthConfigs, key)
}
return true, "", multiErr
})
@@ -454,23 +454,23 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
- _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
- for registry, helper := range auths.CredHelpers {
+ _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ for registry, helper := range fileContents.CredHelpers {
// Helpers in auth files are expected
// to exist, so no special treatment
// for them.
- if err := deleteAuthFromCredHelper(helper, registry); err != nil {
+ if err := deleteCredsFromCredHelper(helper, registry); err != nil {
return false, "", err
}
}
- auths.CredHelpers = make(map[string]string)
- auths.AuthConfigs = make(map[string]dockerAuthConfig)
+ fileContents.CredHelpers = make(map[string]string)
+ fileContents.AuthConfigs = make(map[string]dockerAuthConfig)
return true, "", nil
})
// External helpers.
default:
var creds map[string]string
- creds, err = listAuthsFromCredHelper(helper)
+ creds, err = listCredsInCredHelper(helper)
if err != nil {
if errors.Is(err, exec.ErrNotFound) {
// It's okay if the helper doesn't exist.
@@ -480,7 +480,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
}
}
for registry := range creds {
- err = deleteAuthFromCredHelper(helper, registry)
+ err = deleteCredsFromCredHelper(helper, registry)
if err != nil {
break
}
@@ -497,7 +497,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
return multiErr
}
-func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
+func listCredsInCredHelper(credHelper string) (map[string]string, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
return helperclient.List(p)
@@ -543,40 +543,40 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
}
-// parse unmarshals the authentications stored in the auth.json file and returns it
+// parse unmarshals the credentials stored in the auth.json file and returns them
// or returns an empty dockerConfigFile data structure if auth.json does not exist
// if the file exists and is empty, this function returns an error.
func (path authPath) parse() (dockerConfigFile, error) {
- var auths dockerConfigFile
+ var fileContents dockerConfigFile
raw, err := os.ReadFile(path.path)
if err != nil {
if os.IsNotExist(err) {
- auths.AuthConfigs = map[string]dockerAuthConfig{}
- return auths, nil
+ fileContents.AuthConfigs = map[string]dockerAuthConfig{}
+ return fileContents, nil
}
return dockerConfigFile{}, err
}
if path.legacyFormat {
- if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
+ if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil {
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
}
- return auths, nil
+ return fileContents, nil
}
- if err = json.Unmarshal(raw, &auths); err != nil {
+ if err = json.Unmarshal(raw, &fileContents); err != nil {
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
}
- if auths.AuthConfigs == nil {
- auths.AuthConfigs = map[string]dockerAuthConfig{}
+ if fileContents.AuthConfigs == nil {
+ fileContents.AuthConfigs = map[string]dockerAuthConfig{}
}
- if auths.CredHelpers == nil {
- auths.CredHelpers = make(map[string]string)
+ if fileContents.CredHelpers == nil {
+ fileContents.CredHelpers = make(map[string]string)
}
- return auths, nil
+ return fileContents, nil
}
// modifyJSON finds an auth.json file, calls editor on the contents, and
@@ -585,7 +585,7 @@ func (path authPath) parse() (dockerConfigFile, error) {
//
// The editor may also return a human-readable description of the updated location; if it is "",
// the file itself is used.
-func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, string, error)) (string, error) {
+func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
path, _, err := getPathToAuth(sys)
if err != nil {
return "", err
@@ -599,17 +599,17 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
return "", err
}
- auths, err := path.parse()
+ fileContents, err := path.parse()
if err != nil {
return "", fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
- updated, description, err := editor(&auths)
+ updated, description, err := editor(&fileContents)
if err != nil {
return "", fmt.Errorf("updating %q: %w", path.path, err)
}
if updated {
- newData, err := json.MarshalIndent(auths, "", "\t")
+ newData, err := json.MarshalIndent(fileContents, "", "\t")
if err != nil {
return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err)
}
@@ -625,7 +625,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
return description, nil
}
-func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
+func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
creds, err := helperclient.Get(p, registry)
@@ -650,9 +650,9 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig,
}
}
-// setAuthToCredHelper stores (username, password) for registry in credHelper.
+// setCredsInCredHelper stores (username, password) for registry in credHelper.
// Returns a human-readable description of the destination, to be returned by SetCredentials.
-func setAuthToCredHelper(credHelper, registry, username, password string) (string, error) {
+func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
creds := &credentials.Credentials{
@@ -666,7 +666,7 @@ func setAuthToCredHelper(credHelper, registry, username, password string) (strin
return fmt.Sprintf("credential helper: %s", credHelper), nil
}
-func deleteAuthFromCredHelper(credHelper, registry string) error {
+func deleteCredsFromCredHelper(credHelper, registry string) error {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
return helperclient.Erase(p, registry)
@@ -675,7 +675,7 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
// findCredentialsInFile looks for credentials matching "key"
// (which is "registry" or a namespace in "registry") in "path".
func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) {
- auths, err := path.parse()
+ fileContents, err := path.parse()
if err != nil {
return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
@@ -683,9 +683,9 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
// First try cred helpers. They should always be normalized.
// This intentionally uses "registry", not "key"; we don't support namespaced
// credentials in helpers.
- if ch, exists := auths.CredHelpers[registry]; exists {
+ if ch, exists := fileContents.CredHelpers[registry]; exists {
logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path)
- return getAuthFromCredHelper(ch, registry)
+ return getCredsFromCredHelper(ch, registry)
}
// Support sub-registry namespaces in auth.
@@ -701,7 +701,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
// Repo or namespace keys are only supported as exact matches. For registry
// keys we prefer exact matches as well.
for _, key := range keys {
- if val, exists := auths.AuthConfigs[key]; exists {
+ if val, exists := fileContents.AuthConfigs[key]; exists {
return decodeDockerAuth(path.path, key, val)
}
}
@@ -715,7 +715,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
// The docker.io registry still uses the /v1/ key with a special host name,
// so account for that as well.
registry = normalizeRegistry(registry)
- for k, v := range auths.AuthConfigs {
+ for k, v := range fileContents.AuthConfigs {
if normalizeAuthFileKey(k, path.legacyFormat) == registry {
return decodeDockerAuth(path.path, k, v)
}
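
The renamed wrappers (getCredsFromCredHelper, setCredsInCredHelper, deleteCredsFromCredHelper, listCredsInCredHelper) all shell out to a docker-credential-<name> program through the docker-credential-helpers client. A sketch of that round trip, assuming a helper such as docker-credential-pass is installed on $PATH:

    package main

    import (
        "fmt"

        helperclient "github.com/docker/docker-credential-helpers/client"
        "github.com/docker/docker-credential-helpers/credentials"
    )

    func main() {
        p := helperclient.NewShellProgramFunc("docker-credential-pass")
        // Store, look up, then erase credentials for one registry.
        if err := helperclient.Store(p, &credentials.Credentials{
            ServerURL: "quay.io", Username: "user", Secret: "s3cret",
        }); err != nil {
            fmt.Println("store:", err)
            return
        }
        if creds, err := helperclient.Get(p, "quay.io"); err == nil {
            fmt.Println(creds.Username) // "user"
        }
        _ = helperclient.Erase(p, "quay.io")
    }
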
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 6ea414b86..33adb5f1d 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -585,9 +585,9 @@ type SystemContext struct {
// resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
// specific context.
PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
- // If not "", overrides the default path for the authentication file, but only new format files
+ // If not "", overrides the default path for the registry authentication file, but only new format files
AuthFilePath string
- // if not "", overrides the default path for the authentication file, but with the legacy format;
+ // if not "", overrides the default path for the registry authentication file, but with the legacy format;
// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
// in locations other than the home dir; openshift components should then set this field in those cases;
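
A short sketch of exercising the AuthFilePath override documented above; the path is a placeholder, and GetCredentials is the public lookup shown in the config.go hunks:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/pkg/docker/config"
        "github.com/containers/image/v5/types"
    )

    func main() {
        sys := &types.SystemContext{AuthFilePath: "/run/user/1000/containers/auth.json"}
        creds, err := config.GetCredentials(sys, "quay.io")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(creds.Username)
    }
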
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 4e3ba7317..85a3770c1 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -17,13 +17,13 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
- FEDORA_NAME: "fedora-37"
+ FEDORA_NAME: "fedora-38"
DEBIAN_NAME: "debian-12"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
- IMAGE_SUFFIX: "c20230405t152256z-f37f36d12"
+ IMAGE_SUFFIX: "c20230517t144652z-f38f37d12"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -56,7 +56,6 @@ gce_instance:
linux_testing: &linux_testing
depends_on:
- lint
- only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
gce_instance: # Only need to specify differences from defaults (above)
image_name: "${VM_IMAGE}"
@@ -127,10 +126,12 @@ lint_task:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
build_script: |
- echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list
apt-get update
apt-get install -y libbtrfs-dev libdevmapper-dev
- test_script: make TAGS=regex_precompile local-validate && make lint && make clean
+ test_script: |
+ make TAGS=regex_precompile local-validate
+ make lint
+ make clean
# Update metadata on VM images referenced by this repository state
@@ -168,7 +169,7 @@ vendor_task:
cross_task:
container:
- image: golang:1.17
+ image: golang:1.19
build_script: make cross
@@ -182,6 +183,6 @@ success_task:
- vendor
- cross
container:
- image: golang:1.17
+ image: golang:1.19
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true
diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml
index 755aa35c0..20968466c 100644
--- a/vendor/github.com/containers/storage/.golangci.yml
+++ b/vendor/github.com/containers/storage/.golangci.yml
@@ -4,68 +4,8 @@ run:
deadline: 5m
skip-dirs-use-default: true
linters:
- enable-all: true
+ enable:
+ - gofumpt
disable:
- - cyclop
- - deadcode
- - dogsled
- - dupl
- errcheck
- - errname
- - errorlint
- - exhaustive
- - exhaustivestruct
- - exhaustruct
- - forbidigo
- - forcetypeassert
- - funlen
- - gci
- - gochecknoglobals
- - gochecknoinits
- - gocognit
- - gocritic
- - gocyclo
- - godot
- - godox
- - goerr113
- - gofumpt
- - golint
- - gomnd
- - gosec
- - gosimple
- - govet
- - ifshort
- - ineffassign
- - interfacer
- - interfacebloat
- - ireturn
- - lll
- - maintidx
- - maligned
- - misspell
- - musttag
- - nakedret
- - nestif
- - nlreturn
- - nolintlint
- - nonamedreturns
- - nosnakecase
- - paralleltest
- - prealloc
- - predeclared
- - rowserrcheck
- - scopelint
- staticcheck
- - structcheck
- - stylecheck
- - tagliatelle
- - testpackage
- - thelper
- - unconvert
- - unparam
- - varcheck
- - varnamelen
- - wastedassign
- - whitespace
- - wrapcheck
- - wsl
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 52266da0f..6cb354c2c 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -1,13 +1,18 @@
-export GO111MODULE=off
-export GOPROXY=https://proxy.golang.org
-
.PHONY: \
all \
+ binary \
clean \
+ codespell \
+ containers-storage \
+ cross \
default \
docs \
+ gccgo \
help \
+ install \
+ install.docs \
install.tools \
+ lint \
local-binary \
local-cross \
local-gccgo \
@@ -15,33 +20,25 @@ export GOPROXY=https://proxy.golang.org
local-test-integration \
local-test-unit \
local-validate \
- lint \
- vendor
+ test-integration \
+ test-unit \
+ validate \
+ vendor \
+ vendor-in-container
-PACKAGE := github.com/containers/storage
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
-EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
NATIVETAGS :=
AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
GO ?= go
TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
-# Go module support: set `-mod=vendor` to use the vendored sources
-ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
- GO:=GO111MODULE=on $(GO)
- MOD_VENDOR=-mod=vendor
-endif
-
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
clean: ## remove all built files
$(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5
-sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go internal/*/*.go pkg/*/*.go pkg/*/*/*.go types/*.go)
-containers-storage: $(sources) ## build using gc on the host
- $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+containers-storage: ## build using gc on the host
+ $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
codespell:
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
@@ -49,15 +46,15 @@ codespell:
binary local-binary: containers-storage
local-gccgo gccgo: ## build using gccgo on the host
- GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
+ GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
local-cross cross: ## cross build the binaries for arm, darwin, and freebsd
@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \
- echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
- env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
+ echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
+ env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
done
docs: install.tools ## build the docs on the host
@@ -66,21 +63,17 @@ docs: install.tools ## build the docs on the host
local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests
local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
- @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+ @$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./...
local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
@cd tests; ./test_runner.bash
-local-validate validate: install.tools ## validate DCO and gofmt on the host
+local-validate validate: install.tools ## validate DCO on the host
@./hack/git-validation.sh
- @./hack/gofmt.sh
install.tools:
$(MAKE) -C tests/tools
-$(FFJSON):
- $(MAKE) -C tests/tools
-
install.docs: docs
$(MAKE) -C docs install
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index aa3ed3a5e..9dccaf226 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.46.1
+1.47.0-dev
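
The new check.go vendored below adds a consistency checker for the store. A sketch of how a caller might drive it, assuming Check and Repair are exposed on storage.Store with the signatures the vendored code suggests; opening the Store itself is environment-specific and elided:

    package main

    import (
        "fmt"

        "github.com/containers/storage"
    )

    func fsck(store storage.Store) error {
        report, err := store.Check(storage.CheckMost()) // quick checks only
        if err != nil {
            return err
        }
        for id, errs := range report.Layers {
            fmt.Printf("damaged layer %s: %v\n", id, errs)
        }
        // Repair is assumed to return the errors it could not fix.
        for _, unfixed := range store.Repair(report, storage.RepairEverything()) {
            fmt.Println("unfixed:", unfixed)
        }
        return nil
    }

    func main() {} // wiring up a real Store is omitted
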
diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go
new file mode 100644
index 000000000..81b5c3ab8
--- /dev/null
+++ b/vendor/github.com/containers/storage/check.go
@@ -0,0 +1,1086 @@
+package storage
+
+import (
+ "archive/tar"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ drivers "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/types"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver,
+ // but which is not known to or managed by the higher-level driver-agnostic logic.
+ ErrLayerUnaccounted = types.ErrLayerUnaccounted
+ // ErrLayerUnreferenced describes a layer which is not used by any image or container.
+ ErrLayerUnreferenced = types.ErrLayerUnreferenced
+ // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more
+ // files which were added in the layer appear to have changed. It may instead look like an
+ // unnamed "file integrity checksum failed" error.
+ ErrLayerIncorrectContentDigest = types.ErrLayerIncorrectContentDigest
+ // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was
+ // used to populate the layer produced a diff of a different size. We check the digest
+ // first, so it's highly unlikely you'll ever see this error.
+ ErrLayerIncorrectContentSize = types.ErrLayerIncorrectContentSize
+ // ErrLayerContentModified describes a layer which contains contents which should not be
+ // there, or for which ownership/permissions/dates have been changed.
+ ErrLayerContentModified = types.ErrLayerContentModified
+ // ErrLayerDataMissing describes a layer which is missing a big data item.
+ ErrLayerDataMissing = types.ErrLayerDataMissing
+ // ErrLayerMissing describes a layer which is the missing parent of a layer.
+ ErrLayerMissing = types.ErrLayerMissing
+ // ErrImageLayerMissing describes an image which claims to have a layer that we don't know
+ // about.
+ ErrImageLayerMissing = types.ErrImageLayerMissing
+ // ErrImageDataMissing describes an image which is missing a big data item.
+ ErrImageDataMissing = types.ErrImageDataMissing
+ // ErrImageDataIncorrectSize describes an image which has a big data item which looks like
+ // its size has changed, likely because it's been modified somehow.
+ ErrImageDataIncorrectSize = types.ErrImageDataIncorrectSize
+ // ErrContainerImageMissing describes a container which claims to be based on an image that
+ // we don't know about.
+ ErrContainerImageMissing = types.ErrContainerImageMissing
+ // ErrContainerDataMissing describes a container which is missing a big data item.
+ ErrContainerDataMissing = types.ErrContainerDataMissing
+ // ErrContainerDataIncorrectSize describes a container which has a big data item which looks
+ // like its size has changed, likely because it's been modified somehow.
+ ErrContainerDataIncorrectSize = types.ErrContainerDataIncorrectSize
+)
+
+const (
+ defaultMaximumUnreferencedLayerAge = 24 * time.Hour
+)
+
+// CheckOptions is the set of options for Check(), specifying which tests to perform.
+type CheckOptions struct {
+ LayerUnreferencedMaximumAge *time.Duration // maximum allowed age of unreferenced layers
+ LayerDigests bool // check that contents of image layer diffs can still be reconstructed
+ LayerMountable bool // check that layers are mountable
+ LayerContents bool // check that contents of image layers match their diffs, with no unexpected changes, requires LayerMountable
+ LayerData bool // check that associated "big" data items are present and can be read
+ ImageData bool // check that associated "big" data items are present, can be read, and match the recorded size
+ ContainerData bool // check that associated "big" data items are present and can be read
+}
+
+// CheckMost returns a CheckOptions with mostly just "quick" checks enabled.
+func CheckMost() *CheckOptions {
+ return &CheckOptions{
+ LayerDigests: true,
+ LayerMountable: true,
+ LayerContents: false,
+ LayerData: true,
+ ImageData: true,
+ ContainerData: true,
+ }
+}
+
+// CheckEverything returns a CheckOptions with every check enabled.
+func CheckEverything() *CheckOptions {
+ return &CheckOptions{
+ LayerDigests: true,
+ LayerMountable: true,
+ LayerContents: true,
+ LayerData: true,
+ ImageData: true,
+ ContainerData: true,
+ }
+}
+
+// CheckReport is a list of detected problems.
+type CheckReport struct {
+ Layers map[string][]error // damaged read-write layers
+ ROLayers map[string][]error // damaged read-only layers
+ layerParentsByLayerID map[string]string
+ layerOrder map[string]int
+ Images map[string][]error // damaged read-write images (including those with damaged layers)
+ ROImages map[string][]error // damaged read-only images (including those with damaged layers)
+ Containers map[string][]error // damaged containers (including those based on damaged images)
+}
+
+// RepairOptions is the set of options for Repair().
+type RepairOptions struct {
+ RemoveContainers bool // Remove damaged containers
+}
+
+// RepairEverything returns a RepairOptions with every optional remediation
+// enabled.
+func RepairEverything() *RepairOptions {
+ return &RepairOptions{
+ RemoveContainers: true,
+ }
+}
+
+// Check returns a list of problems with what's in the store, as a whole. It can be very expensive
+// to call.
+func (s *store) Check(options *CheckOptions) (CheckReport, error) {
+ var ignoreChownErrors bool
+ for _, o := range s.graphOptions {
+ if strings.Contains(o, "ignore_chown_errors") {
+ ignoreChownErrors = true
+ }
+ }
+
+ if options == nil {
+ options = CheckMost()
+ }
+
+ report := CheckReport{
+ Layers: make(map[string][]error),
+ ROLayers: make(map[string][]error),
+ layerParentsByLayerID: make(map[string]string), // layer ID -> its parent's ID, if there is one
+ layerOrder: make(map[string]int), // layer ID -> order for removal, if we needed to remove them all
+ Images: make(map[string][]error),
+ ROImages: make(map[string][]error),
+ Containers: make(map[string][]error),
+ }
+
+ // This map will track known layer IDs. If we have multiple stores, read-only ones can
+ // contain copies of layers that are in the read-write store, but we'll only ever be
+ // mounting or extracting contents from the read-write versions, since we always search it
+ // first. The boolean will track if the layer is referenced by at least one image or
+ // container.
+ referencedLayers := make(map[string]bool)
+ referencedROLayers := make(map[string]bool)
+
+ // This map caches the headers for items included in layer diffs.
+ diffHeadersByLayer := make(map[string][]*tar.Header)
+ var diffHeadersByLayerMutex sync.Mutex
+
+ // Walk the list of layer stores, looking at each layer that we didn't see in a
+ // previously-visited store.
+ if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
+ layers, err := store.Layers()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ isReadWrite := roLayerStoreIsReallyReadWrite(store)
+ readWriteDesc := ""
+ if !isReadWrite {
+ readWriteDesc = "read-only "
+ }
+ // Examine each layer in turn.
+ for i := range layers {
+ layer := layers[i]
+ id := layer.ID
+ // If we've already seen a layer with this ID, no need to process it again.
+ if _, checked := referencedLayers[id]; checked {
+ continue
+ }
+ if _, checked := referencedROLayers[id]; checked {
+ continue
+ }
+ // Note the parent of this layer, and add it to the map of known layers so
+ // that we know that we've visited it, but we haven't confirmed that it's
+ // used by anything.
+ report.layerParentsByLayerID[id] = layer.Parent
+ if isReadWrite {
+ referencedLayers[id] = false
+ } else {
+ referencedROLayers[id] = false
+ }
+ logrus.Debugf("checking %slayer %s", readWriteDesc, id)
+ // Check that all of the big data items are present and can be read. We
+ // have no digest or size information to compare the contents to (grumble),
+ // so we can't verify that the contents haven't been changed since they
+ // were stored.
+ if options.LayerData {
+ for _, name := range layer.BigDataNames {
+ func() {
+ rc, err := store.BigData(id, name)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ err := fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, ErrLayerDataMissing)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ defer rc.Close()
+ if _, err = io.Copy(io.Discard, rc); err != nil {
+ err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ }()
+ }
+ }
+ // Check that the content we get back when extracting the layer's contents
+ // match the recorded digest and size. A layer for which they're not given
+ // isn't a part of an image, and is likely the read-write layer for a
+ // container, and we can't vouch for the integrity of its contents.
+ // For each layer with known contents, record the headers for the layer's
+ // diff, which we can use to reconstruct the expected contents for the tree
+ // we see when the layer is mounted.
+ if options.LayerDigests && layer.UncompressedDigest != "" {
+ func() {
+ expectedDigest := layer.UncompressedDigest
+ // Double-check that the digest isn't invalid somehow.
+ if err := layer.UncompressedDigest.Validate(); err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Extract the diff.
+ uncompressed := archive.Uncompressed
+ diffOptions := DiffOptions{
+ Compression: &uncompressed,
+ }
+ diff, err := store.Diff("", id, &diffOptions)
+ if err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Digest and count the length of the diff.
+ digester := expectedDigest.Algorithm().Digester()
+ counter := ioutils.NewWriteCounter(digester.Hash())
+ reader := io.TeeReader(diff, counter)
+ var wg sync.WaitGroup
+ var archiveErr error
+ wg.Add(1)
+ go func(layerID string, diffReader io.Reader) {
+ // Read the diff, one item at a time.
+ tr := tar.NewReader(diffReader)
+ hdr, err := tr.Next()
+ for err == nil {
+ diffHeadersByLayerMutex.Lock()
+ diffHeadersByLayer[layerID] = append(diffHeadersByLayer[layerID], hdr)
+ diffHeadersByLayerMutex.Unlock()
+ hdr, err = tr.Next()
+ }
+ if !errors.Is(err, io.EOF) {
+ archiveErr = err
+ }
+ // consume any trailer after the EOF marker
+ io.Copy(io.Discard, diffReader)
+ wg.Done()
+ }(id, reader)
+ wg.Wait()
+ diff.Close()
+ if archiveErr != nil {
+ // Reading the diff didn't end as expected
+ diffHeadersByLayerMutex.Lock()
+ delete(diffHeadersByLayer, id)
+ diffHeadersByLayerMutex.Unlock()
+ archiveErr = fmt.Errorf("%slayer %s: %w", readWriteDesc, id, archiveErr)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], archiveErr)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], archiveErr)
+ }
+ return
+ }
+ if digester.Digest() != layer.UncompressedDigest {
+ // The diff digest didn't match.
+ diffHeadersByLayerMutex.Lock()
+ delete(diffHeadersByLayer, id)
+ diffHeadersByLayerMutex.Unlock()
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, ErrLayerIncorrectContentDigest)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ if layer.UncompressedSize != -1 && counter.Count != layer.UncompressedSize {
+ // We expected the diff to have a specific size, and
+ // it didn't match.
+ diffHeadersByLayerMutex.Lock()
+ delete(diffHeadersByLayer, id)
+ diffHeadersByLayerMutex.Unlock()
+ err := fmt.Errorf("%slayer %s: read %d bytes instead of %d bytes: %w", readWriteDesc, id, counter.Count, layer.UncompressedSize, ErrLayerIncorrectContentSize)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ }()
+ }
+ }
+ // At this point we're out of things that we can be sure will work in read-only
+ // stores, so skip the rest for any stores that aren't also read-write stores.
+ if !isReadWrite {
+ return struct{}{}, false, nil
+ }
+ // Content and mount checks are also things that we can only be sure will work in
+ // read-write stores.
+ for i := range layers {
+ layer := layers[i]
+ id := layer.ID
+ // Compare to what we see when we mount the layer and walk the tree, and
+ // flag cases where content is in the layer that shouldn't be there. The
+ // tar-split implementation of Diff() won't catch this problem by itself.
+ if options.LayerMountable {
+ func() {
+ // Mount the layer.
+ mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel})
+ if err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Unmount the layer when we're done in here.
+ defer func() {
+ if err := s.graphDriver.Put(id); err != nil {
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ }()
+ // If we're not looking at layer contents, or we didn't
+ // look at the diff for this layer, we're done here.
+ if !options.LayerDigests || layer.UncompressedDigest == "" || !options.LayerContents {
+ return
+ }
+ // Build a list of all of the changes in all of the layers
+ // that make up the tree we're looking at.
+ diffHeaderSet := [][]*tar.Header{}
+ // If we don't know _all_ of the changes that produced this
+ // layer, it's not part of an image, so we're done here.
+ for layerID := id; layerID != ""; layerID = report.layerParentsByLayerID[layerID] {
+ diffHeadersByLayerMutex.Lock()
+ layerChanges, haveChanges := diffHeadersByLayer[layerID]
+ diffHeadersByLayerMutex.Unlock()
+ if !haveChanges {
+ return
+ }
+ // The diff headers for this layer go _before_ those of
+ // layers that inherited some of its contents.
+ diffHeaderSet = append([][]*tar.Header{layerChanges}, diffHeaderSet...)
+ }
+ expectedCheckDirectory := newCheckDirectoryDefaults()
+ for _, diffHeaders := range diffHeaderSet {
+ expectedCheckDirectory.headers(diffHeaders)
+ }
+ // Scan the directory tree under the mount point.
+ actualCheckDirectory, err := newCheckDirectoryFromDirectory(mountPoint)
+ if err != nil {
+ err := fmt.Errorf("scanning contents of %slayer %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ return
+ }
+ // Every departure from our expectations is an error.
+ diffs := compareCheckDirectory(expectedCheckDirectory, actualCheckDirectory, ignoreChownErrors)
+ for _, diff := range diffs {
+ err := fmt.Errorf("%slayer %s: %s, %w", readWriteDesc, id, diff, ErrLayerContentModified)
+ if isReadWrite {
+ report.Layers[id] = append(report.Layers[id], err)
+ } else {
+ report.ROLayers[id] = append(report.ROLayers[id], err)
+ }
+ }
+ }()
+ }
+ }
+ // Check that we don't have any dangling parent layer references.
+ for id, parent := range report.layerParentsByLayerID {
+ // If this layer doesn't have a parent, no problem.
+ if parent == "" {
+ continue
+ }
+ // If we've already seen a layer with this parent ID, skip it.
+ if _, checked := referencedLayers[parent]; checked {
+ continue
+ }
+ if _, checked := referencedROLayers[parent]; checked {
+ continue
+ }
+ // We haven't seen a layer with the ID that this layer's record
+ // says is its parent's ID.
+ err := fmt.Errorf("%slayer %s: %w", readWriteDesc, parent, ErrLayerMissing)
+ report.Layers[id] = append(report.Layers[id], err)
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // This map will track examined images. If we have multiple stores, read-only ones can
+ // contain copies of images that are also in the read-write store, or the read-write store
+ // may contain a duplicate entry that refers to layers in the read-only stores, but when
+ // trying to export them, we only look at the first copy of the image.
+ examinedImages := make(map[string]struct{})
+
+ // Walk the list of image stores, looking at each image that we didn't see in a
+ // previously-visited store.
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
+ images, err := store.Images()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ isReadWrite := roImageStoreIsReallyReadWrite(store)
+ readWriteDesc := ""
+ if !isReadWrite {
+ readWriteDesc = "read-only "
+ }
+ // Examine each image in turn.
+ for i := range images {
+ image := images[i]
+ id := image.ID
+ // If we've already seen an image with this ID, skip it.
+ if _, checked := examinedImages[id]; checked {
+ continue
+ }
+ examinedImages[id] = struct{}{}
+ logrus.Debugf("checking %simage %s", readWriteDesc, id)
+ if options.ImageData {
+ // Check that all of the big data items are present and reading them
+ // back gives us the right amount of data. Even though we record
+ // digests that can be used to look them up, we don't know how they
+ // were calculated (they're only used as lookup keys), so do not try
+ // to check them.
+ for _, key := range image.BigDataNames {
+ func() {
+ data, err := store.BigData(id, key)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataMissing)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ return
+ }
+ err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, err)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ return
+ }
+ if int64(len(data)) != image.BigDataSizes[key] {
+ err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataIncorrectSize)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ return
+ }
+ }()
+ }
+ }
+ // Walk the layers list for the image. For every layer that the image uses
+ // that has errors, the layer's errors are also the image's errors.
+ examinedImageLayers := make(map[string]struct{})
+ for _, topLayer := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
+ if topLayer == "" {
+ continue
+ }
+ if _, checked := examinedImageLayers[topLayer]; checked {
+ continue
+ }
+ examinedImageLayers[topLayer] = struct{}{}
+ for layer := topLayer; layer != ""; layer = report.layerParentsByLayerID[layer] {
+ // The referenced layer should have a corresponding entry in
+ // one map or the other.
+ _, checked := referencedLayers[layer]
+ _, checkedRO := referencedROLayers[layer]
+ if !checked && !checkedRO {
+ err := fmt.Errorf("layer %s: %w", layer, ErrImageLayerMissing)
+ err = fmt.Errorf("%simage %s: %w", readWriteDesc, id, err)
+ if isReadWrite {
+ report.Images[id] = append(report.Images[id], err)
+ } else {
+ report.ROImages[id] = append(report.ROImages[id], err)
+ }
+ } else {
+ // Count this layer as referenced. Whether by the
+ // image or one of its child layers doesn't matter
+ // at this point.
+ if _, ok := referencedLayers[layer]; ok {
+ referencedLayers[layer] = true
+ }
+ if _, ok := referencedROLayers[layer]; ok {
+ referencedROLayers[layer] = true
+ }
+ }
+ if isReadWrite {
+ if len(report.Layers[layer]) > 0 {
+ report.Images[id] = append(report.Images[id], report.Layers[layer]...)
+ }
+ if len(report.ROLayers[layer]) > 0 {
+ report.Images[id] = append(report.Images[id], report.ROLayers[layer]...)
+ }
+ } else {
+ if len(report.Layers[layer]) > 0 {
+ report.ROImages[id] = append(report.ROImages[id], report.Layers[layer]...)
+ }
+ if len(report.ROLayers[layer]) > 0 {
+ report.ROImages[id] = append(report.ROImages[id], report.ROLayers[layer]...)
+ }
+ }
+ }
+ }
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // Iterate over each container in turn.
+ if _, _, err := readContainerStore(s, func() (struct{}, bool, error) {
+ containers, err := s.containerStore.Containers()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ for i := range containers {
+ container := containers[i]
+ id := container.ID
+ logrus.Debugf("checking container %s", id)
+ if options.ContainerData {
+ // Check that all of the big data items are present and reading them
+ // back gives us the right amount of data.
+ for _, key := range container.BigDataNames {
+ func() {
+ data, err := s.containerStore.BigData(id, key)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataMissing)
+ report.Containers[id] = append(report.Containers[id], err)
+ return
+ }
+ err = fmt.Errorf("container %s: data item %q: %w", id, key, err)
+ report.Containers[id] = append(report.Containers[id], err)
+ return
+ }
+ if int64(len(data)) != container.BigDataSizes[key] {
+ err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataIncorrectSize)
+ report.Containers[id] = append(report.Containers[id], err)
+ return
+ }
+ }()
+ }
+ }
+ // Look at the container's base image. If the image has errors, the image's errors
+ // are the container's errors.
+ if container.ImageID != "" {
+ if _, checked := examinedImages[container.ImageID]; !checked {
+ err := fmt.Errorf("image %s: %w", container.ImageID, ErrContainerImageMissing)
+ report.Containers[id] = append(report.Containers[id], err)
+ }
+ if len(report.Images[container.ImageID]) > 0 {
+ report.Containers[id] = append(report.Containers[id], report.Images[container.ImageID]...)
+ }
+ if len(report.ROImages[container.ImageID]) > 0 {
+ report.Containers[id] = append(report.Containers[id], report.ROImages[container.ImageID]...)
+ }
+ }
+ // Count the container's layer as referenced.
+ if container.LayerID != "" {
+ referencedLayers[container.LayerID] = true
+ }
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // Now go back through all of the layer stores, and flag any layers which don't belong
+ // to an image or a container, and have been around longer than we can reasonably expect
+ // such a layer to be present before a corresponding image record is added.
+ if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
+ if isReadWrite := roLayerStoreIsReallyReadWrite(store); !isReadWrite {
+ return struct{}{}, false, nil
+ }
+ layers, err := store.Layers()
+ if err != nil {
+ return struct{}{}, true, err
+ }
+ for _, layer := range layers {
+ maximumAge := defaultMaximumUnreferencedLayerAge
+ if options.LayerUnreferencedMaximumAge != nil {
+ maximumAge = *options.LayerUnreferencedMaximumAge
+ }
+ if referenced := referencedLayers[layer.ID]; !referenced {
+ if layer.Created.IsZero() || layer.Created.Add(maximumAge).Before(time.Now()) {
+ // Either we don't (and never will) know when this layer was
+ // created, or it was created far enough in the past that we're
+ // reasonably sure it's not part of an image that's being written
+ // right now.
+ err := fmt.Errorf("layer %s: %w", layer.ID, ErrLayerUnreferenced)
+ report.Layers[layer.ID] = append(report.Layers[layer.ID], err)
+ }
+ }
+ }
+ return struct{}{}, false, nil
+ }); err != nil {
+ return CheckReport{}, err
+ }
+
+ // If the driver can tell us about which layers it knows about, we should have previously
+ // examined all of them. Any that we didn't are probably just wasted space.
+ // Note: if the driver doesn't support enumerating layers, it returns ErrNotSupported.
+ if err := s.startUsingGraphDriver(); err != nil {
+ return CheckReport{}, err
+ }
+ defer s.stopUsingGraphDriver()
+ layerList, err := s.graphDriver.ListLayers()
+ if err != nil && !errors.Is(err, drivers.ErrNotSupported) {
+ return CheckReport{}, err
+ }
+ if !errors.Is(err, drivers.ErrNotSupported) {
+ for i, id := range layerList {
+ if _, known := referencedLayers[id]; !known {
+ err := fmt.Errorf("layer %s: %w", id, ErrLayerUnaccounted)
+ report.Layers[id] = append(report.Layers[id], err)
+ }
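+ // Record the driver-reported order using 1-based positions, so that a
+ // zero value can later mean "order unknown" when sorting deletions.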
+ report.layerOrder[id] = i + 1
+ }
+ }
+
+ return report, nil
+}
+
+func roLayerStoreIsReallyReadWrite(store roLayerStore) bool {
+ return store.(*layerStore).lockfile.IsReadWrite()
+}
+
+func roImageStoreIsReallyReadWrite(store roImageStore) bool {
+ return store.(*imageStore).lockfile.IsReadWrite()
+}
+
+// Repair removes items which are themselves damaged, or which depend on items which are damaged.
+// Errors are returned if an attempt to delete an item fails.
+func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
+ if options == nil {
+ options = RepairEverything()
+ }
+ var errs []error
+ // Just delete damaged containers.
+ if options.RemoveContainers {
+ for id := range report.Containers {
+ err := s.DeleteContainer(id)
+ if err != nil && !errors.Is(err, ErrContainerUnknown) {
+ err := fmt.Errorf("deleting container %s: %w", id, err)
+ errs = append(errs, err)
+ }
+ }
+ }
+ // Now delete damaged images. Note which layers were removed as part of removing those images.
+ deletedLayers := make(map[string]struct{})
+ for id := range report.Images {
+ layers, err := s.DeleteImage(id, true)
+ if err != nil {
+ if !errors.Is(err, ErrImageUnknown) && !errors.Is(err, ErrLayerUnknown) {
+ err := fmt.Errorf("deleting image %s: %w", id, err)
+ errs = append(errs, err)
+ }
+ } else {
+ for _, layer := range layers {
+ logrus.Debugf("deleted layer %s", layer)
+ deletedLayers[layer] = struct{}{}
+ }
+ logrus.Debugf("deleted image %s", id)
+ }
+ }
+ // Build a list of the layers that we need to remove, sorted so that layers come before
+ // the layers that they are based on (children first, parents last).
+ layersToDelete := make([]string, 0, len(report.Layers))
+ for id := range report.Layers {
+ layersToDelete = append(layersToDelete, id)
+ }
+ depth := func(id string) int {
+ d := 0
+ parent := report.layerParentsByLayerID[id]
+ for parent != "" {
+ d++
+ parent = report.layerParentsByLayerID[parent]
+ }
+ return d
+ }
+ isUnaccounted := func(errs []error) bool {
+ for _, err := range errs {
+ if errors.Is(err, ErrLayerUnaccounted) {
+ return true
+ }
+ }
+ return false
+ }
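+ // Sort the list so that child layers come before their parents, with
+ // layers that only the driver knows about at the end, in the order
+ // the driver reported them.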
+ sort.Slice(layersToDelete, func(i, j int) bool {
+ // we've not heard of either of them, so remove them in the order the driver suggested
+ if isUnaccounted(report.Layers[layersToDelete[i]]) &&
+ isUnaccounted(report.Layers[layersToDelete[j]]) &&
+ report.layerOrder[layersToDelete[i]] != 0 && report.layerOrder[layersToDelete[j]] != 0 {
+ return report.layerOrder[layersToDelete[i]] < report.layerOrder[layersToDelete[j]]
+ }
+ // always delete the one we've heard of first
+ if isUnaccounted(report.Layers[layersToDelete[i]]) && !isUnaccounted(report.Layers[layersToDelete[j]]) {
+ return false
+ }
+ // always delete the one we've heard of first
+ if !isUnaccounted(report.Layers[layersToDelete[i]]) && isUnaccounted(report.Layers[layersToDelete[j]]) {
+ return true
+ }
+ // we've heard of both of them; the one that's on the end of a longer chain goes first
+ return depth(layersToDelete[i]) > depth(layersToDelete[j]) // closer-to-a-notional-base layers get removed later
+ })
+ // Now delete the layers that haven't been removed along with images.
+ for _, id := range layersToDelete {
+ if _, ok := deletedLayers[id]; ok {
+ continue
+ }
+ for _, reportedErr := range report.Layers[id] {
+ var err error
+ // If a layer was unaccounted for, remove it at the storage driver level.
+ // Otherwise, remove it at the higher level and let the higher level
+ // logic worry about telling the storage driver to delete the layer.
+ if errors.Is(reportedErr, ErrLayerUnaccounted) {
+ if err = s.graphDriver.Remove(id); err != nil {
+ err = fmt.Errorf("deleting storage layer %s: %v", id, err)
+ } else {
+ logrus.Debugf("deleted storage layer %s", id)
+ }
+ } else {
+ var stillMounted bool
+ if stillMounted, err = s.Unmount(id, true); err == nil && !stillMounted {
+ logrus.Debugf("unmounted layer %s", id)
+ } else if err != nil {
+ logrus.Debugf("unmounting layer %s: %v", id, err)
+ } else {
+ logrus.Debugf("layer %s still mounted", id)
+ }
+ if err = s.DeleteLayer(id); err != nil {
+ err = fmt.Errorf("deleting layer %s: %w", id, err)
+ } else {
+ logrus.Debugf("deleted layer %s", id)
+ }
+ }
+ if err != nil && !errors.Is(err, ErrLayerUnknown) && !errors.Is(err, ErrNotALayer) && !errors.Is(err, os.ErrNotExist) {
+ errs = append(errs, err)
+ }
+ }
+ }
+ return errs
+}
+
+// compareFileInfo returns a string summarizing what's different between the two checkFileInfos
+func compareFileInfo(a, b checkFileInfo, ignoreChownErrors bool) string {
+ if a.typeflag == b.typeflag && a.uid == b.uid && a.gid == b.gid && a.size == b.size &&
+ (os.ModeType|os.ModePerm)&a.mode == (os.ModeType|os.ModePerm)&b.mode {
+ return ""
+ }
+ var comparison []string
+ if a.typeflag != b.typeflag {
+ comparison = append(comparison, fmt.Sprintf("filetype:%v￫%v", a.typeflag, b.typeflag))
+ }
+ if a.uid != b.uid && !ignoreChownErrors {
+ comparison = append(comparison, fmt.Sprintf("uid:%d￫%d", a.uid, b.uid))
+ }
+ if a.gid != b.gid && !ignoreChownErrors {
+ comparison = append(comparison, fmt.Sprintf("gid:%d￫%d", a.gid, b.gid))
+ }
+ if a.size != b.size {
+ comparison = append(comparison, fmt.Sprintf("size:%d￫%d", a.size, b.size))
+ }
+ if (os.ModeType|os.ModePerm)&a.mode != (os.ModeType|os.ModePerm)&b.mode {
+ comparison = append(comparison, fmt.Sprintf("mode:%04o￫%04o", a.mode, b.mode))
+ }
+ return strings.Join(comparison, ",")
+}
+
+// checkFileInfo is what we care about for files
+type checkFileInfo struct {
+ typeflag byte
+ uid, gid int
+ size int64
+ mode os.FileMode
+}
+
+// checkDirectory is a node in a filesystem record, possibly the top
+type checkDirectory struct {
+ directory map[string]*checkDirectory // subdirectories
+ file map[string]checkFileInfo // non-directories
+ checkFileInfo
+}
+
+// newCheckDirectory creates an empty checkDirectory
+func newCheckDirectory(uid, gid int, size int64, mode os.FileMode) *checkDirectory {
+ return &checkDirectory{
+ directory: make(map[string]*checkDirectory),
+ file: make(map[string]checkFileInfo),
+ checkFileInfo: checkFileInfo{
+ typeflag: tar.TypeDir,
+ uid: uid,
+ gid: gid,
+ size: size,
+ mode: mode,
+ },
+ }
+}
+
+// newCheckDirectoryDefaults creates an empty checkDirectory with hardwired defaults for the UID
+// (0), GID (0), size (0) and permissions (0o555)
+func newCheckDirectoryDefaults() *checkDirectory {
+ return newCheckDirectory(0, 0, 0, 0o555)
+}
+
+// newCheckDirectoryFromDirectory creates a checkDirectory for an on-disk directory tree
+func newCheckDirectoryFromDirectory(dir string) (*checkDirectory, error) {
+ cd := newCheckDirectoryDefaults()
+ err := filepath.Walk(dir, func(walkpath string, info os.FileInfo, err error) error {
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ rel, err := filepath.Rel(dir, walkpath)
+ if err != nil {
+ return err
+ }
+ hdr, err := tar.FileInfoHeader(info, "") // we don't record link targets, so don't bother looking it up
+ if err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeLink || hdr.Typeflag == tar.TypeRegA {
+ hdr.Typeflag = tar.TypeReg
+ }
+ hdr.Name = filepath.ToSlash(rel)
+ cd.header(hdr)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return cd, nil
+}
+
+// add adds an item to a checkDirectory
+func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int64, mode os.FileMode) {
+ components := strings.Split(path, "/")
+ if components[len(components)-1] == "" {
+ components = components[:len(components)-1]
+ }
+ if typeflag == tar.TypeLink || typeflag == tar.TypeRegA {
+ typeflag = tar.TypeReg
+ }
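+ // A single remaining path component names an entry in this directory;
+ // anything longer descends into a subdirectory.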
+ switch len(components) {
+ case 0:
+ return
+ case 1:
+ switch typeflag {
+ case tar.TypeDir:
+ delete(c.file, components[0])
+ // directory entries are mergers, not replacements
+ if _, present := c.directory[components[0]]; !present {
+ c.directory[components[0]] = newCheckDirectory(uid, gid, size, mode)
+ } else {
+ c.directory[components[0]].checkFileInfo = checkFileInfo{
+ typeflag: tar.TypeDir,
+ uid: uid,
+ gid: gid,
+ size: size,
+ mode: mode,
+ }
+ }
+ case tar.TypeXGlobalHeader:
+ // ignore, since even though it looks like a valid pathname, it doesn't end
+ // up on the filesystem
+ default:
+ // treat these as TypeReg items
+ delete(c.directory, components[0])
+ c.file[components[0]] = checkFileInfo{
+ typeflag: typeflag,
+ uid: uid,
+ gid: gid,
+ size: size,
+ mode: mode,
+ }
+ }
+ return
+ }
+ subdirectory := c.directory[components[0]]
+ if subdirectory == nil {
+ subdirectory = newCheckDirectory(uid, gid, size, mode)
+ c.directory[components[0]] = subdirectory
+ }
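+ // Note that an intermediate directory created implicitly here takes
+ // the attributes of the entry that forced its creation; a later
+ // explicit entry for the directory itself will overwrite them.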
+ subdirectory.add(strings.Join(components[1:], "/"), typeflag, uid, gid, size, mode)
+}
+
+// remove removes an item from a checkDirectory
+func (c *checkDirectory) remove(path string) {
+ components := strings.Split(path, "/")
+ if len(components) == 1 {
+ delete(c.directory, components[0])
+ delete(c.file, components[0])
+ return
+ }
+ subdirectory := c.directory[components[0]]
+ if subdirectory != nil {
+ subdirectory.remove(strings.Join(components[1:], "/"))
+ }
+}
+
+// header updates a checkDirectory using information from the passed-in header
+func (c *checkDirectory) header(hdr *tar.Header) {
+ name := path.Clean(hdr.Name)
+ dir, base := path.Split(name)
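+ // Per the OCI layer spec, an entry named "dir/.wh.name" whites out
+ // "dir/name", while the special "dir/.wh..wh..opq" entry marks "dir"
+ // as opaque so that lower-layer contents stop showing through.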
+ if strings.HasPrefix(base, archive.WhiteoutPrefix) {
+ if base == archive.WhiteoutOpaqueDir {
+ c.remove(path.Clean(dir))
+ c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode))
+ } else {
+ c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):]))
+ }
+ } else {
+ c.add(name, hdr.Typeflag, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode))
+ }
+}
+
+// headers updates a checkDirectory using information from the passed-in header slice
+func (c *checkDirectory) headers(hdrs []*tar.Header) {
+ hdrs = append([]*tar.Header{}, hdrs...)
+ // sort the headers from the diff to ensure that whiteouts appear
+ // before content when they both appear in the same directory, per
+ // https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
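+ // For example, "etc/.wh..wh..opq" must be applied before "etc/passwd".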
+ sort.Slice(hdrs, func(i, j int) bool {
+ idir, ifile := path.Split(hdrs[i].Name)
+ jdir, jfile := path.Split(hdrs[j].Name)
+ if idir != jdir {
+ return hdrs[i].Name < hdrs[j].Name
+ }
+ if ifile == archive.WhiteoutOpaqueDir {
+ return true
+ }
+ if strings.HasPrefix(ifile, archive.WhiteoutPrefix) && !strings.HasPrefix(jfile, archive.WhiteoutPrefix) {
+ return true
+ }
+ return false
+ })
+ for _, hdr := range hdrs {
+ c.header(hdr)
+ }
+}
+
+// names provides a sorted list of the path names in the directory tree
+func (c *checkDirectory) names() []string {
+ names := make([]string, 0, len(c.file)+len(c.directory))
+ for name := range c.file {
+ names = append(names, name)
+ }
+ for name, subdirectory := range c.directory {
+ names = append(names, name+"/")
+ for _, subname := range subdirectory.names() {
+ names = append(names, name+"/"+subname)
+ }
+ }
+ return names
+}
+
+// compareCheckSubdirectory walks two subdirectory trees and returns a list of differences
+func compareCheckSubdirectory(path string, a, b *checkDirectory, ignoreChownErrors bool) []string {
+ var diff []string
+ if a == nil {
+ a = newCheckDirectoryDefaults()
+ }
+ if b == nil {
+ b = newCheckDirectoryDefaults()
+ }
+ for aname, adir := range a.directory {
+ if bdir, present := b.directory[aname]; !present {
+ // directory was removed
+ diff = append(diff, "-"+path+"/"+aname+"/")
+ diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, nil, ignoreChownErrors)...)
+ } else {
+ // directory is in both trees; descend
+ if attributes := compareFileInfo(adir.checkFileInfo, bdir.checkFileInfo, ignoreChownErrors); attributes != "" {
+ diff = append(diff, path+"/"+aname+"("+attributes+")")
+ }
+ diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, bdir, ignoreChownErrors)...)
+ }
+ }
+ for bname, bdir := range b.directory {
+ if _, present := a.directory[bname]; !present {
+ // directory added
+ diff = append(diff, "+"+path+"/"+bname+"/")
+ diff = append(diff, compareCheckSubdirectory(path+"/"+bname, nil, bdir, ignoreChownErrors)...)
+ }
+ }
+ for aname, afile := range a.file {
+ if bfile, present := b.file[aname]; !present {
+ // non-directory removed or replaced
+ diff = append(diff, "-"+path+"/"+aname)
+ } else {
+ // item is in both trees; compare
+ if attributes := compareFileInfo(afile, bfile, ignoreChownErrors); attributes != "" {
+ diff = append(diff, path+"/"+aname+"("+attributes+")")
+ }
+ }
+ }
+ for bname := range b.file {
+ if _, present := a.file[bname]; !present {
+ // non-directory added or replaced with something else
+ diff = append(diff, "+"+path+"/"+bname)
+ }
+ }
+ return diff
+}
+
+// compareCheckDirectory walks two directory trees and returns a sorted list of differences
+func compareCheckDirectory(a, b *checkDirectory, ignoreChownErrors bool) []string {
+ diff := compareCheckSubdirectory("", a, b, ignoreChownErrors)
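+ // Sort by pathname, ignoring the leading character, and put the
+ // removal of a path ahead of an addition of the same path.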
+ sort.Slice(diff, func(i, j int) bool {
+ if c := strings.Compare(diff[i][1:], diff[j][1:]); c != 0 {
+ return c < 0
+ }
+ return diff[i][0] == '-' && diff[j][0] != '-'
+ })
+ return diff
+}
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 5866b2f98..a7dfb405b 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -523,13 +523,20 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *containerStore) save(saveLocations containerLocations) error {
r.lockfile.AssertLockedForWriting()
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite()
+ if err != nil {
+ return err
+ }
+ r.lastWrite = lw
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
location := containerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
- if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
subsetContainers := make([]*Container, 0, len(r.containers))
@@ -549,15 +556,10 @@ func (r *containerStore) save(saveLocations containerLocations) error {
NoSync: true,
}
}
- if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil {
+ if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0o600, opts); err != nil {
return err
}
}
- lw, err := r.lockfile.RecordWrite()
- if err != nil {
- return err
- }
- r.lastWrite = lw
return nil
}
@@ -569,12 +571,12 @@ func (r *containerStore) saveFor(modifiedContainer *Container) error {
}
func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
- if err := os.MkdirAll(dir, 0700); err != nil {
+ if err := os.MkdirAll(dir, 0o700); err != nil {
return nil, err
}
volatileDir := dir
if transient {
- if err := os.MkdirAll(runDir, 0700); err != nil {
+ if err := os.MkdirAll(runDir, 0o700); err != nil {
return nil, err
}
volatileDir = runDir
@@ -926,10 +928,10 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
if !ok {
return ErrContainerUnknown
}
- if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil {
+ if err := os.MkdirAll(r.datadir(c.ID), 0o700); err != nil {
return err
}
- err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
+ err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0o600)
if err == nil {
save := false
if c.BigDataSizes == nil {
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index 301ee24d2..0b1766210 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -64,7 +64,7 @@ var (
enableDirperm bool
)
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("aufs", Init)
@@ -87,11 +87,9 @@ type Driver struct {
// Init returns a new AUFS driver.
// An error is returned if AUFS is not supported.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
-
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported)
-
}
fsMagic, err := graphdriver.GetFSMagic(home)
@@ -145,7 +143,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Create the root aufs driver dir and return
// if it already exists
// If not populate the dir structure
- if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil {
if os.IsExist(err) {
return a, nil
}
@@ -158,7 +156,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Populate the dir structure
for _, p := range paths {
- if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil {
return nil, err
}
}
@@ -251,9 +249,21 @@ func (a *Driver) Exists(id string) bool {
return true
}
-// List layers (not including additional image stores)
+// ListLayers returns all of the layers known to the driver.
func (a *Driver) ListLayers() ([]string, error) {
- return nil, graphdriver.ErrNotSupported
+ diffsDir := filepath.Join(a.rootPath(), "diff")
+ entries, err := os.ReadDir(diffsDir)
+ if err != nil {
+ return nil, err
+ }
+ results := make([]string, 0, len(entries))
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ continue
+ }
+ results = append(results, entry.Name())
+ }
+ return results, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
@@ -278,7 +288,6 @@ func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
// Create three folders for each id
// mnt, layers, and diff
func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
-
if opts != nil && len(opts.StorageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for aufs")
}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 8452fa189..b0663b895 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -42,7 +42,7 @@ import (
"golang.org/x/sys/unix"
)
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("btrfs", Init)
@@ -56,7 +56,6 @@ type btrfsOptions struct {
// Init returns a new BTRFS driver.
// An error is returned if BTRFS is not supported.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
-
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
@@ -70,7 +69,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if err != nil {
return nil, err
}
- if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil {
return nil, err
}
@@ -119,7 +118,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
case "btrfs.mountopt":
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
default:
- return options, userDiskQuota, fmt.Errorf("unknown option %s", key)
+ return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option)
}
}
return options, userDiskQuota, nil
@@ -127,7 +126,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
// Driver contains information about the filesystem mounted.
type Driver struct {
- //root of the file system
+ // root of the file system
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
@@ -226,7 +225,7 @@ func subvolSnapshot(src, dest, name string) error {
var args C.struct_btrfs_ioctl_vol_args_v2
args.fd = C.__s64(getDirFd(srcDir))
- var cs = C.CString(name)
+ cs := C.CString(name)
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs))
@@ -479,13 +478,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
// Create the filesystem with given id.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
- quotas := path.Join(d.home, "quotas")
- subvolumes := path.Join(d.home, "subvolumes")
+ quotas := d.quotasDir()
+ subvolumes := d.subvolumesDir()
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
- if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil {
return err
}
if parent == "" {
@@ -523,10 +522,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
return err
}
- if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil {
return err
}
- if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
+ if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil {
return err
}
}
@@ -560,7 +559,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
}
driver.options.size = uint64(size)
default:
- return fmt.Errorf("unknown option %s", key)
+ return fmt.Errorf("unknown option %s (%q)", key, storageOpt)
}
}
@@ -679,9 +678,21 @@ func (d *Driver) Exists(id string) bool {
return err == nil
}
-// List layers (not including additional image stores)
+// ListLayers returns all of the layers known to the driver.
func (d *Driver) ListLayers() ([]string, error) {
- return nil, graphdriver.ErrNotSupported
+ subvolumesDir := filepath.Join(d.home, "subvolumes")
+ entries, err := os.ReadDir(subvolumesDir)
+ if err != nil {
+ return nil, err
+ }
+ results := make([]string, 0, len(entries))
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ continue
+ }
+ results = append(results, entry.Name())
+ }
+ return results, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go
index 1845a4e08..06ccf9fa4 100644
--- a/vendor/github.com/containers/storage/drivers/chown_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chown_windows.go
@@ -10,8 +10,7 @@ import (
"github.com/containers/storage/pkg/idtools"
)
-type platformChowner struct {
-}
+type platformChowner struct{}
func newLChowner() *platformChowner {
return &platformChowner{}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index aa88c1a74..9c3d7c668 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -11,6 +11,7 @@ package copy
#endif
*/
import "C"
+
import (
"container/list"
"errors"
diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go
index 015766676..964dcaf2f 100644
--- a/vendor/github.com/containers/storage/drivers/counter.go
+++ b/vendor/github.com/containers/storage/drivers/counter.go
@@ -53,7 +53,7 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
}
} else if !c.checker.IsMounted(path) {
// if the unmount was performed outside of this process (e.g. conmon cleanup)
- //the ref counter would lose track of it. Check if it is still mounted.
+ // the ref counter would lose track of it. Check if it is still mounted.
m.count = 0
}
infoOp(m)
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
index 56c117d1b..388602b63 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -177,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error {
if err != nil {
return fmt.Errorf("marshalling direct lvm config: %w", err)
}
- if err := os.WriteFile(p, b, 0600); err != nil {
+ if err := os.WriteFile(p, b, 0o600); err != nil {
return fmt.Errorf("writing direct lvm config to file: %w", err)
}
return nil
@@ -193,7 +193,7 @@ func setupDirectLVM(cfg directLVMConfig) error {
}
}
- err := os.MkdirAll(lvmProfileDir, 0755)
+ err := os.MkdirAll(lvmProfileDir, 0o755)
if err != nil {
return fmt.Errorf("creating lvm profile directory: %w", err)
}
@@ -241,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error {
}
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
- err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
+ err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600)
if err != nil {
return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index d2d0effc3..5d8df8a78 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -124,7 +124,7 @@ type DeviceSet struct {
deletionWorkerTicker *time.Ticker
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
- minFreeSpacePercent uint32 //min free space percentage in thinpool
+ minFreeSpacePercent uint32 // min free space percentage in thinpool
xfsNospaceRetries string // max retries when xfs receives ENOSPC
lvmSetupConfig directLVMConfig
}
@@ -273,7 +273,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
if err != nil {
return "", err
}
- if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil {
+ if err := idtools.MkdirAllAs(dirname, 0o700, uid, gid); err != nil {
return "", err
}
@@ -282,7 +282,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
return "", err
}
logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename)
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600)
if err != nil {
return "", err
}
@@ -293,7 +293,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
}
} else {
if fi.Size() < size {
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600)
if err != nil {
return "", err
}
@@ -421,7 +421,6 @@ func (devices *DeviceSet) constructDeviceIDMap() {
}
func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error {
-
// Skip some of the meta files which are not device files.
if strings.HasSuffix(name, ".migrated") {
logrus.Debugf("devmapper: Skipping file %s", path)
@@ -458,7 +457,7 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error {
logrus.Debug("devmapper: loadDeviceFilesOnStart()")
defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END")
- var scan = func(path string, d fs.DirEntry, err error) error {
+ scan := func(path string, d fs.DirEntry, err error) error {
if err != nil {
logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err)
return nil
@@ -1001,6 +1000,10 @@ func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error {
devices.Lock()
defer devices.Unlock()
+ if devices.filesystem == "" {
+ devices.filesystem = determineDefaultFS()
+ }
+
if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
return err
}
@@ -1152,7 +1155,6 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
}
func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {
-
if !userBaseSize {
return nil
}
@@ -1191,7 +1193,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
fsMountPoint := "/run/containers/storage/mnt"
if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
- if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
+ if err := os.MkdirAll(fsMountPoint, 0o700); err != nil {
return err
}
defer os.RemoveAll(fsMountPoint)
@@ -1657,7 +1659,6 @@ func (devices *DeviceSet) loadThinPoolLoopBackInfo() error {
}
func (devices *DeviceSet) enableDeferredRemovalDeletion() error {
-
// If user asked for deferred removal then check both libdm library
// and kernel driver support deferred removal otherwise error out.
if enableDeferredRemoval {
@@ -1695,16 +1696,19 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
}
}
- //create the root dir of the devmapper driver ownership to match this
- //daemon's remapped root uid/gid so containers can start properly
+ // create the root dir of the devmapper driver ownership to match this
+ // daemon's remapped root uid/gid so containers can start properly
uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
if err != nil {
return err
}
- if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil {
+ if err := idtools.MkdirAs(devices.root, 0o700, uid, gid); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(devices.metadataDir(), 0o700); err != nil {
return err
}
- if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil {
+ if err := idtools.MkdirAs(filepath.Join(devices.root, "mnt"), 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) {
return err
}
@@ -1811,7 +1815,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
devices.dataLoopFile = data
devices.dataDevice = dataFile.Name()
} else {
- dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
+ dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0o600)
if err != nil {
return err
}
@@ -1844,7 +1848,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
devices.metadataLoopFile = metadata
devices.metadataDevice = metadataFile.Name()
} else {
- metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
+ metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0o600)
if err != nil {
return err
}
@@ -1966,7 +1970,6 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string
}
func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) {
-
// Read size to change the block device size per container.
for key, val := range storageOpt {
key := strings.ToLower(key)
@@ -2317,7 +2320,7 @@ func (devices *DeviceSet) Shutdown(home string) error {
info.lock.Lock()
devices.Lock()
if err := devices.deactivateDevice(info); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
+ logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err)
}
devices.Unlock()
info.lock.Unlock()
@@ -2326,7 +2329,7 @@ func (devices *DeviceSet) Shutdown(home string) error {
devices.Lock()
if devices.thinPoolDevice == "" {
if err := devices.deactivatePool(); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
+ logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err)
}
}
devices.Unlock()
@@ -2483,6 +2486,26 @@ func (devices *DeviceSet) List() []string {
return ids
}
+// ListLayers returns a list of device IDs, omitting the ""/"base" device and
+// any which have been marked as deleted.
+func (devices *DeviceSet) ListLayers() ([]string, error) {
+ if err := devices.cleanupDeletedDevices(); err != nil {
+ return nil, err
+ }
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ ids := make([]string, 0, len(devices.Devices))
+ for k, d := range devices.Devices {
+ if k == "" || d.Deleted {
+ continue
+ }
+ ids = append(ids, k)
+ }
+ return ids, nil
+}
+
func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) {
var params string
_, sizeInSectors, _, params, err = devicemapper.GetStatus(devName)
@@ -2520,7 +2543,6 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) {
}
sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName())
-
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index 8b3ee51df..8b8a1d177 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -20,7 +20,7 @@ import (
"golang.org/x/sys/unix"
)
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("devicemapper", Init)
@@ -55,7 +55,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
locker: locker.New(),
}
-
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
}
@@ -103,7 +102,6 @@ func (d *Driver) Status() [][2]string {
// Metadata returns a map of information about the device.
func (d *Driver) Metadata(id string) (map[string]string, error) {
m, err := d.DeviceSet.exportDeviceMetadata(id)
-
if err != nil {
return nil, err
}
@@ -202,11 +200,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
// Create the target directories if they don't exist
- if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil {
d.ctr.Decrement(mp)
return "", err
}
- if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
return "", err
}
@@ -227,7 +225,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
- if err := os.WriteFile(idFile, []byte(id), 0600); err != nil {
+ if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
@@ -267,11 +265,6 @@ func (d *Driver) Exists(id string) bool {
return d.DeviceSet.HasDevice(id)
}
-// List layers (not including additional image stores)
-func (d *Driver) ListLayers() ([]string, error) {
- return nil, graphdriver.ErrNotSupported
-}
-
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 055d99d18..bc04c10b7 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -111,6 +111,10 @@ type ProtoDriver interface {
Exists(id string) bool
// Returns a list of layer ids that exist on this driver (does not include
// additional storage layers). Not supported by all backends.
+ // If the driver requires that layers be removed in a particular order,
+ // usually due to parent-child relationships that it cares about, the
+ // list should be sorted well enough so that if all layers need to be
+ // removed, they can be removed in the order in which they're returned.
ListLayers() ([]string, error)
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
@@ -322,6 +326,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) {
type Options struct {
Root string
RunRoot string
+ ImageStore string
DriverPriority []string
DriverOptions []string
UIDMaps []idtools.IDMap
@@ -337,12 +342,12 @@ func New(name string, config Options) (Driver, error) {
}
// Guess for prior driver
- driversMap := scanPriorDrivers(config.Root)
+ driversMap := ScanPriorDrivers(config.Root)
// use the supplied priority list unless it is empty
prioList := config.DriverPriority
if len(prioList) == 0 {
- prioList = priority
+ prioList = Priority
}
for _, name := range prioList {
@@ -414,12 +419,12 @@ func isDriverNotSupported(err error) bool {
}
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
-func scanPriorDrivers(root string) map[string]bool {
+func ScanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
for driver := range drivers {
p := filepath.Join(root, driver)
- if _, err := os.Stat(p); err == nil && driver != "vfs" {
+ if _, err := os.Stat(p); err == nil {
driversMap[driver] = true
}
}
diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go
index 357851543..b60883a9e 100644
--- a/vendor/github.com/containers/storage/drivers/driver_darwin.go
+++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go
@@ -1,11 +1,9 @@
package graphdriver
-var (
- // Slice of drivers that should be used in order
- priority = []string{
- "vfs",
- }
-)
+// Slice of drivers that should be used in order
+var Priority = []string{
+ "vfs",
+}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
index 143cccf92..a6072ab56 100644
--- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -13,7 +13,7 @@ const (
var (
// Slice of drivers that should be used in an order
- priority = []string{
+ Priority = []string{
"zfs",
"vfs",
}
@@ -31,8 +31,7 @@ func NewDefaultChecker() Checker {
return &defaultChecker{}
}
-type defaultChecker struct {
-}
+type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
index b9e57a60d..3925644ae 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -90,7 +90,7 @@ const (
var (
// Slice of drivers that should be used in an order
- priority = []string{
+ Priority = []string{
"overlay",
// We don't support devicemapper without configuration
// "devicemapper",
@@ -161,8 +161,7 @@ func NewDefaultChecker() Checker {
return &defaultChecker{}
}
-type defaultChecker struct {
-}
+type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
index 962edd176..6b6373a37 100644
--- a/vendor/github.com/containers/storage/drivers/driver_solaris.go
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -16,6 +16,7 @@ static inline struct statvfs *getstatfs(char *s) {
}
*/
import "C"
+
import (
"path/filepath"
"unsafe"
@@ -31,7 +32,7 @@ const (
var (
// Slice of drivers that should be used in an order
- priority = []string{
+ Priority = []string{
"zfs",
}
@@ -69,8 +70,7 @@ func NewDefaultChecker() Checker {
return &defaultChecker{}
}
-type defaultChecker struct {
-}
+type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
@@ -80,7 +80,6 @@ func (c *defaultChecker) IsMounted(path string) bool {
// Mounted checks if the given path is mounted as the fs type
// Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
-
cs := C.CString(filepath.Dir(mountPath))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs)
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
index 8119d9a6c..7dfbef007 100644
--- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -3,12 +3,10 @@
package graphdriver
-var (
- // Slice of drivers that should be used in an order
- priority = []string{
- "unsupported",
- }
-)
+// Slice of drivers that should be used in an order
+var Priority = []string{
+ "unsupported",
+}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go
index ffd30c295..54bd139a3 100644
--- a/vendor/github.com/containers/storage/drivers/driver_windows.go
+++ b/vendor/github.com/containers/storage/drivers/driver_windows.go
@@ -1,11 +1,9 @@
package graphdriver
-var (
- // Slice of drivers that should be used in order
- priority = []string{
- "windowsfilter",
- }
-)
+// Slice of drivers that should be used in order
+var Priority = []string{
+ "windowsfilter",
+}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index 6b2496ec5..a0e046458 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -14,11 +14,9 @@ import (
"github.com/sirupsen/logrus"
)
-var (
- // ApplyUncompressedLayer defines the unpack method used by the graph
- // driver.
- ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
-)
+// ApplyUncompressedLayer defines the unpack method used by the graph
+// driver.
+var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
// NaiveDiffDriver takes a ProtoDriver and adds the
// capability of the Diffing methods which it may or may not
@@ -173,7 +171,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
}
defer driverPut(driver, id, &err)
- defaultForceMask := os.FileMode(0700)
+ defaultForceMask := os.FileMode(0o700)
var forceMask *os.FileMode // = nil
if runtime.GOOS == "darwin" {
forceMask = &defaultForceMask
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index 437112742..60980994b 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -38,22 +38,22 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}()
// Make directories l1/d, l1/d1, l2/d, l3, work, merged
- if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0o755); err != nil {
return err
}
- if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0o755); err != nil {
return err
}
- if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0o755); err != nil {
return err
}
- if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "l3"), 0o755); err != nil {
return err
}
- if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return err
}
- if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return err
}
@@ -82,7 +82,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}()
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
- if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
+ if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0o644); err != nil {
return fmt.Errorf("failed to write to merged directory: %w", err)
}
@@ -132,19 +132,19 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
}()
// Make directories l1, l2, work, merged
- if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l1"), 0o755); err != nil {
return false, err
}
- if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil {
+ if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0o700); err != nil {
return false, err
}
- if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "l2"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
@@ -170,7 +170,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
}()
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
// as a metadata-only copy
- if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil {
+ if err := os.Chmod(filepath.Join(td, "merged", "f"), 0o600); err != nil {
return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err)
}
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy"))
@@ -196,20 +196,23 @@ func doesVolatile(d string) (bool, error) {
}
}()
- if err := os.MkdirAll(filepath.Join(td, "lower"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "lower"), 0o755); err != nil {
return false, err
}
- if err := os.MkdirAll(filepath.Join(td, "upper"), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Join(td, "upper"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return false, err
}
- if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+ if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work"))
+ if unshare.IsRootless() {
+ opts = fmt.Sprintf("%s,userxattr", opts)
+ }
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err)
}
@@ -238,11 +241,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
upperDir := filepath.Join(layerDir, "upper")
workDir := filepath.Join(layerDir, "work")
- _ = idtools.MkdirAs(mergedDir, 0700, 0, 0)
- _ = idtools.MkdirAs(lowerDir, 0700, 0, 0)
- _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0)
- _ = idtools.MkdirAs(upperDir, 0700, 0, 0)
- _ = idtools.MkdirAs(workDir, 0700, 0, 0)
+ _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
+ _ = idtools.MkdirAs(workDir, 0o700, 0, 0)
mapping := []idtools.IDMap{
{
diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go
index de47951d4..33e60b118 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/mount.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go
@@ -55,7 +55,7 @@ func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label st
w.Close()
return fmt.Errorf("mountfrom error on re-exec cmd: %w", err)
}
- //write the options to the pipe for the untar exec to read
+ // write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
return fmt.Errorf("mountfrom json encode to pipe failed: %w", err)
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index b606713f0..5431da4e4 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -29,7 +29,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
@@ -41,13 +40,13 @@ import (
"golang.org/x/sys/unix"
)
-var (
- // untar defines the untar method
- untar = chrootarchive.UntarUncompressed
-)
+// untar defines the untar method
+var untar = chrootarchive.UntarUncompressed
const (
- defaultPerms = os.FileMode(0555)
+ defaultPerms = os.FileMode(0o555)
+ selinuxLabelTest = "system_u:object_r:container_file_t:s0"
+ mountProgramFlagFile = ".has-mount-program"
)
// This backend uses the overlay union filesystem for containers
@@ -78,9 +77,10 @@ const (
// that mounts do not fail due to length.
const (
- linkDir = "l"
- lowerFile = "lower"
- maxDepth = 500
+ linkDir = "l"
+ stagingDir = "staging"
+ lowerFile = "lower"
+ maxDepth = 500
// idLength represents the number of random characters
// which can be used to create the unique link identifier
@@ -110,6 +110,7 @@ type Driver struct {
name string
home string
runhome string
+ imageStore string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
@@ -124,7 +125,6 @@ type Driver struct {
}
type additionalLayerStore struct {
-
// path is the directory where this store is available on the host.
path string
@@ -175,11 +175,11 @@ func hasVolatileOption(opts []string) bool {
}
func getMountProgramFlagFile(path string) string {
- return filepath.Join(path, ".has-mount-program")
+ return filepath.Join(path, mountProgramFlagFile)
}
func checkSupportVolatile(home, runhome string) (bool, error) {
- feature := fmt.Sprintf("volatile")
+ const feature = "volatile"
volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature)
var usingVolatile bool
if err == nil {
@@ -200,6 +200,8 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
return false, fmt.Errorf("recording volatile-being-used status: %w", err)
}
+ } else {
+ usingVolatile = false
}
}
return usingVolatile, nil
@@ -303,6 +305,16 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
+ // If a custom --imagestore is selected, never
+ // ditch the original graphRoot; instead add it as
+ // an additionalImageStore so its images can still be
+ // read and used.
+ if options.ImageStore != "" {
+ graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore)
+ options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore)
+ // Complete the image-store path by appending the driver name.
+ options.ImageStore = filepath.Join(options.ImageStore, "overlay")
+ }
opts, err := parseOptions(options.DriverOptions)
if err != nil {
return nil, err
@@ -325,11 +337,17 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
// Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil {
return nil, err
}
- if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
+ if options.ImageStore != "" {
+ if err := idtools.MkdirAllAs(path.Join(options.ImageStore, linkDir), 0o755, 0, 0); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil {
return nil, err
}
@@ -345,12 +363,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if opts.mountProgram != "" {
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
- m := os.FileMode(0700)
+ m := os.FileMode(0o700)
opts.forceMask = &m
logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
}
- if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0o600); err != nil {
return nil, err
}
} else {
@@ -420,6 +438,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
d := &Driver{
name: "overlay",
home: home,
+ imageStore: options.ImageStore,
runhome: runhome,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
@@ -560,9 +579,9 @@ func parseOptions(options []string) (*overlayOptions, error) {
var mask int64
switch val {
case "shared":
- mask = 0755
+ mask = 0o755
case "private":
- mask = 0700
+ mask = 0o700
default:
mask, err = strconv.ParseInt(val, 8, 32)
if err != nil {
@@ -627,7 +646,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
if err != nil && !os.IsNotExist(err) {
return false, err
}
- if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !os.IsNotExist(err) {
return false, err
}
if needsMountProgram {
@@ -640,7 +659,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
for _, dir := range []string{home, runhome} {
if _, err := os.Stat(dir); err != nil {
- _ = idtools.MkdirAllAs(dir, 0700, 0, 0)
+ _ = idtools.MkdirAllAs(dir, 0o700, 0, 0)
}
}
@@ -700,12 +719,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
_ = os.RemoveAll(layerDir)
_ = os.Remove(home)
}()
- _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(lower2Subdir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID)
- _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID)
+ _ = idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(lower1Dir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(lower2Dir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(lower2Subdir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(upperDir, 0o700, rootUID, rootGID)
+ _ = idtools.MkdirAs(workDir, 0o700, rootUID, rootGID)
f, err := os.Create(lower2SubdirFile)
if err != nil {
logrus.Debugf("Unable to create test file: %v", err)
@@ -723,7 +742,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if unshare.IsRootless() {
flags = fmt.Sprintf("%s,userxattr", flags)
}
- if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
+ if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0o600, int(unix.Mkdev(0, 0))); err != nil {
logrus.Debugf("Unable to create kernel-style whiteout: %v", err)
return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err)
}
@@ -806,15 +825,22 @@ func (d *Driver) Status() [][2]string {
// Metadata returns meta data about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
- dir := d.dir(id)
+ dir, imagestore, _ := d.dir2(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
+ workDirBase := dir
+ if imagestore != "" {
+ if _, err := os.Stat(imagestore); err != nil {
+ return nil, err
+ }
+ workDirBase = imagestore
+ }
metadata := map[string]string{
- "WorkDir": path.Join(dir, "work"),
- "MergedDir": path.Join(dir, "merged"),
- "UpperDir": path.Join(dir, "diff"),
+ "WorkDir": path.Join(workDirBase, "work"),
+ "MergedDir": path.Join(workDirBase, "merged"),
+ "UpperDir": path.Join(workDirBase, "diff"),
}
lowerDirs, err := d.getLowerDirs(id)
@@ -929,7 +955,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
}
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
- dir := d.dir(id)
+ dir, imageStore, _ := d.dir2(id)
uidMaps := d.uidMaps
gidMaps := d.gidMaps
@@ -940,7 +966,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
}
// Make the link directory if it does not exist
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
return err
}
@@ -954,11 +980,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
GID: rootGID,
}
- if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil {
+ if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
return err
}
+ workDirBase := dir
+ if imageStore != "" {
+ workDirBase = imageStore
+ if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil {
+ return err
+ }
+ }
if parent != "" {
- st, err := system.Stat(d.dir(parent))
+ parentBase, parentImageStore, _ := d.dir2(parent)
+ if parentImageStore != "" {
+ parentBase = parentImageStore
+ }
+ st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
}
@@ -975,9 +1012,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
}
}
- if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil {
+ if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
return err
}
+ if imageStore != "" {
+ if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil {
+ return err
+ }
+ }
defer func() {
// Clean up on failure
@@ -985,6 +1027,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
if err2 := os.RemoveAll(dir); err2 != nil {
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
}
+ if imageStore != "" {
+ if err2 := os.RemoveAll(workDirBase); err2 != nil {
+ logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
+ }
+ }
}
}()
@@ -1007,44 +1054,60 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
return err
}
+ if imageStore != "" {
+ if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
+ return err
+ }
+ }
}
perms := defaultPerms
if d.options.forceMask != nil {
perms = *d.options.forceMask
}
+
if parent != "" {
- st, err := system.Stat(filepath.Join(d.dir(parent), "diff"))
+ parentDir, parentImageStore, _ := d.dir2(parent)
+ base := parentDir
+ if parentImageStore != "" {
+ base = parentImageStore
+ }
+ st, err := system.Stat(filepath.Join(base, "diff"))
if err != nil {
return err
}
perms = os.FileMode(st.Mode())
}
- if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil {
return err
}
lid := generateID(idLength)
- if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
+
+ linkBase := path.Join("..", id, "diff")
+ if imageStore != "" {
+ linkBase = path.Join(imageStore, "diff")
+ }
+ if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil {
return err
}
// Write link id to link file
- if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0o644); err != nil {
return err
}
- if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil {
return err
}
- if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
return err
}
// if no parent directory, create a dummy lower directory and skip writing a "lowers" file
if parent == "" {
- return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID)
+ return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID)
}
lower, err := d.getLower(parent)
@@ -1052,7 +1115,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
return err
}
if lower != "" {
- if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
+ if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil {
return err
}
}
@@ -1120,22 +1183,26 @@ func (d *Driver) getLower(parent string) (string, error) {
}
func (d *Driver) dir(id string) string {
- p, _ := d.dir2(id)
+ p, _, _ := d.dir2(id)
return p
}
-func (d *Driver) dir2(id string) (string, bool) {
+func (d *Driver) dir2(id string) (string, string, bool) {
newpath := path.Join(d.home, id)
+ imageStore := ""
+ if d.imageStore != "" {
+ imageStore = path.Join(d.imageStore, id)
+ }
if _, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
l := path.Join(p, d.name, id)
_, err = os.Stat(l)
if err == nil {
- return l, true
+ return l, imageStore, true
}
}
}
- return newpath, false
+ return newpath, imageStore, false
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
@@ -1223,6 +1290,9 @@ func (d *Driver) Remove(id string) error {
}
if d.quotaCtl != nil {
d.quotaCtl.ClearQuota(dir)
+ if d.imageStore != "" {
+ d.quotaCtl.ClearQuota(d.imageStore)
+ }
}
return nil
}
@@ -1240,7 +1310,7 @@ func (d *Driver) recreateSymlinks() error {
return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
}
// This makes the link directory if it doesn't exist
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
return err
}
// Keep looping as long as we take some corrective action in each iteration
@@ -1317,7 +1387,7 @@ func (d *Driver) recreateSymlinks() error {
if err != nil || string(data) != link.Name() {
// NOTE: If two or more links point to the same target, we will update linkFile
// with every value of link.Name(), and set madeProgress = true every time.
- if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
+ if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil {
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
@@ -1342,10 +1412,14 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
- dir, inAdditionalStore := d.dir2(id)
+ dir, imageStore, inAdditionalStore := d.dir2(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
+ workDirBase := dir
+ if imageStore != "" {
+ workDirBase = imageStore
+ }
readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
@@ -1478,18 +1552,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
absLowers = append(absLowers, path.Join(dir, "empty"))
}
// user namespace requires this to move a directory from lower to upper.
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
- diffDir := path.Join(dir, "diff")
+ diffDir := path.Join(workDirBase, "diff")
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
return "", err
}
mergedDir := path.Join(dir, "merged")
// Create the driver merged dir
- if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
}
if count := d.ctr.Increment(mergedDir); count > 1 {
@@ -1505,7 +1579,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}()
- workdir := path.Join(dir, "work")
+ workdir := path.Join(workDirBase, "work")
if d.options.mountProgram == "" && unshare.IsRootless() {
optsList = append(optsList, "userxattr")
@@ -1525,7 +1599,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" {
var newAbsDir []string
mappedRoot := filepath.Join(d.home, id, "mapped")
- if err := os.MkdirAll(mappedRoot, 0700); err != nil {
+ if err := os.MkdirAll(mappedRoot, 0o700); err != nil {
return "", err
}
@@ -1612,16 +1686,15 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if
// the mount data cannot fit within a page and relative links make the mount data much
// smaller at the expense of requiring a fork exec to chdir().
-
- workdir = path.Join(id, "work")
if readWrite {
diffDir := path.Join(id, "diff")
- opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
+ workDir := path.Join(id, "work")
+ opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
} else {
opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
}
if len(optsList) > 0 {
- opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
+ opts = strings.Join(append([]string{opts}, optsList...), ",")
}
mountData = label.FormatMountLabel(opts, options.MountLabel)
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
@@ -1631,9 +1704,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
// overlay has a check in place to prevent mounting the same file system twice
- // if volatile was already specified.
- err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile"))
- if err != nil && !os.IsNotExist(err) {
+ // if volatile was already specified. Yes, the kernel repeats the "work" component.
+ err = os.RemoveAll(filepath.Join(workdir, "work", "incompat", "volatile"))
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
return "", err
}
@@ -1703,11 +1776,13 @@ func (d *Driver) Put(id string) error {
if !unmounted {
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+ return fmt.Errorf("unmounting %q: %w", mountpoint, err)
}
}
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
+ return fmt.Errorf("removing mount point %q: %w", mountpoint, err)
}
return nil
@@ -1725,20 +1800,23 @@ func (d *Driver) ListLayers() ([]string, error) {
if err != nil {
return nil, err
}
-
layers := make([]string, 0)
for _, entry := range entries {
id := entry.Name()
- // Does it look like a datadir directory?
- if !entry.IsDir() || stringid.ValidateID(id) != nil {
+ switch id {
+ case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile:
+ // Expected, but not a layer; skip it.
continue
+ default:
+ // Does it look like a datadir directory?
+ if !entry.IsDir() {
+ continue
+ }
+ layers = append(layers, id)
}
-
- layers = append(layers, id)
}
-
- return layers, err
+ return layers, nil
}
// isParent returns if the passed in parent is the direct parent of the passed in layer
@@ -1795,7 +1873,7 @@ func (g *overlayFileGetter) Close() error {
}
func (d *Driver) getStagingDir() string {
- return filepath.Join(d.home, "staging")
+ return filepath.Join(d.home, stagingDir)
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
@@ -1831,7 +1909,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
var applyDir string
if id == "" {
- err := os.MkdirAll(d.getStagingDir(), 0700)
+ err := os.MkdirAll(d.getStagingDir(), 0o700)
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
@@ -1884,7 +1962,6 @@ func (d *Driver) DifferTarget(id string) (string, error) {
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
-
if !d.isParent(id, parent) {
if d.options.ignoreChownErrors {
options.IgnoreChownErrors = d.options.ignoreChownErrors
@@ -1922,8 +1999,12 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
}
func (d *Driver) getDiffPath(id string) (string, error) {
- dir := d.dir(id)
- return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
+ dir, imagestore, _ := d.dir2(id)
+ base := dir
+ if imagestore != "" {
+ base = imagestore
+ }
+ return redirectDiffIfAdditionalLayer(path.Join(base, "diff"))
}
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@@ -2014,8 +2095,12 @@ func (d *Driver) AdditionalImageStores() []string {
// by toContainer to those specified by toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
var err error
- dir := d.dir(id)
- diffDir := filepath.Join(dir, "diff")
+ dir, imagestore, _ := d.dir2(id)
+ base := dir
+ if imagestore != "" {
+ base = imagestore
+ }
+ diffDir := filepath.Join(base, "diff")
rootUID, rootGID := 0, 0
if toHost != nil {
@@ -2196,7 +2281,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
// tell the additional layer store that we use this layer.
// mark this layer as "additional layer"
- if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0o644); err != nil {
return err
}
notifyUseAdditionalLayer(al.path)
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index f5484dee7..8b6aafab8 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -50,6 +50,7 @@ struct fsxattr {
#endif
*/
import "C"
+
import (
"errors"
"fmt"
@@ -67,6 +68,10 @@ import (
const projectIDsAllocatedPerQuotaHome = 10000
+// BackingFsBlockDeviceLink is the name of a file that we place in
+// the home directory of a driver that uses this package.
+const BackingFsBlockDeviceLink = "backingFsBlockDev"
+
// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
Size uint64
@@ -94,7 +99,6 @@ func generateUniqueProjectID(path string) (uint32, error) {
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
-
}
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
return uint32(projectID), nil
@@ -187,7 +191,6 @@ func NewControl(basePath string) (*Control, error) {
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
-
projectID, ok := q.quotas[targetPath]
if !ok {
projectID = q.nextProjectID
@@ -235,7 +238,7 @@ func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
d.d_ino_softlimit = d.d_ino_hardlimit
}
- var cs = C.CString(q.backingFsBlockDev)
+ cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
runQuotactl := func() syscall.Errno {
@@ -303,7 +306,7 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err
//
// get the quota limit for the container's project id
//
- var cs = C.CString(q.backingFsBlockDev)
+ cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
@@ -395,9 +398,9 @@ func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
- dir := C.opendir(Cpath)
+ dir, errno := C.opendir(Cpath)
if dir == nil {
- return nil, fmt.Errorf("can't open dir %v", Cpath)
+ return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno)
}
return dir, nil
}
@@ -421,10 +424,10 @@ func makeBackingFsDev(home string) (string, error) {
return "", err
}
- backingFsBlockDev := path.Join(home, "backingFsBlockDev")
+ backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink)
backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
// Re-create just in case someone copied the home directory over to a new device
- if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
+ if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index 2f6c7f28f..648fd3379 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -15,8 +15,7 @@ type Quota struct {
// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
-type Control struct {
-}
+type Control struct{}
func NewControl(basePath string) (*Control, error) {
return nil, errors.New("filesystem does not support, or has not enabled quotas")
diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go
index 7b96c082d..66ab89f7f 100644
--- a/vendor/github.com/containers/storage/drivers/template.go
+++ b/vendor/github.com/containers/storage/drivers/template.go
@@ -34,6 +34,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
}
return err
}
+ defer diff.Close()
applyOptions := ApplyDiffOpts{
Diff: diff,
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index bf0cfe940..0facfb42e 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -14,19 +14,13 @@ import (
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/parsers"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
)
-var (
- // CopyDir defines the copy method to use.
- CopyDir = dirCopy
-)
-
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("vfs", Init)
@@ -42,11 +36,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
rootIDs := d.idMappings.RootPair()
- if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
+ if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil {
return nil, err
}
for _, option := range options.DriverOptions {
-
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
@@ -69,6 +62,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
return nil, fmt.Errorf("vfs driver does not support %s options", key)
}
}
+ // If --imagestore is provided, add the writable graphRoot
+ // to vfs's additional image stores, as is done for the
+ // `overlay` driver.
+ if options.ImageStore != "" {
+ d.homes = append(d.homes, options.ImageStore)
+ }
d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
@@ -161,7 +160,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
dir := d.dir(id)
rootIDs := idMappings.RootPair()
- if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil {
+ if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
return err
}
@@ -173,7 +172,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
rootPerms := defaultPerms
if runtime.GOOS == "darwin" {
- rootPerms = os.FileMode(0700)
+ rootPerms = os.FileMode(0o700)
}
if parent != "" {
@@ -203,7 +202,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
}
return nil
-
}
func (d *Driver) dir(id string) string {
@@ -268,7 +266,7 @@ func (d *Driver) Exists(id string) bool {
// List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) {
- entries, err := os.ReadDir(d.homes[0])
+ entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
if err != nil {
return nil, err
}
@@ -278,7 +276,7 @@ func (d *Driver) ListLayers() ([]string, error) {
for _, entry := range entries {
id := entry.Name()
// Does it look like a datadir directory?
- if !entry.IsDir() || stringid.ValidateID(id) != nil {
+ if !entry.IsDir() {
continue
}
@@ -304,7 +302,15 @@ func (d *Driver) SupportsShifting() bool {
// UpdateLayerIDMap updates ID mappings in a from matching the ones specified
// by toContainer to those specified by toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
- return d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel)
+ if err := d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel); err != nil {
+ return err
+ }
+ dir := d.dir(id)
+ rootIDs, err := toHost.ToHost(idtools.IDPair{UID: 0, GID: 0})
+ if err != nil {
+ return err
+ }
+ return os.Chown(dir, rootIDs.UID, rootIDs.GID)
}
// Changes produces a list of changes between the specified layer
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 66aa460cf..8c2dc18ae 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -64,8 +64,7 @@ func init() {
}
}
-type checker struct {
-}
+type checker struct{}
func (c *checker) IsMounted(path string) bool {
return false
@@ -102,7 +101,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e
return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home)
}
- if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
+ if err := idtools.MkdirAllAs(home, 0o700, 0, 0); err != nil {
return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err)
}
@@ -885,7 +884,7 @@ func (d *Driver) resolveID(id string) (string, error) {
// setID stores the layerId in disk.
func (d *Driver) setID(id, altID string) error {
- return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+ return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0o600)
}
// getLayerChain returns the layer chain information.
@@ -915,7 +914,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error {
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
- err = os.WriteFile(jPath, content, 0600)
+ err = os.WriteFile(jPath, content, 0o600)
if err != nil {
return fmt.Errorf("unable to write layerchain file - %s", err)
}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index aeef64103..e02289784 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -30,7 +30,7 @@ type zfsOptions struct {
mountOptions string
}
-const defaultPerms = os.FileMode(0555)
+const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("zfs", Init)
@@ -57,7 +57,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites)
}
- file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600)
+ file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0o600)
if err != nil {
logger.Debugf("cannot open /dev/zfs: %v", err)
return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites)
@@ -110,7 +110,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
if err != nil {
return nil, fmt.Errorf("failed to get root uid/gid: %w", err)
}
- if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil {
return nil, fmt.Errorf("failed to create '%s': %w", base, err)
}
@@ -409,7 +409,6 @@ func (d *Driver) Remove(id string) error {
// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
-
mountpoint := d.mountPath(id)
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
@@ -454,7 +453,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
return "", err
}
// Create the target directories if they don't exist
- if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil {
return "", err
}
@@ -506,7 +505,9 @@ func (d *Driver) Exists(id string) bool {
return d.filesystemsCache[d.zfsPath(id)]
}
-// List layers (not including additional image stores)
+// List layers (not including additional image stores). Our layers aren't all
+// dependent on a single well-known dataset, so we can't reliably tell which
+// datasets are ours and which ones just look like they could be ours.
func (d *Driver) ListLayers() ([]string, error) {
return nil, graphdriver.ErrNotSupported
}
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index 9dd196e8b..d71eab08b 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -568,26 +568,28 @@ func (r *imageStore) Save() error {
}
r.lockfile.AssertLockedForWriting()
rpath := r.imagespath()
- if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
jdata, err := json.Marshal(&r.images)
if err != nil {
return err
}
- if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
- return err
- }
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
r.lastWrite = lw
+ if err := ioutils.AtomicWriteFile(rpath, jdata, 0o600); err != nil {
+ return err
+ }
return nil
}
func newImageStore(dir string) (rwImageStore, error) {
- if err := os.MkdirAll(dir, 0700); err != nil {
+ if err := os.MkdirAll(dir, 0o700); err != nil {
return nil, err
}
lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock"))
@@ -1015,11 +1017,11 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
if key == "" {
return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
}
- err := os.MkdirAll(r.datadir(image.ID), 0700)
+ err := os.MkdirAll(r.datadir(image.ID), 0o700)
if err != nil {
return err
}
- err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
+ err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0o600)
if err == nil {
save := false
if image.BigDataSizes == nil {
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 3f37405b0..03c2db696 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -657,7 +657,6 @@ func (r *layerStore) Layers() ([]Layer, error) {
// Requires startWriting.
func (r *layerStore) GarbageCollect() error {
layers, err := r.driver.ListLayers()
-
if err != nil {
if errors.Is(err, drivers.ErrNotSupported) {
return nil
@@ -864,33 +863,35 @@ func (r *layerStore) loadMounts() error {
return err
}
layerMounts := []layerMountPoint{}
- if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
- // Clear all of our mount information. If another process
- // unmounted something, it (along with its zero count) won't
- // have been encoded into the version of mountpoints.json that
- // we're loading, so our count could fall out of sync with it
- // if we don't, and if we subsequently change something else,
- // we'd pass that error along to other process that reloaded
- // the data after we saved it.
- for _, layer := range r.layers {
- layer.MountPoint = ""
- layer.MountCount = 0
- }
- // All of the non-zero count values will have been encoded, so
- // we reset the still-mounted ones based on the contents.
- for _, mount := range layerMounts {
- if mount.MountPoint != "" {
- if layer, ok := r.lookup(mount.ID); ok {
- mounts[mount.MountPoint] = layer
- layer.MountPoint = mount.MountPoint
- layer.MountCount = mount.MountCount
- }
+ if len(data) != 0 {
+ if err := json.Unmarshal(data, &layerMounts); err != nil {
+ return err
+ }
+ }
+ // Clear all of our mount information. If another process
+ // unmounted something, it (along with its zero count) won't
+ // have been encoded into the version of mountpoints.json that
+ // we're loading, so our count could fall out of sync with it
+ // if we don't, and if we subsequently change something else,
+ // we'd pass that error along to other processes that reloaded
+ // the data after we saved it.
+ for _, layer := range r.layers {
+ layer.MountPoint = ""
+ layer.MountCount = 0
+ }
+ // All of the non-zero count values will have been encoded, so
+ // we reset the still-mounted ones based on the contents.
+ for _, mount := range layerMounts {
+ if mount.MountPoint != "" {
+ if layer, ok := r.lookup(mount.ID); ok {
+ mounts[mount.MountPoint] = layer
+ layer.MountPoint = mount.MountPoint
+ layer.MountCount = mount.MountCount
}
}
- err = nil
}
r.bymount = mounts
- return err
+ return nil
}
// save saves the contents of the store to disk.
@@ -920,13 +921,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
}
r.lockfile.AssertLockedForWriting()
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite()
+ if err != nil {
+ return err
+ }
+ r.lastWrite = lw
+
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
location := layerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
- if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
subsetLayers := make([]*Layer, 0, len(r.layers))
@@ -944,16 +953,11 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
if location == volatileLayerLocation {
opts.NoSync = true
}
- if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil {
+ if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil {
return err
}
r.layerspathsModified[locationIndex] = opts.ModTime
}
- lw, err := r.lockfile.RecordWrite()
- if err != nil {
- return err
- }
- r.lastWrite = lw
return nil
}
@@ -965,7 +969,7 @@ func (r *layerStore) saveMounts() error {
}
r.mountsLockfile.AssertLockedForWriting()
mpath := r.mountspath()
- if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(mpath), 0o700); err != nil {
return err
}
mounts := make([]layerMountPoint, 0, len(r.layers))
@@ -982,22 +986,26 @@ func (r *layerStore) saveMounts() error {
if err != nil {
return err
}
- if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
- return err
- }
+
+ // This must be done before we write the file, because the process could be terminated
+ // after the file is written but before the lock file is updated.
lw, err := r.mountsLockfile.RecordWrite()
if err != nil {
return err
}
r.mountsLastWrite = lw
+
+ if err = ioutils.AtomicWriteFile(mpath, jmdata, 0o600); err != nil {
+ return err
+ }
return r.loadMounts()
}
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
- if err := os.MkdirAll(rundir, 0700); err != nil {
+ if err := os.MkdirAll(rundir, 0o700); err != nil {
return nil, err
}
- if err := os.MkdirAll(layerdir, 0700); err != nil {
+ if err := os.MkdirAll(layerdir, 0o700); err != nil {
return nil, err
}
// Note: While the containers.lock file is in rundir for transient stores
@@ -1213,10 +1221,10 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
if !r.lockfile.IsReadWrite() {
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
- if err := os.MkdirAll(r.rundir, 0700); err != nil {
+ if err := os.MkdirAll(r.rundir, 0o700); err != nil {
return nil, -1, err
}
- if err := os.MkdirAll(r.layerdir, 0700); err != nil {
+ if err := os.MkdirAll(r.layerdir, 0o700); err != nil {
return nil, -1, err
}
if id == "" {
@@ -1690,7 +1698,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
- err := os.MkdirAll(r.datadir(layer.ID), 0700)
+ err := os.MkdirAll(r.datadir(layer.ID), 0o700)
if err != nil {
return err
}
@@ -1698,7 +1706,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
// BigData() relies on this behaviour when opening the file for read
// so that it is either accessing the old data or the new one.
- writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
+ writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0o600)
if err != nil {
return fmt.Errorf("opening bigdata file: %w", err)
}
@@ -1922,6 +1930,18 @@ func (r *layerStore) Wipe() error {
return err
}
}
+ ids, err := r.driver.ListLayers()
+ if err != nil {
+ if !errors.Is(err, drivers.ErrNotSupported) {
+ return err
+ }
+ ids = nil
+ }
+ for _, id := range ids {
+ if err := r.driver.Remove(id); err != nil {
+ return err
+ }
+ }
return nil
}
@@ -2198,7 +2218,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
return -1, err
}
compression := archive.DetectCompression(header[:n])
- defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
+ defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff)
// Decide if we need to compute digests
var compressedDigest, uncompressedDigest digest.Digest // = ""
@@ -2270,10 +2290,10 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
return -1, err
}
compressor.Close()
- if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
return -1, err
}
- if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
+ if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
return -1, err
}
if compressedDigester != nil {
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 6209205b3..408e4599c 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -132,13 +132,13 @@ const (
)
const (
- modeISDIR = 040000 // Directory
- modeISFIFO = 010000 // FIFO
- modeISREG = 0100000 // Regular file
- modeISLNK = 0120000 // Symbolic link
- modeISBLK = 060000 // Block special file
- modeISCHR = 020000 // Character special file
- modeISSOCK = 0140000 // Socket
+ modeISDIR = 0o40000 // Directory
+ modeISFIFO = 0o10000 // FIFO
+ modeISREG = 0o100000 // Regular file
+ modeISLNK = 0o120000 // Symbolic link
+ modeISBLK = 0o60000 // Block special file
+ modeISCHR = 0o20000 // Character special file
+ modeISSOCK = 0o140000 // Socket
)
// IsArchivePath checks if the (possibly compressed) file at the given path
@@ -328,7 +328,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
}
pipeWriter.Close()
-
}()
return pipeReader
}
@@ -552,9 +551,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
}
- //handle re-mapping container ID mappings back to host ID mappings before
- //writing tar headers/files. We skip whiteout files because they were written
- //by the kernel and already have proper ownership relative to the host
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
fileIDPair, err := getFileUIDGID(fi.Sys())
if err != nil {
@@ -702,7 +701,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
- value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777)
+ value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777)
if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
@@ -800,7 +799,6 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
-
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
@@ -1032,7 +1030,7 @@ loop:
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs)
if err != nil {
return err
}
@@ -1239,7 +1237,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
- if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
@@ -1266,7 +1264,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
dst = filepath.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
- if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
return err
}
@@ -1422,7 +1420,7 @@ func IsArchive(header []byte) bool {
if compression != Uncompressed {
return true
}
- r := tar.NewReader(bytes.NewBuffer(header))
+ r := tar.NewReader(bytes.NewReader(header))
_, err := r.Next()
return err == nil
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 775bd0766..02995d767 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -153,8 +153,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
return true, nil
}
-type directHandler struct {
-}
+type directHandler struct{}
func (d directHandler) Setxattr(path, name string, value []byte) error {
return unix.Setxattr(path, name, value, 0)
@@ -185,7 +184,7 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) {
}
s, ok := f.Sys().(*syscall.Stat_t)
if ok {
- return s.Uid, s.Gid, s.Mode & 07777, nil
+ return s.Uid, s.Gid, s.Mode & 0o7777, nil
}
return 0, 0, uint32(f.Mode()), nil
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index f8a34c831..88192f220 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -88,7 +88,7 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- mode := uint32(hdr.Mode & 07777)
+ mode := uint32(hdr.Mode & 0o7777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
index e44011775..85a5b3a5d 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -38,18 +38,17 @@ func CanonicalTarNameForPath(p string) (string, error) {
return "", fmt.Errorf("windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
- //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
- permPart |= 0111
- permPart &= 0755
+ permPart |= 0o111
+ permPart &= 0o755
return noPermPart | permPart
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index fc705484e..01c6f30c2 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -131,9 +131,11 @@ func isENOTDIR(err error) bool {
return false
}
-type skipChange func(string) (bool, error)
-type deleteChange func(string, string, os.FileInfo) (string, error)
-type whiteoutChange func(string, string) (bool, error)
+type (
+ skipChange func(string) (bool, error)
+ deleteChange func(string, string, os.FileInfo) (string, error)
+ whiteoutChange func(string, string) (bool, error)
+)
func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) {
var (
@@ -299,7 +301,6 @@ func (info *FileInfo) path() string {
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
-
sizeAtEntry := len(*changes)
if oldInfo == nil {
@@ -373,7 +374,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
-
}
// Changes add changes to file information.
@@ -398,9 +398,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo {
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) {
- var (
- oldRoot, newRoot *FileInfo
- )
+ var oldRoot, newRoot *FileInfo
if oldDir == "" {
emptyDir, err := os.MkdirTemp("", "empty")
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
index 77d3d6f51..f8414717b 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -397,5 +397,4 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
// We didn't find the same path in any older layers, so it was new in this one.
return "", nil
-
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
index 966400e59..1bab94fa5 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
@@ -7,7 +7,6 @@ import (
)
func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {
-
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mtim() != newStat.Mtim() ||
oldStat.Mode() != newStat.Mode() ||
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
index 2c714e8da..55f753bf4 100644
--- a/vendor/github.com/containers/storage/pkg/archive/copy.go
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -297,7 +297,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
-
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index 8fec5af38..713551859 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -85,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(parentPath, 0755)
+ err = os.MkdirAll(parentPath, 0o755)
if err != nil {
return 0, err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
index 14661c411..92b8d05ed 100644
--- a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
+++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
@@ -98,7 +98,7 @@ func parseFileFlags(fflags string) (uint32, uint32, error) {
}
func formatFileFlags(fflags uint32) (string, error) {
- var res = []string{}
+ res := []string{}
for fflags != 0 {
// Extract lowest set bit
fflag := uint32(1) << bits.TrailingZeros32(fflags)
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index 2de95f39a..f221a2283 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -77,7 +77,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
dest = filepath.Clean(dest)
if _, err := os.Stat(dest); os.IsNotExist(err) {
- if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+ if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil {
return err
}
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
index 42ee39f48..f7a16e9f9 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
@@ -8,7 +8,8 @@ import (
func invokeUnpack(decompressedArchive io.Reader,
dest string,
- options *archive.TarOptions, root string) error {
+ options *archive.TarOptions, root string,
+) error {
return archive.Unpack(decompressedArchive, dest, options)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index 8cc0f33b3..259f8c99a 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -27,7 +27,7 @@ func untar() {
var options archive.TarOptions
- //read the options from the pipe "ExtraFiles"
+ // read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
@@ -99,7 +99,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
return fmt.Errorf("untar error on re-exec cmd: %w", err)
}
- //write the options to the pipe for the untar exec to read
+ // write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
return fmt.Errorf("untar json encode to pipe failed: %w", err)
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
index 1395ff8cd..745502204 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
@@ -14,7 +14,8 @@ func chroot(path string) error {
func invokeUnpack(decompressedArchive io.Reader,
dest string,
- options *archive.TarOptions, root string) error {
+ options *archive.TarOptions, root string,
+) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
index 90f453913..71ed094d1 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -26,7 +26,6 @@ type applyLayerResponse struct {
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and rexec.
func applyLayer() {
-
var (
tmpDir string
err error
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
index 7efd12373..14064717a 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -48,8 +48,10 @@ type layersCache struct {
created time.Time
}
-var cacheMutex sync.Mutex
-var cache *layersCache
+var (
+ cacheMutex sync.Mutex
+ cache *layersCache
+)
func (c *layersCache) release() {
cacheMutex.Lock()
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 2a9bdc675..0d1acafec 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -15,8 +15,10 @@ import (
"github.com/vbatts/tar-split/archive/tar"
)
-const RollsumBits = 16
-const holesThreshold = int64(1 << 10)
+const (
+ RollsumBits = 16
+ holesThreshold = int64(1 << 10)
+)
type holesFinder struct {
reader *bufio.Reader
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
index f4dfad822..59df6901e 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
@@ -25,11 +25,15 @@ import (
"math/bits"
)
-const windowSize = 64 // Roll assumes windowSize is a power of 2
-const charOffset = 31
+const (
+ windowSize = 64 // Roll assumes windowSize is a power of 2
+ charOffset = 31
+)
-const blobBits = 13
-const blobSize = 1 << blobBits // 8k
+const (
+ blobBits = 13
+ blobSize = 1 << blobBits // 8k
+)
type RollSum struct {
s1, s2 uint32
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
index 092b03533..5eb9edb38 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -134,7 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
Entries: metadata,
}
- var json = jsoniter.ConfigCompatibleWithStandardLibrary
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
// Generate the manifest
manifest, err := json.Marshal(toc)
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index adc1ad398..711962298 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -558,7 +558,7 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
how := unix.OpenHow{
Flags: flags,
- Mode: uint64(mode & 07777),
+ Mode: uint64(mode & 0o7777),
Resolve: unix.RESOLVE_IN_ROOT,
}
return unix.Openat2(dirfd, name, &how)
@@ -636,7 +636,7 @@ func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.Fil
baseName := filepath.Base(name)
- if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil {
+ if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil {
return nil, err
}
@@ -1384,7 +1384,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
filesToWaitFor := 0
for i, r := range mergedEntries {
if options.ForceMask != nil {
- value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
+ value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777)
r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
r.Mode = int64(*options.ForceMask)
}
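For reference, a minimal sketch of how openat2 with RESOLVE_IN_ROOT confines path resolution, as used in the hunks above. This assumes Linux and golang.org/x/sys/unix; the directory and file names are illustrative:

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    // openUnderRoot resolves name strictly beneath dirfd: symlinks and ".."
    // components cannot escape the root.
    func openUnderRoot(dirfd int, name string) (int, error) {
    	how := unix.OpenHow{
    		Flags:   unix.O_RDONLY | unix.O_CLOEXEC,
    		Resolve: unix.RESOLVE_IN_ROOT,
    	}
    	return unix.Openat2(dirfd, name, &how)
    }

    func main() {
    	dir, err := os.Open("/etc")
    	if err != nil {
    		panic(err)
    	}
    	defer dir.Close()
    	fd, err := openUnderRoot(int(dir.Fd()), "hostname")
    	fmt.Println(fd, err)
    	if err == nil {
    		unix.Close(fd)
    	}
    }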
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index f6e0cfcfe..20d72ca89 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -124,6 +124,11 @@ type OptionsConfig struct {
// for shared image content
AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`
+ // ImageStore is the location of the image store, which is kept separate
+ // from the container store. This is usually not recommended unless users
+ // want separate stores for images and containers.
+ ImageStore string `toml:"imagestore,omitempty"`
+
// AdditionalLayerStores is the location of additional read/only
// Layer stores. Usually used to access Networked File System
// for shared image content
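A sketch of setting the new option when building a store. The DefaultStoreOptions signature and the example path are assumptions for this vintage of containers/storage, not taken from the diff:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage"
    	"github.com/containers/storage/types"
    )

    func main() {
    	opts, err := types.DefaultStoreOptions(false, 0) // assumed signature
    	if err != nil {
    		panic(err)
    	}
    	// Keep image content apart from container content (rarely needed).
    	opts.ImageStore = "/var/lib/containers/imagestore"
    	store, err := storage.GetStore(opts)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(store.GraphRoot())
    }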
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
index 6b9a7afcd..33bf7184e 100644
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
@@ -239,8 +239,8 @@ func (t *Task) getDriverVersion() (string, error) {
}
func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
- length uint64, targetType string, params string) {
-
+ length uint64, targetType string, params string,
+) {
return DmGetNextTarget(t.unmanaged, next, &start, &length,
&targetType, &params),
start, length, targetType, params
@@ -345,8 +345,7 @@ func RemoveDeviceDeferred(name string) error {
// disable udev dm rules and delete the symlink under /dev/mapper by itself,
// even if the removal is deferred by the kernel.
cookie := new(uint)
- var flags uint16
- flags = DmUdevDisableLibraryFallback
+ flags := uint16(DmUdevDisableLibraryFallback)
if err := task.setCookie(cookie, flags); err != nil {
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
}
@@ -384,7 +383,7 @@ func CancelDeferredRemove(deviceName string) error {
return fmt.Errorf("devicemapper: Can't set sector %s", err)
}
- if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil {
+ if err := task.setMessage("@cancel_deferred_remove"); err != nil {
return fmt.Errorf("devicemapper: Can't set message %s", err)
}
@@ -459,8 +458,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
}
cookie := new(uint)
- var flags uint16
- flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+ flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
if err := task.setCookie(cookie, flags); err != nil {
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
index 7baca8126..9aef4c2fb 100644
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
@@ -138,8 +138,8 @@ func dmTaskSetRoFct(task *cdmTask) int {
}
func dmTaskAddTargetFct(task *cdmTask,
- start, size uint64, ttype, params string) int {
-
+ start, size uint64, ttype, params string,
+) int {
Cttype := C.CString(ttype)
defer free(Cttype)
@@ -156,12 +156,11 @@ func dmTaskGetDepsFct(task *cdmTask) *Deps {
}
// golang issue: https://github.com/golang/go/issues/11925
- hdr := reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
- Len: int(Cdeps.count),
- Cap: int(Cdeps.count),
- }
- devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
+ var devices []C.uint64_t
+ devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices))
+ devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps)))
+ devicesHdr.Len = int(Cdeps.count)
+ devicesHdr.Cap = int(Cdeps.count)
deps := &Deps{
Count: uint32(Cdeps.count),
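The rewrite above still goes through reflect.SliceHeader (per the linked golang issue); on Go 1.17+ the same zero-copy view can be built with unsafe.Slice. A standalone sketch using plain uint64 as a stand-in for C.uint64_t:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	backing := []uint64{10, 20, 30, 40}
    	// View three elements starting at &backing[1] without copying.
    	view := unsafe.Slice(&backing[1], 3)
    	fmt.Println(view) // [20 30 40]
    }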
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index bcc2109b6..9d0714b1b 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -183,7 +183,6 @@ func (p *Pattern) Exclusion() bool {
}
func (p *Pattern) match(path string) (bool, error) {
-
if p.regexp == nil {
if err := p.compile(); err != nil {
return false, filepath.ErrBadPattern
@@ -356,12 +355,12 @@ func CreateIfNotExists(path string, isDir bool) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
if isDir {
- return os.MkdirAll(path, 0755)
+ return os.MkdirAll(path, 0o755)
}
- if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return err
}
- f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ f, err := os.OpenFile(path, os.O_CREATE, 0o755)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
index f52239f87..68c8c867d 100644
--- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
+++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
@@ -104,7 +104,7 @@ func CreateIDMappedMount(source, target string, pid int) error {
&attr, uint(unsafe.Sizeof(attr))); err != nil {
return err
}
- if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) {
+ if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) {
return err
}
return moveMount(targetDirFd, target)
@@ -140,7 +140,7 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
for _, m := range idmap {
mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
- return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
+ return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0o600)
}
if err := writeMappings("uid_map", uidMaps); err != nil {
cleanupFunc()
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index daff1e4a9..4701dc5ac 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -91,13 +91,13 @@ func CanAccess(path string, pair IDPair) bool {
}
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
- if isOwner && (perms&0100 == 0100) {
+ if isOwner && (perms&0o100 == 0o100) {
return true
}
- if isGroup && (perms&0010 == 0010) {
+ if isGroup && (perms&0o010 == 0o010) {
return true
}
- if perms&0001 == 0001 {
+ if perms&0o001 == 0o001 {
return true
}
return false
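The masks above are the execute bits in octal: 0o100 owner, 0o010 group, 0o001 other. A quick worked check against an os.FileMode (values illustrative):

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	perms := os.FileMode(0o754) // owner rwx, group r-x, other r--
    	fmt.Println(perms&0o100 == 0o100) // true: owner can execute
    	fmt.Println(perms&0o010 == 0o010) // true: group can execute
    	fmt.Println(perms&0o001 == 0o001) // false: others cannot
    }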
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
index 40e507f77..ac27718de 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
@@ -89,7 +89,6 @@ func addUser(userName string) error {
}
func createSubordinateRanges(name string) error {
-
// first, we should verify that ranges weren't automatically created
// by the distro tooling
ranges, err := readSubuid(name)
diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
index 33a7dee6c..b3772bdb3 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
@@ -19,8 +19,8 @@ func resolveBinary(binname string) (string, error) {
if err != nil {
return "", err
}
- //only return no error if the final resolved binary basename
- //matches what was searched for
+ // only return no error if the final resolved binary basename
+ // matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index 335980914..a357b809e 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -135,7 +135,7 @@ func openLock(path string, ro bool) (fd int, err error) {
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
- if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
+ if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fd, fmt.Errorf("creating lock file directory: %w", err)
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
index 09f2aca5c..ca27a483d 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
@@ -138,6 +138,7 @@ func (l *LockFile) Modified() (bool, error) {
func (l *LockFile) Touch() error {
return nil
}
+
func (l *LockFile) IsReadWrite() bool {
return false
}
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
index de10e3324..b8bfa5897 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -26,7 +26,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 {
}
func getNextFreeLoopbackIndex() (int, error) {
- f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
+ f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644)
if err != nil {
return 0, err
}
@@ -67,7 +67,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
}
// OpenFile adds O_CLOEXEC
- loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
+ loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644)
if err != nil {
logrus.Errorf("Opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
@@ -114,7 +114,6 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
// AttachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *os.File.
func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
-
// Try to retrieve the next available loopback device via syscall.
// If it fails, we discard error and start looping for a
// loopback from index 0.
@@ -124,7 +123,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
}
// OpenFile adds O_CLOEXEC
- sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+ sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644)
if err != nil {
logrus.Errorf("Opening sparse file: %v", err)
return nil, ErrAttachLoopbackDevice
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
index b30da9fad..4b7fdee83 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
@@ -24,7 +24,6 @@ func (k *VersionInfo) String() string {
// GetKernelVersion gets the current kernel version.
func GetKernelVersion() (*VersionInfo, error) {
-
var (
h windows.Handle
dwVersion uint32
diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/vendor/github.com/containers/storage/pkg/regexp/regexp.go
index 458b83378..17b7ce8a3 100644
--- a/vendor/github.com/containers/storage/pkg/regexp/regexp.go
+++ b/vendor/github.com/containers/storage/pkg/regexp/regexp.go
@@ -11,7 +11,7 @@ import (
// of apps that want to use global regex variables. This library initializes them on
// first use as opposed to the start of the executable.
type Regexp struct {
- once sync.Once
+ once *sync.Once
regexp *regexp.Regexp
val string
}
@@ -22,7 +22,10 @@ func Delayed(val string) Regexp {
}
if precompile {
re.regexp = regexp.MustCompile(re.val)
+ } else {
+ re.once = &sync.Once{}
}
+
return re
}
@@ -44,6 +47,7 @@ func (re *Regexp) ExpandString(dst []byte, template string, src string, match []
re.compile()
return re.regexp.ExpandString(dst, template, src, match)
}
+
func (re *Regexp) Find(b []byte) []byte {
re.compile()
return re.regexp.Find(b)
@@ -153,6 +157,7 @@ func (re *Regexp) MatchReader(r io.RuneReader) bool {
re.compile()
return re.regexp.MatchReader(r)
}
+
func (re *Regexp) MatchString(s string) bool {
re.compile()
return re.regexp.MatchString(s)
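From the caller's side the lazily compiled wrapper reads like a plain *regexp.Regexp; a minimal sketch with an illustrative pattern:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/regexp"
    )

    // Compiled on first use rather than at process start.
    var ident = regexp.Delayed(`^[a-z][a-z0-9]*$`)

    func main() {
    	fmt.Println(ident.MatchString("layer0")) // true
    	fmt.Println(ident.MatchString("0layer")) // false
    }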
diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
index 3ae44fd8a..20abc7407 100644
--- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go
+++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
@@ -63,7 +63,7 @@ func generateID(r io.Reader) string {
}
}
-// GenerateRandomID returns a unique id.
+// GenerateRandomID returns a pseudorandom 64-character hex string.
func GenerateRandomID() string {
return generateID(cryptorand.Reader)
}
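A usage sketch matching the corrected doc comment:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/stringid"
    )

    func main() {
    	id := stringid.GenerateRandomID()
    	fmt.Println(len(id), id) // 64, a pseudorandom hex string
    }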
diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go
index 288318985..b87d419b5 100644
--- a/vendor/github.com/containers/storage/pkg/system/errors.go
+++ b/vendor/github.com/containers/storage/pkg/system/errors.go
@@ -4,7 +4,5 @@ import (
"errors"
)
-var (
- // ErrNotSupportedPlatform means the platform is not supported.
- ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
-)
+// ErrNotSupportedPlatform means the platform is not supported.
+var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go
index 019c66441..5f6fea1d3 100644
--- a/vendor/github.com/containers/storage/pkg/system/init_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go
@@ -13,5 +13,4 @@ func init() {
if os.Getenv("LCOW_SUPPORTED") != "" {
lcowSupported = true
}
-
}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
index df53c40e2..a90b23e03 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -84,7 +84,6 @@ func getFreeMem() int64 {
//
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
-
ppKernel := C.getPpKernel()
MemTotal := getTotalMem()
MemFree := getFreeMem()
diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go
index f634a6be6..ca076f2bc 100644
--- a/vendor/github.com/containers/storage/pkg/system/path.go
+++ b/vendor/github.com/containers/storage/pkg/system/path.go
@@ -17,5 +17,4 @@ func DefaultPathEnv(platform string) string {
return ""
}
return defaultUnixPathEnv
-
}
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
index 60c7d8bd9..5917fa251 100644
--- a/vendor/github.com/containers/storage/pkg/system/rm.go
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -30,6 +30,12 @@ func EnsureRemoveAll(dir string) error {
exitOnErr := make(map[string]int)
maxRetry := 100
+ // Attempt a simple remove-all first; this avoids the more expensive
+ // RecursiveUnmount call if not needed.
+ if err := os.RemoveAll(dir); err == nil {
+ return nil
+ }
+
// Attempt to unmount anything beneath this dir first
if err := mount.RecursiveUnmount(dir); err != nil {
		logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
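A hedged usage sketch of the fast path added above: callers see the same API, but a plain os.RemoveAll is tried before the recursive-unmount fallback. The path is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/system"
    )

    func main() {
    	if err := system.EnsureRemoveAll("/tmp/example-layer-dir"); err != nil {
    		fmt.Println("remove failed:", err)
    	}
    }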
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go
index e965c54c2..2f44d18b6 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_common.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go
@@ -3,8 +3,7 @@
package system
-type platformStatT struct {
-}
+type platformStatT struct{}
// Flags return file flags if supported or zero otherwise
func (s StatT) Flags() uint32 {
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
index 715f05b93..57850a883 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
@@ -4,10 +4,12 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtimespec}, nil
+ mtim: s.Mtimespec,
+ }, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
index 9c510468f..4b95073a3 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
@@ -13,13 +13,15 @@ func (s StatT) Flags() uint32 {
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- st := &StatT{size: s.Size,
+ st := &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec,
- dev: s.Dev}
+ dev: s.Dev,
+ }
st.flags = s.Flags
st.dev = s.Dev
return st, nil
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
index e5dcba822..e3d13463f 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
@@ -4,13 +4,15 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtim,
- dev: uint64(s.Dev)}, nil
+ dev: uint64(s.Dev),
+ }, nil
}
// FromStatT converts a syscall.Stat_t type to a system.Stat_t type
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
index b607dea94..a413e1714 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
@@ -4,10 +4,12 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtim}, nil
+ mtim: s.Mtim,
+ }, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
index b607dea94..a413e1714 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
@@ -4,10 +4,12 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
- return &StatT{size: s.Size,
+ return &StatT{
+ size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
- mtim: s.Mtim}, nil
+ mtim: s.Mtim,
+ }, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
index 81edaadbb..6d5c6c142 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
@@ -65,5 +65,6 @@ func fromStatT(fi *os.FileInfo) (*StatT, error) {
return &StatT{
size: (*fi).Size(),
mode: (*fi).Mode(),
- mtim: (*fi).ModTime()}, nil
+ mtim: (*fi).ModTime(),
+ }, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
index b81793adc..c14a5cc4d 100644
--- a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
+++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
@@ -102,9 +102,7 @@ func (idx *TruncIndex) Get(s string) (string, error) {
if s == "" {
return "", ErrEmptyPrefix
}
- var (
- id string
- )
+ var id string
subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
if id != "" {
// we haven't found the ID if there are two or more IDs
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index 99cc94a36..93a9a236e 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -55,7 +55,7 @@ additionalimagestores = [
# can deduplicate pulling of content, disk storage of content and can allow the
# kernel to use less memory when running containers.
-# containers/storage supports four keys
+# containers/storage supports three keys
# * enable_partial_images="true" | "false"
# Tells containers/storage to look for files previously pulled in storage
# rather than always pulling them from the container registry.
@@ -75,8 +75,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre
# mappings which the kernel will allow when you later attempt to run a
# container.
#
-# remap-uids = 0:1668442479:65536
-# remap-gids = 0:1668442479:65536
+# remap-uids = "0:1668442479:65536"
+# remap-gids = "0:1668442479:65536"
# Remap-User/Group is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
@@ -84,7 +84,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre
# range that matches the specified name, and using the length of that range.
# Additional ranges are then assigned, using the ranges which specify the
# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
-# until all of the entries have been used for maps.
+# until all of the entries have been used for maps. This setting overrides the
+# Remap-UIDs/GIDs setting.
#
# remap-user = "containers"
# remap-group = "containers"
@@ -100,7 +101,7 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre
# Auto-userns-min-size is the minimum size for a user namespace created automatically.
# auto-userns-min-size=1024
#
-# Auto-userns-max-size is the minimum size for a user namespace created automatically.
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
# auto-userns-max-size=65536
[storage.options.overlay]
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 4c4082084..e75420089 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -520,6 +520,13 @@ type Store interface {
// references in the json files. These can happen in the case of unclean
// shutdowns or regular restarts in transient store mode.
GarbageCollect() error
+
+ // Check returns a report of things that look wrong in the store.
+ Check(options *CheckOptions) (CheckReport, error)
+ // Repair attempts to remediate problems mentioned in the CheckReport,
+ // usually by deleting layers and images which are damaged. If the
+ // right options are set, it will remove containers as well.
+ Repair(report CheckReport, options *RepairOptions) []error
}
// AdditionalLayer represents a layer that is contained in the additional layer store
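A sketch of driving the two new interface methods. The CheckOptions and RepairOptions fields are not shown in this hunk, so the zero-value options here are an assumption:

    package storecheck

    import (
    	"fmt"

    	"github.com/containers/storage"
    )

    func fsck(s storage.Store) {
    	report, err := s.Check(&storage.CheckOptions{}) // assumed zero-value options
    	if err != nil {
    		fmt.Println("check failed:", err)
    		return
    	}
    	// Repair returns one error per problem it could not fix.
    	for _, e := range s.Repair(report, &storage.RepairOptions{}) {
    		fmt.Println("repair:", e)
    	}
    }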
@@ -661,6 +668,7 @@ type store struct {
usernsLock *lockfile.LockFile
graphRoot string
graphOptions []string
+ imageStoreDir string
pullOptions map[string]string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
@@ -668,6 +676,7 @@ type store struct {
autoNsMinSize uint32
autoNsMaxSize uint32
imageStore rwImageStore
+ rwImageStores []rwImageStore
roImageStores []roImageStore
containerStore rwContainerStore
digestLockRoot string
@@ -749,15 +758,25 @@ func GetStore(options types.StoreOptions) (Store, error) {
options.RunRoot = defaultOpts.RunRoot
}
- if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
+ if err := os.MkdirAll(options.RunRoot, 0o700); err != nil {
return nil, err
}
- if err := os.MkdirAll(options.GraphRoot, 0700); err != nil {
+ if err := os.MkdirAll(options.GraphRoot, 0o700); err != nil {
return nil, err
}
- if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0700); err != nil {
+ if options.ImageStore != "" {
+ if err := os.MkdirAll(options.ImageStore, 0o700); err != nil {
+ return nil, err
+ }
+ }
+ if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0o700); err != nil {
return nil, err
}
+ if options.ImageStore != "" {
+ if err := os.MkdirAll(filepath.Join(options.ImageStore, options.GraphDriverName), 0o700); err != nil {
+ return nil, err
+ }
+ }
graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock"))
if err != nil {
@@ -785,6 +804,7 @@ func GetStore(options types.StoreOptions) (Store, error) {
usernsLock: usernsLock,
graphRoot: options.GraphRoot,
graphOptions: options.GraphDriverOptions,
+ imageStoreDir: options.ImageStore,
pullOptions: options.PullOptions,
uidMap: copyIDMap(options.UIDMap),
gidMap: copyIDMap(options.GIDMap),
@@ -889,8 +909,12 @@ func (s *store) load() error {
}
driverPrefix := s.graphDriverName + "-"
- gipath := filepath.Join(s.graphRoot, driverPrefix+"images")
- if err := os.MkdirAll(gipath, 0700); err != nil {
+ imgStoreRoot := s.imageStoreDir
+ if imgStoreRoot == "" {
+ imgStoreRoot = s.graphRoot
+ }
+ gipath := filepath.Join(imgStoreRoot, driverPrefix+"images")
+ if err := os.MkdirAll(gipath, 0o700); err != nil {
return err
}
ris, err := newImageStore(gipath)
@@ -900,11 +924,11 @@ func (s *store) load() error {
s.imageStore = ris
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
- if err := os.MkdirAll(gcpath, 0700); err != nil {
+ if err := os.MkdirAll(gcpath, 0o700); err != nil {
return err
}
rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
- if err := os.MkdirAll(rcpath, 0700); err != nil {
+ if err := os.MkdirAll(rcpath, 0o700); err != nil {
return err
}
@@ -917,15 +941,28 @@ func (s *store) load() error {
for _, store := range driver.AdditionalImageStores() {
gipath := filepath.Join(store, driverPrefix+"images")
- ris, err := newROImageStore(gipath)
- if err != nil {
- return err
+ var ris roImageStore
+ if s.imageStoreDir != "" && store == s.graphRoot {
+ // If --imagestore was set and the current store
+ // is `graphRoot`, then mount it as a `rw` additional
+ // store instead of a `readonly` additional store.
+ imageStore, err := newImageStore(gipath)
+ if err != nil {
+ return err
+ }
+ s.rwImageStores = append(s.rwImageStores, imageStore)
+ ris = imageStore
+ } else {
+ ris, err = newROImageStore(gipath)
+ if err != nil {
+ return err
+ }
}
s.roImageStores = append(s.roImageStores, ris)
}
s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
- if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
+ if err := os.MkdirAll(s.digestLockRoot, 0o700); err != nil {
return err
}
@@ -989,8 +1026,15 @@ func (s *store) stopUsingGraphDriver() {
// Almost all users should use startUsingGraphDriver instead.
// The caller must hold s.graphLock.
func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
+ driverRoot := s.imageStoreDir
+ imageStoreBase := s.graphRoot
+ if driverRoot == "" {
+ driverRoot = s.graphRoot
+ imageStoreBase = ""
+ }
config := drivers.Options{
- Root: s.graphRoot,
+ Root: driverRoot,
+ ImageStore: imageStoreBase,
RunRoot: s.runRoot,
DriverPriority: s.graphDriverPriority,
DriverOptions: s.graphOptions,
@@ -1017,11 +1061,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) {
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
- if err := os.MkdirAll(rlpath, 0700); err != nil {
+ if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
- glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
- if err := os.MkdirAll(glpath, 0700); err != nil {
+ imgStoreRoot := s.imageStoreDir
+ if imgStoreRoot == "" {
+ imgStoreRoot = s.graphRoot
+ }
+ glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers")
+ if err := os.MkdirAll(glpath, 0o700); err != nil {
return nil, err
}
rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore)
@@ -1052,7 +1100,7 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
- if err := os.MkdirAll(rlpath, 0700); err != nil {
+ if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
for _, store := range s.graphDriver.AdditionalImageStores() {
@@ -1081,7 +1129,7 @@ func (s *store) bothLayerStoreKindsLocked() (rwLayerStore, []roLayerStore, error
}
// bothLayerStoreKinds returns the primary, and additional read-only, layer store objects used by the store.
-// It must be called with s.graphLock held.
+// It must be called WITHOUT s.graphLock held.
func (s *store) bothLayerStoreKinds() (rwLayerStore, []roLayerStore, error) {
if err := s.startUsingGraphDriver(); err != nil {
return nil, nil, err
@@ -1115,51 +1163,54 @@ func (s *store) allLayerStores() ([]roLayerStore, error) {
// readAllLayerStores processes allLayerStores() in order:
// It locks the store for reading, checks for updates, and calls
//
-// (done, err) := fn(store)
+// (data, done, err) := fn(store)
//
// until the callback returns done == true, and returns the data from the callback.
//
-// If reading any layer store fails, it immediately returns (true, err).
+// If reading any layer store fails, it immediately returns ({}, true, err).
//
-// If all layer stores are processed without setting done == true, it returns (false, nil).
+// If all layer stores are processed without setting done == true, it returns ({}, false, nil).
//
// Typical usage:
//
-// var res T = failureValue
-// if done, err := s.readAllLayerStores(store, func(…) {
+// if res, done, err := s.readAllLayerStores(store, func(…) {
// …
// }; done {
// return res, err
// }
-func (s *store) readAllLayerStores(fn func(store roLayerStore) (bool, error)) (bool, error) {
+func readAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
layerStores, err := s.allLayerStores()
if err != nil {
- return true, err
+ return zeroRes, true, err
}
for _, s := range layerStores {
store := s
if err := store.startReading(); err != nil {
- return true, err
+ return zeroRes, true, err
}
defer store.stopReading()
- if done, err := fn(store); done {
- return true, err
+ if res, done, err := fn(store); done {
+ return res, true, err
}
}
- return false, nil
+ return zeroRes, false, nil
}
// writeToLayerStore is a helper for working with store.getLayerStore():
// It locks the store for writing, checks for updates, and calls fn()
// It returns the return value of fn, or its own error initializing the store.
-func (s *store) writeToLayerStore(fn func(store rwLayerStore) error) error {
+func writeToLayerStore[T any](s *store, fn func(store rwLayerStore) (T, error)) (T, error) {
+ var zeroRes T // A zero value of T
+
store, err := s.getLayerStore()
if err != nil {
- return err
+ return zeroRes, err
}
if err := store.startWriting(); err != nil {
- return err
+ return zeroRes, err
}
defer store.stopWriting()
return fn(store)
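The helpers are now free functions with a type parameter, so the callback returns its result instead of writing to a captured variable. A self-contained sketch of the same (T, bool, error) scan shape, with made-up names:

    package main

    import "fmt"

    // scan visits items in order until fn reports done, returning fn's value.
    func scan[T any](items []string, fn func(string) (T, bool, error)) (T, bool, error) {
    	var zero T
    	for _, it := range items {
    		if res, done, err := fn(it); done {
    			return res, true, err
    		}
    	}
    	return zero, false, nil
    }

    func main() {
    	res, done, err := scan([]string{"a", "bb", "ccc"}, func(s string) (int, bool, error) {
    		if len(s) > 1 {
    			return len(s), true, nil // first match wins
    		}
    		return 0, false, nil
    	})
    	fmt.Println(res, done, err) // 2 true <nil>
    }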
@@ -1174,53 +1225,69 @@ func (s *store) allImageStores() []roImageStore {
// readAllImageStores processes allImageStores() in order:
// It locks the store for reading, checks for updates, and calls
//
-// (done, err) := fn(store)
+// (data, done, err) := fn(store)
//
// until the callback returns done == true, and returns the data from the callback.
//
-// If reading any Image store fails, it immediately returns (true, err).
+// If reading any Image store fails, it immediately returns ({}, true, err).
//
-// If all Image stores are processed without setting done == true, it returns (false, nil).
+// If all Image stores are processed without setting done == true, it returns ({}, false, nil).
//
// Typical usage:
//
-// var res T = failureValue
-// if done, err := s.readAllImageStores(store, func(…) {
+// if res, done, err := readAllImageStores(store, func(…) {
// …
// }; done {
// return res, err
// }
-func (s *store) readAllImageStores(fn func(store roImageStore) (bool, error)) (bool, error) {
+func readAllImageStores[T any](s *store, fn func(store roImageStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
for _, s := range s.allImageStores() {
store := s
if err := store.startReading(); err != nil {
- return true, err
+ return zeroRes, true, err
}
defer store.stopReading()
- if done, err := fn(store); done {
- return true, err
+ if res, done, err := fn(store); done {
+ return res, true, err
}
}
- return false, nil
+ return zeroRes, false, nil
}
-// writeToImageStore is a convenience helper for working with store.getImageStore():
+// writeToImageStore is a convenience helper for working with store.imageStore:
// It locks the store for writing, checks for updates, and calls fn(), which can then access store.imageStore.
// It returns the return value of fn, or its own error initializing the store.
-func (s *store) writeToImageStore(fn func() error) error {
+func writeToImageStore[T any](s *store, fn func() (T, error)) (T, error) {
if err := s.imageStore.startWriting(); err != nil {
- return err
+ var zeroRes T // A zero value of T
+ return zeroRes, err
}
defer s.imageStore.stopWriting()
return fn()
}
-// writeToContainerStore is a convenience helper for working with store.getContainerStore():
+// readContainerStore is a convenience helper for working with store.containerStore:
+// It locks the store for reading, checks for updates, and calls fn(), which can then access store.containerStore.
+// If reading the container store fails, it returns ({}, true, err).
+// Returns the return value of fn on success.
+func readContainerStore[T any](s *store, fn func() (T, bool, error)) (T, bool, error) {
+ if err := s.containerStore.startReading(); err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, true, err
+ }
+ defer s.containerStore.stopReading()
+ return fn()
+}
+
+// writeToContainerStore is a convenience helper for working with store.containerStore:
// It locks the store for writing, checks for updates, and calls fn(), which can then access store.containerStore.
// It returns the return value of fn, or its own error initializing the store.
-func (s *store) writeToContainerStore(fn func() error) error {
+func writeToContainerStore[T any](s *store, fn func() (T, error)) (T, error) {
if err := s.containerStore.startWriting(); err != nil {
- return err
+ var zeroRes T // A zero value of T
+ return zeroRes, err
}
defer s.containerStore.stopWriting()
return fn()
@@ -1384,91 +1451,88 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
layer = ilayer.ID
}
- var options ImageOptions
- var namesToAddAfterCreating []string
-
- if err := s.imageStore.startWriting(); err != nil {
- return nil, err
- }
- defer s.imageStore.stopWriting()
+ return writeToImageStore(s, func() (*Image, error) {
+ var options ImageOptions
+ var namesToAddAfterCreating []string
- // Check if the ID refers to an image in a read-only store -- we want
- // to allow images in read-only stores to have their names changed, so
- // if we find one, merge the new values in with what we know about the
- // image that's already there.
- if id != "" {
- for _, is := range s.roImageStores {
- store := is
- if err := store.startReading(); err != nil {
- return nil, err
- }
- defer store.stopReading()
- if i, err := store.Get(id); err == nil {
- // set information about this image in "options"
- options = ImageOptions{
- Metadata: i.Metadata,
- CreationDate: i.Created,
- Digest: i.Digest,
- Digests: copyDigestSlice(i.Digests),
- NamesHistory: copyStringSlice(i.NamesHistory),
+ // Check if the ID refers to an image in a read-only store -- we want
+ // to allow images in read-only stores to have their names changed, so
+ // if we find one, merge the new values in with what we know about the
+ // image that's already there.
+ if id != "" {
+ for _, is := range s.roImageStores {
+ store := is
+ if err := store.startReading(); err != nil {
+ return nil, err
}
- for _, key := range i.BigDataNames {
- data, err := store.BigData(id, key)
- if err != nil {
- return nil, err
+ defer store.stopReading()
+ if i, err := store.Get(id); err == nil {
+ // set information about this image in "options"
+ options = ImageOptions{
+ Metadata: i.Metadata,
+ CreationDate: i.Created,
+ Digest: i.Digest,
+ Digests: copyDigestSlice(i.Digests),
+ NamesHistory: copyStringSlice(i.NamesHistory),
}
- dataDigest, err := store.BigDataDigest(id, key)
- if err != nil {
- return nil, err
+ for _, key := range i.BigDataNames {
+ data, err := store.BigData(id, key)
+ if err != nil {
+ return nil, err
+ }
+ dataDigest, err := store.BigDataDigest(id, key)
+ if err != nil {
+ return nil, err
+ }
+ options.BigData = append(options.BigData, ImageBigDataOption{
+ Key: key,
+ Data: data,
+ Digest: dataDigest,
+ })
}
- options.BigData = append(options.BigData, ImageBigDataOption{
- Key: key,
- Data: data,
- Digest: dataDigest,
- })
+ namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...))
+ break
}
- namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...))
- break
}
}
- }
- // merge any passed-in options into "options" as best we can
- if iOptions != nil {
- if !iOptions.CreationDate.IsZero() {
- options.CreationDate = iOptions.CreationDate
- }
- if iOptions.Digest != "" {
- options.Digest = iOptions.Digest
- }
- options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...)
- if iOptions.Metadata != "" {
- options.Metadata = iOptions.Metadata
+ // merge any passed-in options into "options" as best we can
+ if iOptions != nil {
+ if !iOptions.CreationDate.IsZero() {
+ options.CreationDate = iOptions.CreationDate
+ }
+ if iOptions.Digest != "" {
+ options.Digest = iOptions.Digest
+ }
+ options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...)
+ if iOptions.Metadata != "" {
+ options.Metadata = iOptions.Metadata
+ }
+ options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
+ options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...)
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ for k, v := range iOptions.Flags {
+ options.Flags[k] = v
+ }
}
- options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
- options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...)
- if options.Flags == nil {
- options.Flags = make(map[string]interface{})
+
+ if options.CreationDate.IsZero() {
+ options.CreationDate = time.Now().UTC()
}
- for k, v := range iOptions.Flags {
- options.Flags[k] = v
+ if metadata != "" {
+ options.Metadata = metadata
}
- }
-
- if options.CreationDate.IsZero() {
- options.CreationDate = time.Now().UTC()
- }
- if metadata != "" {
- options.Metadata = metadata
- }
- res, err := s.imageStore.create(id, names, layer, options)
- if err == nil && len(namesToAddAfterCreating) > 0 {
- // set any names we pulled up from an additional image store, now that we won't be
- // triggering a duplicate names error
- err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames)
- }
- return res, err
+ res, err := s.imageStore.create(id, names, layer, options)
+ if err == nil && len(namesToAddAfterCreating) > 0 {
+ // set any names we pulled up from an additional image store, now that we won't be
+ // triggering a duplicate names error
+ err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames)
+ }
+ return res, err
+ })
}
// imageTopLayerForMapping does ???
@@ -1745,16 +1809,14 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.Volatile = true
}
- var container *Container
- err = s.writeToContainerStore(func() error {
+ return writeToContainerStore(s, func() (*Container, error) {
options.IDMappingOptions = types.IDMappingOptions{
HostUIDMapping: len(options.UIDMap) == 0,
HostGIDMapping: len(options.GIDMap) == 0,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
}
- var err error
- container, err = s.containerStore.create(id, names, imageID, layer, &options)
+ container, err := s.containerStore.create(id, names, imageID, layer, &options)
if err != nil || container == nil {
if err2 := rlstore.Delete(layer); err2 != nil {
if err == nil {
@@ -1764,9 +1826,8 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
}
}
- return err
+ return container, err
})
- return container, err
}
func (s *store) SetMetadata(id, metadata string) error {
@@ -1785,49 +1846,46 @@ func (s *store) SetMetadata(id, metadata string) error {
}
func (s *store) Metadata(id string) (string, error) {
- var res string
-
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) {
if store.Exists(id) {
- var err error
- res, err = store.Metadata(id)
- return true, err
+ res, err := store.Metadata(id)
+ return res, true, err
}
- return false, nil
+ return "", false, nil
}); done {
return res, err
}
- if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
if store.Exists(id) {
- var err error
- res, err = store.Metadata(id)
- return true, err
+ res, err := store.Metadata(id)
+ return res, true, err
}
- return false, nil
+ return "", false, nil
}); done {
return res, err
}
- if err := s.containerStore.startReading(); err != nil {
- return "", err
- }
- defer s.containerStore.stopReading()
- if s.containerStore.Exists(id) {
- return s.containerStore.Metadata(id)
+ if res, done, err := readContainerStore(s, func() (string, bool, error) {
+ if s.containerStore.Exists(id) {
+ res, err := s.containerStore.Metadata(id)
+ return res, true, err
+ }
+ return "", false, nil
+ }); done {
+ return res, err
}
+
return "", ErrNotAnID
}
func (s *store) ListImageBigData(id string) ([]string, error) {
- var res []string
- if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) {
bigDataNames, err := store.BigDataNames(id)
if err == nil {
- res = bigDataNames
- return true, nil
+ return bigDataNames, true, nil
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
@@ -1835,29 +1893,28 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
}
func (s *store) ImageBigDataSize(id, key string) (int64, error) {
- var res int64 = -1
- if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (int64, bool, error) {
size, err := store.BigDataSize(id, key)
if err == nil {
- res = size
- return true, nil
+ return size, true, nil
}
- return false, nil
+ return -1, false, nil
}); done {
- return res, err
+ if err != nil {
+ return -1, err
+ }
+ return res, nil
}
return -1, ErrSizeUnknown
}
func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
- var res digest.Digest
- if done, err := s.readAllImageStores(func(ristore roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(ristore roImageStore) (digest.Digest, bool, error) {
d, err := ristore.BigDataDigest(id, key)
if err == nil && d.Validate() == nil {
- res = d
- return true, nil
+ return d, true, nil
}
- return false, nil
+ return "", false, nil
}); done {
return res, err
}
@@ -1866,17 +1923,15 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
func (s *store) ImageBigData(id, key string) ([]byte, error) {
foundImage := false
- var res []byte
- if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]byte, bool, error) {
data, err := store.BigData(id, key)
if err == nil {
- res = data
- return true, nil
+ return data, true, nil
}
if store.Exists(id) {
foundImage = true
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
@@ -1890,17 +1945,15 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
// named data associated with a layer.
func (s *store) ListLayerBigData(id string) ([]string, error) {
foundLayer := false
- var res []string
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) {
data, err := store.BigDataNames(id)
if err == nil {
- res = data
- return true, nil
+ return data, true, nil
}
if store.Exists(id) {
foundLayer = true
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
@@ -1914,17 +1967,15 @@ func (s *store) ListLayerBigData(id string) ([]string, error) {
// associated with a layer.
func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
foundLayer := false
- var res io.ReadCloser
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (io.ReadCloser, bool, error) {
data, err := store.BigData(id, key)
if err == nil {
- res = data
- return true, nil
+ return data, true, nil
}
if store.Exists(id) {
foundLayer = true
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
@@ -1937,15 +1988,17 @@ func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
// SetLayerBigData stores a (possibly large) chunk of named data
// associated with a layer.
func (s *store) SetLayerBigData(id, key string, data io.Reader) error {
- return s.writeToLayerStore(func(store rwLayerStore) error {
- return store.SetBigData(id, key, data)
+ _, err := writeToLayerStore(s, func(store rwLayerStore) (struct{}, error) {
+ return struct{}{}, store.SetBigData(id, key, data)
})
+ return err
}
func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
- return s.writeToImageStore(func() error {
- return s.imageStore.SetBigData(id, key, data, digestManifest)
+ _, err := writeToImageStore(s, func() (struct{}, error) {
+ return struct{}{}, s.imageStore.SetBigData(id, key, data, digestManifest)
})
+ return err
}
func (s *store) ImageSize(id string) (int64, error) {
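When a caller has no value to return, the generic write helpers are instantiated with struct{} as a unit type, as SetLayerBigData does above. A standalone sketch of the idiom:

    package main

    import "fmt"

    func withLock[T any](fn func() (T, error)) (T, error) {
    	// locking elided in this sketch
    	return fn()
    }

    func main() {
    	// No meaningful result: instantiate with the empty struct.
    	_, err := withLock(func() (struct{}, error) {
    		fmt.Println("side effect only")
    		return struct{}{}, nil
    	})
    	fmt.Println(err)
    }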
@@ -2066,12 +2119,11 @@ func (s *store) ContainerSize(id string) (int64, error) {
return -1, err
}
- var res int64 = -1
- err = s.writeToContainerStore(func() error { // Yes, s.containerStore.BigDataSize requires a write lock.
+ return writeToContainerStore(s, func() (int64, error) { // Yes, s.containerStore.BigDataSize requires a write lock.
// Read the container record.
container, err := s.containerStore.Get(id)
if err != nil {
- return err
+ return -1, err
}
// Read the container's layer's size.
@@ -2081,24 +2133,24 @@ func (s *store) ContainerSize(id string) (int64, error) {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
- return fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
+ return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
}
break
}
}
if layer == nil {
- return fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
+ return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
}
// Count big data items.
names, err := s.containerStore.BigDataNames(id)
if err != nil {
- return fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
+ return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
}
for _, name := range names {
n, err := s.containerStore.BigDataSize(id, name)
if err != nil {
- return fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
+ return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
}
size += n
}
@@ -2106,92 +2158,88 @@ func (s *store) ContainerSize(id string) (int64, error) {
// Count the size of our container directory and container run directory.
n, err := directory.Size(cdir)
if err != nil {
- return err
+ return -1, err
}
size += n
n, err = directory.Size(rdir)
if err != nil {
- return err
+ return -1, err
}
size += n
- res = size
- return nil
+ return size, nil
})
- return res, err
}
func (s *store) ListContainerBigData(id string) ([]string, error) {
- if err := s.containerStore.startReading(); err != nil {
- return nil, err
- }
- defer s.containerStore.stopReading()
-
- return s.containerStore.BigDataNames(id)
+ res, _, err := readContainerStore(s, func() ([]string, bool, error) {
+ res, err := s.containerStore.BigDataNames(id)
+ return res, true, err
+ })
+ return res, err
}
func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
- var res int64 = -1
- err := s.writeToContainerStore(func() error { // Yes, BigDataSize requires a write lock.
- var err error
- res, err = s.containerStore.BigDataSize(id, key)
- return err
+ return writeToContainerStore(s, func() (int64, error) { // Yes, BigDataSize requires a write lock.
+ return s.containerStore.BigDataSize(id, key)
})
- return res, err
}
func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
- var res digest.Digest
- err := s.writeToContainerStore(func() error { // Yes, BigDataDigest requires a write lock.
- var err error
- res, err = s.containerStore.BigDataDigest(id, key)
- return err
+ return writeToContainerStore(s, func() (digest.Digest, error) { // Yes, BigDataDigest requires a write lock.
+ return s.containerStore.BigDataDigest(id, key)
})
- return res, err
}
func (s *store) ContainerBigData(id, key string) ([]byte, error) {
- if err := s.containerStore.startReading(); err != nil {
- return nil, err
- }
- defer s.containerStore.stopReading()
- return s.containerStore.BigData(id, key)
+ res, _, err := readContainerStore(s, func() ([]byte, bool, error) {
+ res, err := s.containerStore.BigData(id, key)
+ return res, true, err
+ })
+ return res, err
}
func (s *store) SetContainerBigData(id, key string, data []byte) error {
- return s.writeToContainerStore(func() error {
- return s.containerStore.SetBigData(id, key, data)
+ _, err := writeToContainerStore(s, func() (struct{}, error) {
+ return struct{}{}, s.containerStore.SetBigData(id, key, data)
})
+ return err
}
func (s *store) Exists(id string) bool {
- var res = false
-
- if done, _ := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ found, _, err := readAllLayerStores(s, func(store roLayerStore) (bool, bool, error) {
if store.Exists(id) {
- res = true
- return true, nil
+ return true, true, nil
}
- return false, nil
- }); done {
- return res
+ return false, false, nil
+ })
+ if err != nil {
+ return false
+ }
+ if found {
+ return true
}
- if done, _ := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ found, _, err = readAllImageStores(s, func(store roImageStore) (bool, bool, error) {
if store.Exists(id) {
- res = true
- return true, nil
+ return true, true, nil
}
- return false, nil
- }); done {
- return res
+ return false, false, nil
+ })
+ if err != nil {
+ return false
+ }
+ if found {
+ return true
}
- if err := s.containerStore.startReading(); err != nil {
+ found, _, err = readContainerStore(s, func() (bool, bool, error) {
+ return s.containerStore.Exists(id), true, nil
+ })
+ if err != nil {
return false
}
- defer s.containerStore.stopReading()
- return s.containerStore.Exists(id)
+ return found
}
func dedupeStrings(names []string) []string {
@@ -2234,14 +2282,12 @@ func (s *store) RemoveNames(id string, names []string) error {
func (s *store) updateNames(id string, names []string, op updateNameOperation) error {
deduped := dedupeStrings(names)
- layerFound := false
- if err := s.writeToLayerStore(func(rlstore rwLayerStore) error {
+ if found, err := writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) {
if !rlstore.Exists(id) {
- return nil
+ return false, nil
}
- layerFound = true
- return rlstore.updateNames(id, deduped, op)
- }); err != nil || layerFound {
+ return true, rlstore.updateNames(id, deduped, op)
+ }); err != nil || found {
return err
}
@@ -2295,14 +2341,12 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e
}
}
- containerFound := false
- if err := s.writeToContainerStore(func() error {
+ if found, err := writeToContainerStore(s, func() (bool, error) {
if !s.containerStore.Exists(id) {
- return nil
+ return false, nil
}
- containerFound = true
- return s.containerStore.updateNames(id, deduped, op)
- }); err != nil || containerFound {
+ return true, s.containerStore.updateNames(id, deduped, op)
+ }); err != nil || found {
return err
}
@@ -2310,67 +2354,62 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e
}
func (s *store) Names(id string) ([]string, error) {
- var res []string
-
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) {
if l, err := store.Get(id); l != nil && err == nil {
- res = l.Names
- return true, nil
+ return l.Names, true, nil
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
- if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) {
if i, err := store.Get(id); i != nil && err == nil {
- res = i.Names
- return true, nil
+ return i.Names, true, nil
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
- if err := s.containerStore.startReading(); err != nil {
- return nil, err
- }
- defer s.containerStore.stopReading()
- if c, err := s.containerStore.Get(id); c != nil && err == nil {
- return c.Names, nil
+ if res, done, err := readContainerStore(s, func() ([]string, bool, error) {
+ if c, err := s.containerStore.Get(id); c != nil && err == nil {
+ return c.Names, true, nil
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
}
+
return nil, ErrLayerUnknown
}
func (s *store) Lookup(name string) (string, error) {
- var res string
-
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) {
if l, err := store.Get(name); l != nil && err == nil {
- res = l.ID
- return true, nil
+ return l.ID, true, nil
}
- return false, nil
+ return "", false, nil
}); done {
return res, err
}
- if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
if i, err := store.Get(name); i != nil && err == nil {
- res = i.ID
- return true, nil
+ return i.ID, true, nil
}
- return false, nil
+ return "", false, nil
}); done {
return res, err
}
- if err := s.containerStore.startReading(); err != nil {
- return "", err
- }
- defer s.containerStore.stopReading()
- if c, err := s.containerStore.Get(name); c != nil && err == nil {
- return c.ID, nil
+ if res, done, err := readContainerStore(s, func() (string, bool, error) {
+ if c, err := s.containerStore.Get(name); c != nil && err == nil {
+ return c.ID, true, nil
+ }
+ return "", false, nil
+ }); done {
+ return res, err
}
return "", ErrLayerUnknown
@@ -2430,8 +2469,22 @@ func (s *store) DeleteLayer(id string) error {
func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
layersToRemove := []string{}
if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
- if s.imageStore.Exists(id) {
- image, err := s.imageStore.Get(id)
+ // Delete image from all available imagestores configured to be used.
+ imageFound := false
+ for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) {
+ if is != s.imageStore {
+ // This is an additional writable image store,
+ // so we must take its lock explicitly.
+ if err := is.startWriting(); err != nil {
+ return err
+ }
+ defer is.stopWriting()
+ }
+ if !is.Exists(id) {
+ continue
+ }
+ imageFound = true
+ image, err := is.Get(id)
if err != nil {
return err
}
@@ -2447,7 +2500,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
if container, ok := aContainerByImage[id]; ok {
return fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer)
}
- images, err := s.imageStore.Images()
+ images, err := is.Images()
if err != nil {
return err
}
@@ -2469,7 +2522,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
}
}
if commit {
- if err = s.imageStore.Delete(id); err != nil {
+ if err = is.Delete(id); err != nil {
return err
}
}
@@ -2514,7 +2567,8 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
layersToRemoveMap[layer] = struct{}{}
layer = parent
}
- } else {
+ }
+ if !imageFound {
return ErrNotAnImage
}
if commit {
@@ -2542,63 +2596,45 @@ func (s *store) DeleteContainer(id string) error {
return ErrNotAContainer
}
- errChan := make(chan error)
- var wg sync.WaitGroup
-
+ // delete the layer first, separately, so that if we get an
+ // error while trying to do so, we don't go ahead and delete
+ // the container record that refers to it, effectively losing
+ // track of it
if rlstore.Exists(container.LayerID) {
- wg.Add(1)
- go func() {
- errChan <- rlstore.Delete(container.LayerID)
- wg.Done()
- }()
- }
- wg.Add(1)
- go func() {
- errChan <- s.containerStore.Delete(id)
- wg.Done()
- }()
+ if err := rlstore.Delete(container.LayerID); err != nil {
+ return err
+ }
+ }
+
+ var wg multierror.Group
+ wg.Go(func() error { return s.containerStore.Delete(id) })
middleDir := s.graphDriverName + "-containers"
- gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
- wg.Add(1)
- go func() {
- defer wg.Done()
+
+ wg.Go(func() error {
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
// attempt a simple rm -rf first
- err := os.RemoveAll(gcpath)
- if err == nil {
- errChan <- nil
- return
+ if err := os.RemoveAll(gcpath); err == nil {
+ return nil
}
// and if it fails get to the more complicated cleanup
- errChan <- system.EnsureRemoveAll(gcpath)
- }()
+ return system.EnsureRemoveAll(gcpath)
+ })
- rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() error {
+ rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
// attempt a simple rm -rf first
- err := os.RemoveAll(rcpath)
- if err == nil {
- errChan <- nil
- return
+ if err := os.RemoveAll(rcpath); err == nil {
+ return nil
}
// and if it fails get to the more complicated cleanup
- errChan <- system.EnsureRemoveAll(rcpath)
- }()
+ return system.EnsureRemoveAll(rcpath)
+ })
- go func() {
- wg.Wait()
- close(errChan)
- }()
-
- var errors []error
- for err := range errChan {
- if err != nil {
- errors = append(errors, err)
- }
+ if multierr := wg.Wait(); multierr != nil {
+ return multierr.ErrorOrNil()
}
- return multierror.Append(nil, errors...).ErrorOrNil()
+ return nil
})
}
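The DeleteContainer rewrite above replaces the hand-rolled WaitGroup/channel fan-in with hashicorp/go-multierror's Group, which runs each function in a goroutine and collects the errors. A minimal sketch of that pattern with placeholder errors:

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var wg multierror.Group
	// Each Go call runs its function in a goroutine and records the error.
	wg.Go(func() error { return nil })
	wg.Go(func() error { return errors.New("remove run dir: permission denied") })

	// Wait blocks until every function returns; ErrorOrNil collapses an empty
	// collection back to a plain nil error, mirroring the code above.
	if merr := wg.Wait(); merr != nil {
		fmt.Println(merr.ErrorOrNil())
	}
}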
@@ -2756,27 +2792,21 @@ func (s *store) Unmount(id string, force bool) (bool, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
- var res bool
- err := s.writeToLayerStore(func(rlstore rwLayerStore) error {
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) {
if rlstore.Exists(id) {
- var err error
- res, err = rlstore.unmount(id, force, false)
- return err
+ return rlstore.unmount(id, force, false)
}
- return ErrLayerUnknown
+ return false, ErrLayerUnknown
})
- return res, err
}
func (s *store) Changes(from, to string) ([]archive.Change, error) {
- var res []archive.Change
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) {
if store.Exists(to) {
- var err error
- res, err = store.Changes(from, to)
- return true, err
+ res, err := store.Changes(from, to)
+ return res, true, err
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
@@ -2784,16 +2814,17 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) {
}
func (s *store) DiffSize(from, to string) (int64, error) {
- var res int64 = -1
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
if store.Exists(to) {
- var err error
- res, err = store.DiffSize(from, to)
- return true, err
+ res, err := store.DiffSize(from, to)
+ return res, true, err
}
- return false, nil
+ return -1, false, nil
}); done {
- return res, err
+ if err != nil {
+ return -1, err
+ }
+ return res, nil
}
return -1, ErrLayerUnknown
}
@@ -2837,71 +2868,61 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
}
func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
- return s.writeToLayerStore(func(rlstore rwLayerStore) error {
+ _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
if !rlstore.Exists(to) {
- return ErrLayerUnknown
+ return struct{}{}, ErrLayerUnknown
}
- return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
+ return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
})
+ return err
}
func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
- return s.writeToLayerStore(func(rlstore rwLayerStore) error {
- return rlstore.CleanupStagingDirectory(stagingDirectory)
+ _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
+ return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory)
})
+ return err
}
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
- var res *drivers.DriverWithDifferOutput
- err := s.writeToLayerStore(func(rlstore rwLayerStore) error {
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
if to != "" && !rlstore.Exists(to) {
- return ErrLayerUnknown
+ return nil, ErrLayerUnknown
}
- var err error
- res, err = rlstore.ApplyDiffWithDiffer(to, options, differ)
- return err
+ return rlstore.ApplyDiffWithDiffer(to, options, differ)
})
- return res, err
}
func (s *store) DifferTarget(id string) (string, error) {
- var res string
- err := s.writeToLayerStore(func(rlstore rwLayerStore) error {
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (string, error) {
if rlstore.Exists(id) {
- var err error
- res, err = rlstore.DifferTarget(id)
- return err
+ return rlstore.DifferTarget(id)
}
- return ErrLayerUnknown
+ return "", ErrLayerUnknown
})
- return res, err
}
func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
- var res int64 = -1
- err := s.writeToLayerStore(func(rlstore rwLayerStore) error {
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (int64, error) {
if rlstore.Exists(to) {
- var err error
- res, err = rlstore.ApplyDiff(to, diff)
- return err
+ return rlstore.ApplyDiff(to, diff)
}
- return ErrLayerUnknown
+ return -1, ErrLayerUnknown
})
- return res, err
}
func (s *store) layersByMappedDigest(m func(roLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) {
var layers []Layer
- if _, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
storeLayers, err := m(store, d)
if err != nil {
if !errors.Is(err, ErrLayerUnknown) {
- return true, err
+ return struct{}{}, true, err
}
- return false, nil
+ return struct{}{}, false, nil
}
layers = append(layers, storeLayers...)
- return false, nil
+ return struct{}{}, false, nil
}); err != nil {
return nil, err
}
@@ -2926,16 +2947,17 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
}
func (s *store) LayerSize(id string) (int64, error) {
- var res int64 = -1
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
if store.Exists(id) {
- var err error
- res, err = store.Size(id)
- return true, err
+ res, err := store.Size(id)
+ return res, true, err
}
- return false, nil
+ return -1, false, nil
}); done {
- return res, err
+ if err != nil {
+ return -1, err
+ }
+ return res, nil
}
return -1, ErrLayerUnknown
}
@@ -2980,13 +3002,13 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
func (s *store) Layers() ([]Layer, error) {
var layers []Layer
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if _, done, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
storeLayers, err := store.Layers()
if err != nil {
- return true, err
+ return struct{}{}, true, err
}
layers = append(layers, storeLayers...)
- return false, nil
+ return struct{}{}, false, nil
}); done {
return nil, err
}
@@ -2995,13 +3017,13 @@ func (s *store) Layers() ([]Layer, error) {
func (s *store) Images() ([]Image, error) {
var images []Image
- if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
storeImages, err := store.Images()
if err != nil {
- return true, err
+ return struct{}{}, true, err
}
images = append(images, storeImages...)
- return false, nil
+ return struct{}{}, false, nil
}); err != nil {
return nil, err
}
@@ -3009,23 +3031,20 @@ func (s *store) Images() ([]Image, error) {
}
func (s *store) Containers() ([]Container, error) {
- if err := s.containerStore.startReading(); err != nil {
- return nil, err
- }
- defer s.containerStore.stopReading()
-
- return s.containerStore.Containers()
+ res, _, err := readContainerStore(s, func() ([]Container, bool, error) {
+ res, err := s.containerStore.Containers()
+ return res, true, err
+ })
+ return res, err
}
func (s *store) Layer(id string) (*Layer, error) {
- var res *Layer
- if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (*Layer, bool, error) {
layer, err := store.Get(id)
if err == nil {
- res = layer
- return true, nil
+ return layer, true, nil
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
@@ -3119,8 +3138,7 @@ func (al *additionalLayer) Release() {
}
func (s *store) Image(id string) (*Image, error) {
- var res *Image
- if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (*Image, bool, error) {
image, err := store.Get(id)
if err == nil {
if store != s.imageStore {
@@ -3130,13 +3148,12 @@ func (s *store) Image(id string) (*Image, error) {
// store, but we have an entry with the same ID in the read-write store,
// then the name was removed when we duplicated the image's
// record into writable storage, so we should ignore this entry
- return false, nil
+ return nil, false, nil
}
}
- res = image
- return true, nil
+ return image, true, nil
}
- return false, nil
+ return nil, false, nil
}); done {
return res, err
}
@@ -3150,10 +3167,10 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
}
images := []*Image{}
- if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
imageList, err := store.Images()
if err != nil {
- return true, err
+ return struct{}{}, true, err
}
for _, image := range imageList {
image := image
@@ -3161,7 +3178,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
images = append(images, &image)
}
}
- return false, nil
+ return struct{}{}, false, nil
}); err != nil {
return nil, err
}
@@ -3170,13 +3187,13 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
images := []*Image{}
- if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
+ if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) {
imageList, err := store.ByDigest(d)
if err != nil && !errors.Is(err, ErrImageUnknown) {
- return true, err
+ return struct{}{}, true, err
}
images = append(images, imageList...)
- return false, nil
+ return struct{}{}, false, nil
}); err != nil {
return nil, err
}
@@ -3184,20 +3201,18 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
}
func (s *store) Container(id string) (*Container, error) {
- if err := s.containerStore.startReading(); err != nil {
- return nil, err
- }
- defer s.containerStore.stopReading()
-
- return s.containerStore.Get(id)
+ res, _, err := readContainerStore(s, func() (*Container, bool, error) {
+ res, err := s.containerStore.Get(id)
+ return res, true, err
+ })
+ return res, err
}
func (s *store) ContainerLayerID(id string) (string, error) {
- if err := s.containerStore.startReading(); err != nil {
- return "", err
- }
- defer s.containerStore.stopReading()
- container, err := s.containerStore.Get(id)
+ container, _, err := readContainerStore(s, func() (*Container, bool, error) {
+ res, err := s.containerStore.Get(id)
+ return res, true, err
+ })
if err != nil {
return "", err
}
@@ -3209,11 +3224,10 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
if err != nil {
return nil, err
}
- if err := s.containerStore.startReading(); err != nil {
- return nil, err
- }
- defer s.containerStore.stopReading()
- containerList, err := s.containerStore.Containers()
+ containerList, _, err := readContainerStore(s, func() ([]Container, bool, error) {
+ res, err := s.containerStore.Containers()
+ return res, true, err
+ })
if err != nil {
return nil, err
}
@@ -3227,41 +3241,37 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
}
func (s *store) ContainerDirectory(id string) (string, error) {
- if err := s.containerStore.startReading(); err != nil {
- return "", err
- }
- defer s.containerStore.stopReading()
-
- id, err := s.containerStore.Lookup(id)
- if err != nil {
- return "", err
- }
+ res, _, err := readContainerStore(s, func() (string, bool, error) {
+ id, err := s.containerStore.Lookup(id)
+ if err != nil {
+ return "", true, err
+ }
- middleDir := s.graphDriverName + "-containers"
- gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
- if err := os.MkdirAll(gcpath, 0700); err != nil {
- return "", err
- }
- return gcpath, nil
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(gcpath, 0o700); err != nil {
+ return "", true, err
+ }
+ return gcpath, true, nil
+ })
+ return res, err
}
func (s *store) ContainerRunDirectory(id string) (string, error) {
- if err := s.containerStore.startReading(); err != nil {
- return "", err
- }
- defer s.containerStore.stopReading()
-
- id, err := s.containerStore.Lookup(id)
- if err != nil {
- return "", err
- }
+ res, _, err := readContainerStore(s, func() (string, bool, error) {
+ id, err := s.containerStore.Lookup(id)
+ if err != nil {
+ return "", true, err
+ }
- middleDir := s.graphDriverName + "-containers"
- rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
- if err := os.MkdirAll(rcpath, 0700); err != nil {
- return "", err
- }
- return rcpath, nil
+ middleDir := s.graphDriverName + "-containers"
+ rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(rcpath, 0o700); err != nil {
+ return "", true, err
+ }
+ return rcpath, true, nil
+ })
+ return res, err
}
func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error {
@@ -3269,11 +3279,11 @@ func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error {
if err != nil {
return err
}
- err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+ err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700)
if err != nil {
return err
}
- return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+ return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600)
}
func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
@@ -3289,11 +3299,11 @@ func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error
if err != nil {
return err
}
- err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+ err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700)
if err != nil {
return err
}
- return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+ return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600)
}
func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
@@ -3541,19 +3551,19 @@ func (s *store) Free() {
// Tries to clean up old unreferenced container leftovers. Returns the first error
// but continues as far as it can.
func (s *store) GarbageCollect() error {
- firstErr := s.writeToContainerStore(func() error {
- return s.containerStore.GarbageCollect()
+ _, firstErr := writeToContainerStore(s, func() (struct{}, error) {
+ return struct{}{}, s.containerStore.GarbageCollect()
})
- moreErr := s.writeToImageStore(func() error {
- return s.imageStore.GarbageCollect()
+ _, moreErr := writeToImageStore(s, func() (struct{}, error) {
+ return struct{}{}, s.imageStore.GarbageCollect()
})
if firstErr == nil {
firstErr = moreErr
}
- moreErr = s.writeToLayerStore(func(rlstore rwLayerStore) error {
- return rlstore.GarbageCollect()
+ _, moreErr = writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
+ return struct{}{}, rlstore.GarbageCollect()
})
if firstErr == nil {
firstErr = moreErr
diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go
index dc6ee3e0c..845b14eed 100644
--- a/vendor/github.com/containers/storage/types/errors.go
+++ b/vendor/github.com/containers/storage/types/errors.go
@@ -59,4 +59,41 @@ var (
ErrInvalidMappings = errors.New("invalid mappings specified")
// ErrNoAvailableIDs is returned when there are not enough unused IDS within the user namespace.
ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace")
+
+ // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver,
+ // but which is not known to or managed by the higher-level driver-agnostic logic.
+ ErrLayerUnaccounted = errors.New("layer in lower level storage driver not accounted for")
+ // ErrLayerUnreferenced describes a layer which is not used by any image or container.
+ ErrLayerUnreferenced = errors.New("layer not referenced by any images or containers")
+ // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more
+ // files that were added in the layer appear to have changed. It may instead look like an
+ // unnamed "file integrity checksum failed" error.
+ ErrLayerIncorrectContentDigest = errors.New("layer content incorrect digest")
+ // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was
+ // used to populate the layer produced a diff of a different size. We check the digest
+ // first, so it's highly unlikely you'll ever see this error.
+ ErrLayerIncorrectContentSize = errors.New("layer content incorrect size")
+ // ErrLayerContentModified describes a layer which contains contents which should not be
+ // there, or for which ownership/permissions/dates have been changed.
+ ErrLayerContentModified = errors.New("layer content modified")
+ // ErrLayerDataMissing describes a layer which is missing a big data item.
+ ErrLayerDataMissing = errors.New("layer data item is missing")
+ // ErrLayerMissing describes a layer which is the missing parent of a layer.
+ ErrLayerMissing = errors.New("layer is missing")
+ // ErrImageLayerMissing describes an image which claims to have a layer that we don't know
+ // about.
+ ErrImageLayerMissing = errors.New("image layer is missing")
+ // ErrImageDataMissing describes an image which is missing a big data item.
+ ErrImageDataMissing = errors.New("image data item is missing")
+ // ErrImageDataIncorrectSize describes an image which has a big data item that looks like
+ // its size has changed, likely because it's been modified somehow.
+ ErrImageDataIncorrectSize = errors.New("image data item has incorrect size")
+ // ErrContainerImageMissing describes a container which claims to be based on an image that
+ // we don't know about.
+ ErrContainerImageMissing = errors.New("image missing")
+ // ErrContainerDataMissing describes a container which is missing a big data item.
+ ErrContainerDataMissing = errors.New("container data item is missing")
+ // ErrContainerDataIncorrectSize describes a container which has a big data item that looks
+ // like its size has changed, likely because it's been modified somehow.
+ ErrContainerDataIncorrectSize = errors.New("container data item has incorrect size")
)
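These new sentinels are plain errors.New values, so callers classify wrapped errors with errors.Is; a hedged sketch using a local stand-in for one of the exported variables:

package main

import (
	"errors"
	"fmt"
)

// errLayerMissing stands in for types.ErrLayerMissing; the real values live
// in the containers/storage/types package shown above.
var errLayerMissing = errors.New("layer is missing")

func main() {
	// Callers are expected to wrap the sentinels with %w and classify them
	// later with errors.Is, as elsewhere in containers/storage.
	err := fmt.Errorf("checking layer %q: %w", "0123abcd", errLayerMissing)
	fmt.Println(errors.Is(err, errLayerMissing)) // true
}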
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 7189a8e6a..15ee9c5cf 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -10,6 +10,8 @@ import (
"time"
"github.com/BurntSushi/toml"
+ drivers "github.com/containers/storage/drivers"
+ _ "github.com/containers/storage/drivers/register"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
@@ -21,6 +23,7 @@ type TomlConfig struct {
Driver string `toml:"driver,omitempty"`
DriverPriority []string `toml:"driver_priority,omitempty"`
RunRoot string `toml:"runroot,omitempty"`
+ ImageStore string `toml:"imagestore,omitempty"`
GraphRoot string `toml:"graphroot,omitempty"`
RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
TransientStore bool `toml:"transient_store,omitempty"`
@@ -215,6 +218,10 @@ type StoreOptions struct {
// GraphRoot is the filesystem path under which we will store the
// contents of layers, images, and containers.
GraphRoot string `json:"root,omitempty"`
+ // ImageStore is the location of the image store, which is separated from
+ // the container store. Usually this is not recommended unless users want
+ // separate stores for images and containers.
+ ImageStore string `json:"imagestore,omitempty"`
// RootlessStoragePath is the storage path for rootless users
// default $HOME/.local/share/containers/storage
RootlessStoragePath string `toml:"rootless_storage_path"`
@@ -305,7 +312,22 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
}
if opts.GraphDriverName == "" {
if len(systemOpts.GraphDriverPriority) == 0 {
- opts.GraphDriverName = "vfs"
+ driversMap := drivers.ScanPriorDrivers(opts.GraphRoot)
+
+ for _, name := range drivers.Priority {
+ if _, prior := driversMap[name]; prior {
+ opts.GraphDriverName = name
+ break
+ }
+ }
+
+ if opts.GraphDriverName == "" {
+ if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) {
+ opts.GraphDriverName = overlayDriver
+ } else {
+ opts.GraphDriverName = "vfs"
+ }
+ }
} else {
opts.GraphDriverPriority = systemOpts.GraphDriverPriority
}
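The rootless driver selection above now prefers, in order: a driver that already owns data under the graph root, then overlay when the host supports it rootless, then vfs. A simplified, self-contained sketch of that decision (the function and parameter names are stand-ins for the real helpers):

package main

import "fmt"

func pickRootlessDriver(prior map[string]bool, priority []string, overlayOK bool) string {
	// 1. Prefer whatever driver already owns data under the graph root.
	for _, name := range priority {
		if prior[name] {
			return name
		}
	}
	// 2. Fresh store: use overlay when the host supports it rootless...
	if overlayOK {
		return "overlay"
	}
	// 3. ...and fall back to the slow-but-safe vfs driver otherwise.
	return "vfs"
}

func main() {
	fmt.Println(pickRootlessDriver(map[string]bool{}, []string{"overlay", "vfs"}, true))
}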
@@ -405,6 +427,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
if config.Storage.GraphRoot != "" {
storeOptions.GraphRoot = config.Storage.GraphRoot
}
+ if config.Storage.ImageStore != "" {
+ storeOptions.ImageStore = config.Storage.ImageStore
+ }
if config.Storage.RootlessStoragePath != "" {
storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath
}
@@ -432,6 +457,16 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
+
+ uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
+ if err != nil {
+ return err
+ }
+ gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
+ if err != nil {
+ return err
+ }
+
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
}
@@ -444,19 +479,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
return err
}
- storeOptions.UIDMap = mappings.UIDs()
- storeOptions.GIDMap = mappings.GIDs()
- }
-
- uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
- if err != nil {
- return err
+ uidmap = mappings.UIDs()
+ gidmap = mappings.GIDs()
}
- gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
- if err != nil {
- return err
- }
-
storeOptions.UIDMap = uidmap
storeOptions.GIDMap = gidmap
storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
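The hoisted ParseIDMap calls parse the same "container-ID:host-ID:size" triples used in storage.conf, and a remap-user/remap-group lookup (when configured) then overwrites both slices before they are stored. A small usage sketch with a made-up mapping:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Same "container-ID:host-ID:size" triple syntax as storage.conf.
	uidmap, err := idtools.ParseIDMap([]string{"0:1000000000:30000"}, "remap-uids")
	if err != nil {
		panic(err)
	}
	for _, m := range uidmap {
		fmt.Printf("container %d -> host %d (len %d)\n", m.ContainerID, m.HostID, m.Size)
	}
}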
diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go
index eed1a3d94..3eecc2b82 100644
--- a/vendor/github.com/containers/storage/types/options_darwin.go
+++ b/vendor/github.com/containers/storage/types/options_darwin.go
@@ -8,6 +8,9 @@ const (
SystemConfigFile = "/usr/share/containers/storage.conf"
)
-var (
- defaultOverrideConfigFile = "/etc/containers/storage.conf"
-)
+var defaultOverrideConfigFile = "/etc/containers/storage.conf"
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay(home, runhome string) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go
index afb7ec6b4..be2bc2f27 100644
--- a/vendor/github.com/containers/storage/types/options_freebsd.go
+++ b/vendor/github.com/containers/storage/types/options_freebsd.go
@@ -12,3 +12,8 @@ const (
var (
defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf"
)
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay(home, runhome string) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go
index d44aaf76a..a28e82883 100644
--- a/vendor/github.com/containers/storage/types/options_linux.go
+++ b/vendor/github.com/containers/storage/types/options_linux.go
@@ -1,5 +1,13 @@
package types
+import (
+ "os/exec"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
const (
// these are default path for run and graph root for rootful users
// for rootless path is constructed via getRootlessStorageOpts
@@ -12,3 +20,33 @@ const (
var (
defaultOverrideConfigFile = "/etc/containers/storage.conf"
)
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay(home, runhome string) bool {
+ // We check for fuse-overlayfs first since it is cheaper.
+ if path, _ := exec.LookPath("fuse-overlayfs"); path != "" {
+ return true
+ }
+
+ // We cannot use overlay.SupportsNativeOverlay since canUseRootlessOverlay is called by Podman
+ // before we enter the user namespace, and the driver we pick here is written to the Podman database.
+ // Checking the kernel version is usually not a good idea since the feature could be back-ported (e.g. on RHEL),
+ // but this is just a heuristic, and on RHEL we always install the storage.conf file.
+ // Native overlay for rootless was added upstream in 5.13 (at least the first version that we support), so check
+ // that the kernel is >= 5.13.
+ var uts unix.Utsname
+ if err := unix.Uname(&uts); err == nil {
+ parts := strings.Split(string(uts.Release[:]), ".")
+ major, _ := strconv.Atoi(parts[0])
+ if major >= 6 {
+ return true
+ }
+ if major == 5 && len(parts) > 1 {
+ minor, _ := strconv.Atoi(parts[1])
+ if minor >= 13 {
+ return true
+ }
+ }
+ }
+ return false
+}
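The kernel cutoff above (any 6.x, or 5.x with minor >= 13) can be exercised in isolation; a hedged sketch with the parsing pulled into a testable helper, where the TrimRight of NUL padding is a defensive addition rather than part of the vendored code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// kernelAtLeast513 mirrors the heuristic above on a plain release string,
// trimming the NUL padding a raw uname buffer would carry.
func kernelAtLeast513(release string) bool {
	parts := strings.Split(strings.TrimRight(release, "\x00"), ".")
	major, _ := strconv.Atoi(parts[0])
	if major >= 6 {
		return true
	}
	if major == 5 && len(parts) > 1 {
		minor, _ := strconv.Atoi(parts[1])
		return minor >= 13
	}
	return false
}

func main() {
	fmt.Println(kernelAtLeast513("5.13.0-39-generic")) // true
	fmt.Println(kernelAtLeast513("5.10.0"))            // false
}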
diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go
index d44aaf76a..c1bea9fac 100644
--- a/vendor/github.com/containers/storage/types/options_windows.go
+++ b/vendor/github.com/containers/storage/types/options_windows.go
@@ -12,3 +12,8 @@ const (
var (
defaultOverrideConfigFile = "/etc/containers/storage.conf"
)
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay(home, runhome string) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf
index 9b682fe15..87b0c9bb1 100644
--- a/vendor/github.com/containers/storage/types/storage_test.conf
+++ b/vendor/github.com/containers/storage/types/storage_test.conf
@@ -25,6 +25,16 @@ rootless_storage_path = "$HOME/$UID/containers/storage"
additionalimagestores = [
]
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+remap-uids = "0:1000000000:30000"
+remap-gids = "0:1500000000:60000"
+
[storage.options.overlay]
# mountopt specifies comma separated list of extra mount options
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index 72c38f861..73134f82d 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -20,7 +20,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
return "", err
}
path = filepath.Join(path, "containers")
- if err := os.MkdirAll(path, 0700); err != nil {
+ if err := os.MkdirAll(path, 0o700); err != nil {
return "", fmt.Errorf("unable to make rootless runtime: %w", err)
}
return path, nil
@@ -45,25 +45,30 @@ type rootlessRuntimeDirEnvironmentImplementation struct {
func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string {
return env.procCommandFile
}
+
func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string {
return env.runUserDir
}
+
func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string {
return env.tmpPerUserDir
}
+
func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) {
return homedir.GetRuntimeDir()
}
+
func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) {
return system.Lstat(path)
}
+
func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string {
return homedir.Get()
}
func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool {
st, err := env.systemLstat(dir)
- return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000
+ return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && st.Mode()&0o066 == 0o000
}
// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing.
@@ -85,7 +90,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
tmpPerUserDir := env.getTmpPerUserDir()
if tmpPerUserDir != "" {
if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
- if err := os.Mkdir(tmpPerUserDir, 0700); err != nil {
+ if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil {
logrus.Errorf("Failed to create temp directory for user: %v", err)
} else {
return tmpPerUserDir, nil
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
index 8afa895c1..b7bd09275 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
@@ -13,4 +13,5 @@ const (
PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
+ EdDSA = "EdDSA" // Ed25519 using SHA-512
)
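A hedged sketch of the newly supported EdDSA algorithm in use with a StaticKeySet; the Ed25519 key pair is generated on the fly and no real token is minted, so Verify is expected to fail:

package main

import (
	"context"
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub}}
	verifier := oidc.NewVerifier("https://issuer.example.com", keySet,
		&oidc.Config{ClientID: "my-client", SupportedSigningAlgs: []string{oidc.EdDSA}})

	// No real token is supplied here, so Verify reports a parse error; the
	// point is only that EdDSA now passes algorithm validation.
	_, err = verifier.Verify(context.Background(), "not-a-real-token")
	fmt.Println(err)
}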
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
index 50dad7e05..539933b3d 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
@@ -4,6 +4,7 @@ import (
"context"
"crypto"
"crypto/ecdsa"
+ "crypto/ed25519"
"crypto/rsa"
"errors"
"fmt"
@@ -32,6 +33,7 @@ func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte,
switch pub.(type) {
case *rsa.PublicKey:
case *ecdsa.PublicKey:
+ case ed25519.PublicKey:
default:
return nil, fmt.Errorf("invalid public key type provided: %T", pub)
}
@@ -60,7 +62,7 @@ func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time)
if now == nil {
now = time.Now
}
- return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
+ return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
}
// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
index ae73eb028..b159d1ccd 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
@@ -14,6 +14,7 @@ import (
"mime"
"net/http"
"strings"
+ "sync"
"time"
"golang.org/x/oauth2"
@@ -48,39 +49,34 @@ var issuerURLKey contextKey
// This method sets the same context key used by the golang.org/x/oauth2 package,
// so the returned context works for that package too.
//
-// myClient := &http.Client{}
-// ctx := oidc.ClientContext(parentContext, myClient)
-//
-// // This will use the custom client
-// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
+// myClient := &http.Client{}
+// ctx := oidc.ClientContext(parentContext, myClient)
//
+// // This will use the custom client
+// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
func ClientContext(ctx context.Context, client *http.Client) context.Context {
return context.WithValue(ctx, oauth2.HTTPClient, client)
}
-// cloneContext copies a context's bag-of-values into a new context that isn't
-// associated with its cancellation. This is used to initialize remote keys sets
-// which run in the background and aren't associated with the initial context.
-func cloneContext(ctx context.Context) context.Context {
- cp := context.Background()
+func getClient(ctx context.Context) *http.Client {
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
- cp = ClientContext(cp, c)
+ return c
}
- return cp
+ return nil
}
// InsecureIssuerURLContext allows discovery to work when the issuer_url reported
// by upstream is mismatched with the discovery URL. This is meant for integration
// with off-spec providers such as Azure.
//
-// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
-// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
+// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
+// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
//
-// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
+// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
//
-// // Provider will be discovered with the discoveryBaseURL, but use issuerURL
-// // for future issuer validation.
-// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
+// // Provider will be discovered with the discoveryBaseURL, but use issuerURL
+// // for future issuer validation.
+// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
//
// This is insecure because validating the correct issuer is critical for multi-tenant
// providers. Any overrides here MUST be carefully reviewed.
@@ -90,7 +86,7 @@ func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Con
func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
client := http.DefaultClient
- if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
+ if c := getClient(ctx); c != nil {
client = c
}
return client.Do(req.WithContext(ctx))
@@ -102,12 +98,33 @@ type Provider struct {
authURL string
tokenURL string
userInfoURL string
+ jwksURL string
algorithms []string
// Raw claims returned by the server.
rawClaims []byte
- remoteKeySet KeySet
+ // Guards all of the following fields.
+ mu sync.Mutex
+ // HTTP client specified from the initial NewProvider request. This is used
+ // when creating the common key set.
+ client *http.Client
+ // A key set that uses context.Background() and is shared between all code paths
+ // that don't have a convenient way of supplying a unique context.
+ commonRemoteKeySet KeySet
+}
+
+func (p *Provider) remoteKeySet() KeySet {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.commonRemoteKeySet == nil {
+ ctx := context.Background()
+ if p.client != nil {
+ ctx = ClientContext(ctx, p.client)
+ }
+ p.commonRemoteKeySet = NewRemoteKeySet(ctx, p.jwksURL)
+ }
+ return p.commonRemoteKeySet
}
type providerJSON struct {
@@ -132,6 +149,7 @@ var supportedAlgorithms = map[string]bool{
PS256: true,
PS384: true,
PS512: true,
+ EdDSA: true,
}
// ProviderConfig allows creating providers when discovery isn't supported. It's
@@ -167,12 +185,13 @@ type ProviderConfig struct {
// through discovery.
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
return &Provider{
- issuer: p.IssuerURL,
- authURL: p.AuthURL,
- tokenURL: p.TokenURL,
- userInfoURL: p.UserInfoURL,
- algorithms: p.Algorithms,
- remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
+ issuer: p.IssuerURL,
+ authURL: p.AuthURL,
+ tokenURL: p.TokenURL,
+ userInfoURL: p.UserInfoURL,
+ jwksURL: p.JWKSURL,
+ algorithms: p.Algorithms,
+ client: getClient(ctx),
}
}
@@ -221,26 +240,27 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
}
}
return &Provider{
- issuer: issuerURL,
- authURL: p.AuthURL,
- tokenURL: p.TokenURL,
- userInfoURL: p.UserInfoURL,
- algorithms: algs,
- rawClaims: body,
- remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
+ issuer: issuerURL,
+ authURL: p.AuthURL,
+ tokenURL: p.TokenURL,
+ userInfoURL: p.UserInfoURL,
+ jwksURL: p.JWKSURL,
+ algorithms: algs,
+ rawClaims: body,
+ client: getClient(ctx),
}, nil
}
// Claims unmarshals raw fields returned by the server during discovery.
//
-// var claims struct {
-// ScopesSupported []string `json:"scopes_supported"`
-// ClaimsSupported []string `json:"claims_supported"`
-// }
+// var claims struct {
+// ScopesSupported []string `json:"scopes_supported"`
+// ClaimsSupported []string `json:"claims_supported"`
+// }
//
-// if err := provider.Claims(&claims); err != nil {
-// // handle unmarshaling error
-// }
+// if err := provider.Claims(&claims); err != nil {
+// // handle unmarshaling error
+// }
//
// For a list of fields defined by the OpenID Connect spec see:
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
@@ -256,6 +276,12 @@ func (p *Provider) Endpoint() oauth2.Endpoint {
return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
}
+// UserInfoEndpoint returns the OpenID Connect userinfo endpoint for the given
+// provider.
+func (p *Provider) UserInfoEndpoint() string {
+ return p.userInfoURL
+}
+
// UserInfo represents the OpenID Connect userinfo claims.
type UserInfo struct {
Subject string `json:"sub"`
@@ -317,7 +343,7 @@ func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource)
ct := resp.Header.Get("Content-Type")
mediaType, _, parseErr := mime.ParseMediaType(ct)
if parseErr == nil && mediaType == "application/jwt" {
- payload, err := p.remoteKeySet.VerifySignature(ctx, string(body))
+ payload, err := p.remoteKeySet().VerifySignature(ctx, string(body))
if err != nil {
return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
}
@@ -391,18 +417,17 @@ type IDToken struct {
// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
//
-// idToken, err := idTokenVerifier.Verify(rawIDToken)
-// if err != nil {
-// // handle error
-// }
-// var claims struct {
-// Email string `json:"email"`
-// EmailVerified bool `json:"email_verified"`
-// }
-// if err := idToken.Claims(&claims); err != nil {
-// // handle error
-// }
-//
+// idToken, err := idTokenVerifier.Verify(rawIDToken)
+// if err != nil {
+// // handle error
+// }
+// var claims struct {
+// Email string `json:"email"`
+// EmailVerified bool `json:"email_verified"`
+// }
+// if err := idToken.Claims(&claims); err != nil {
+// // handle error
+// }
func (i *IDToken) Claims(v interface{}) error {
if i.claims == nil {
return errors.New("oidc: claims not set")
@@ -424,7 +449,7 @@ func (i *IDToken) VerifyAccessToken(accessToken string) error {
h = sha256.New()
case RS384, ES384, PS384:
h = sha512.New384()
- case RS512, ES512, PS512:
+ case RS512, ES512, PS512, EdDSA:
h = sha512.New()
default:
return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
index ade861572..3e5ffbc76 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
@@ -64,14 +64,13 @@ type IDTokenVerifier struct {
// This constructor can be used to create a verifier directly using the issuer URL and
// JSON Web Key Set URL without using discovery:
//
-// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
-// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
+// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
+// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
//
// Or a static key set (e.g. for testing):
//
-// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
-// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
-//
+// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
+// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
}
@@ -120,8 +119,22 @@ type Config struct {
InsecureSkipSignatureCheck bool
}
+// VerifierContext returns an IDTokenVerifier that uses the provider's key set to
+// verify JWTs. As opposed to Verifier, the context is used for all requests to
+// the upstream JWKs endpoint.
+func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier {
+ return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config)
+}
+
// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
+//
+// The returned verifier uses a background context for all requests to the upstream
+// JWKs endpoint. To control that context, use VerifierContext instead.
func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
+ return p.newVerifier(p.remoteKeySet(), config)
+}
+
+func (p *Provider) newVerifier(keySet KeySet, config *Config) *IDTokenVerifier {
if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
// Make a copy so we don't modify the config values.
cp := &Config{}
@@ -129,7 +142,7 @@ func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
cp.SupportedSigningAlgs = p.algorithms
config = cp
}
- return NewVerifier(p.issuer, p.remoteKeySet, config)
+ return NewVerifier(p.issuer, keySet, config)
}
func parseJWT(p string) ([]byte, error) {
@@ -193,19 +206,18 @@ func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src
//
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
//
-// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
-// if err != nil {
-// // handle error
-// }
-//
-// // Extract the ID Token from oauth2 token.
-// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
-// if !ok {
-// // handle error
-// }
+// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
+// if err != nil {
+// // handle error
+// }
//
-// token, err := verifier.Verify(ctx, rawIDToken)
+// // Extract the ID Token from oauth2 token.
+// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+// if !ok {
+// // handle error
+// }
//
+// token, err := verifier.Verify(ctx, rawIDToken)
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
// Throw out tokens with invalid claims before trying to verify the token. This lets
// us do cheap checks before possibly re-syncing keys.
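Tying the verify.go changes together, a hedged usage sketch of the new VerifierContext against a hypothetical issuer; unlike Verifier, the supplied context also governs the verifier's background JWKS fetches:

package main

import (
	"context"
	"fmt"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	ctx := context.Background()
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") // hypothetical issuer
	if err != nil {
		fmt.Println(err) // fails here without a reachable issuer
		return
	}
	// The verifier's JWKS refreshes are tied to ctx instead of
	// context.Background(), so cancelling ctx stops key-set requests.
	verifier := provider.VerifierContext(ctx, &oidc.Config{ClientID: "my-client"})
	_ = verifier
}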
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
index 2a26b66d0..5b0d01769 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
@@ -17,6 +17,7 @@ package name
import (
"net"
"net/url"
+ "path"
"regexp"
"strings"
)
@@ -50,6 +51,11 @@ func (r Registry) String() string {
return r.Name()
}
+// Repo returns a Repository in the Registry with the given name.
+func (r Registry) Repo(repo ...string) Repository {
+ return Repository{Registry: r, repository: path.Join(repo...)}
+}
+
// Scope returns the scope required to access the registry.
func (r Registry) Scope(string) string {
// The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z
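A small usage sketch of the new Repo helper; the registry host is hypothetical:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	reg, err := name.NewRegistry("registry.example.com") // hypothetical registry
	if err != nil {
		panic(err)
	}
	// Repo path-joins its arguments under the registry.
	repo := reg.Repo("team", "app")
	fmt.Println(repo.Name()) // registry.example.com/team/app
}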
diff --git a/vendor/github.com/google/trillian/.gitignore b/vendor/github.com/google/trillian/.gitignore
deleted file mode 100644
index f7e55498c..000000000
--- a/vendor/github.com/google/trillian/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-*.iml
-*.swo
-*.swp
-*.tfstate
-*.tfstate.backup
-*~
-/.idea
-/bazel-*
-/commit_log
-/coverage.txt
-/createtree
-/ct_hammer
-/ct_server
-/dump_tree
-/licenses
-/loglb
-/maphammer
-/mapreplay
-/mdmtest
-/protoc
-/trillian_log_server
-/trillian_log_signer
-/trillian_map_server
-default.etcd
-cockroach-data/
diff --git a/vendor/github.com/google/trillian/.golangci.yaml b/vendor/github.com/google/trillian/.golangci.yaml
deleted file mode 100644
index 4784f8fde..000000000
--- a/vendor/github.com/google/trillian/.golangci.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-run:
- # timeout for analysis, e.g. 30s, 5m, default is 1m
- deadline: 90s
- skip-files:
- - types/internal/tls/tls.go
-
-linters-settings:
- gocyclo:
- # minimal code complexity to report, 30 by default (but we recommend 10-20)
- # TODO(mhutchinson): lower this again after reworking interceptor
- min-complexity: 26
- depguard:
- list-type: blacklist
- packages:
- - golang.org/x/net/context
- - github.com/gogo/protobuf/proto
-
-linters:
- disable-all: true
- enable:
- - depguard
- - gocyclo
- - gofmt
- - goimports
- - govet
- - ineffassign
- - megacheck
- - misspell
- - revive
- - unused
- # TODO(gbelvin): write license linter and commit to upstream.
- # ./scripts/check_license.sh is run by ./scripts/presubmit.sh
-
-issues:
- # Don't turn off any checks by default. We can do this explicitly if needed.
- exclude-use-default: false
diff --git a/vendor/github.com/google/trillian/AUTHORS b/vendor/github.com/google/trillian/AUTHORS
deleted file mode 100644
index f45549fad..000000000
--- a/vendor/github.com/google/trillian/AUTHORS
+++ /dev/null
@@ -1,14 +0,0 @@
-# This is the official list of benchmark authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-#
-# Names should be added to this file as:
-# Name or Organization <email address>
-# The email address is not required for organizations.
-#
-# Please keep the list sorted.
-
-Antonio Marcedone <a.marcedone@gmail.com>
-Google LLC
-Internet Security Research Group
-Vishal Kuo <vishalkuo@gmail.com>
diff --git a/vendor/github.com/google/trillian/BUILD.bazel b/vendor/github.com/google/trillian/BUILD.bazel
deleted file mode 100644
index bbee3e0cb..000000000
--- a/vendor/github.com/google/trillian/BUILD.bazel
+++ /dev/null
@@ -1,55 +0,0 @@
-# This BUILD file contains Bazel build targets for clients of the Trillian API.
-# Bazel can be obtained from www.bazel.build
-#
-# Even where Bazel is not being used by client builds, these targets provide
-# a mechanism to determine which proto files are required for the API. For
-# example, the following command will list the proto files required to use
-# the Trillian Admin gRPC interface:
-#
-# bazel query --notool_deps --noimplicit_deps \
-# 'kind("source file", deps(:trillian_admin_api_proto))'
-package(default_visibility = ["//visibility:public"])
-
-# A proto library for the Trillian Admin gRPC API.
-proto_library(
- name = "trillian_admin_api_proto",
- srcs = [
- "trillian_admin_api.proto",
- ],
- deps = [
- ":trillian_proto",
- "@com_google_googleapis//google/api:annotations_proto",
- "@com_google_googleapis//google/rpc:status_proto",
- "@com_google_protobuf//:field_mask_proto",
- ],
-)
-
-# A proto library for the Trillian Log gRPC API.
-proto_library(
- name = "trillian_log_api_proto",
- srcs = [
- "trillian_log_api.proto",
- ],
- deps = [
- ":trillian_proto",
- "@com_google_googleapis//google/api:annotations_proto",
- "@com_google_googleapis//google/rpc:status_proto",
- "@com_google_protobuf//:api_proto",
- "@com_google_protobuf//:timestamp_proto",
- ],
-)
-
-# Common proto definitions used within the Trillian gRPC APIs.
-proto_library(
- name = "trillian_proto",
- srcs = [
- "crypto/keyspb/keyspb.proto",
- "trillian.proto",
- ],
- deps = [
- "@com_google_protobuf//:any_proto",
- "@com_google_protobuf//:api_proto",
- "@com_google_protobuf//:duration_proto",
- "@com_google_protobuf//:timestamp_proto",
- ],
-)
diff --git a/vendor/github.com/google/trillian/CHANGELOG.md b/vendor/github.com/google/trillian/CHANGELOG.md
deleted file mode 100644
index 7a072252d..000000000
--- a/vendor/github.com/google/trillian/CHANGELOG.md
+++ /dev/null
@@ -1,1139 +0,0 @@
-# TRILLIAN Changelog
-
-## HEAD
-
-## v1.5.1
-
-### Storage
-
-* A new storage driver for CockroachDB has been added. It's currently in alpha stage
- with support provided by Equinix Metal.
-
-### Misc
-* Fix log server not exiting properly on SIGINT
-
-### Dependency updates
-
-* Switch from glog to klog by @jdolitsky in https://github.com/google/trillian/pull/2787
-* Bump google.golang.org/api from 0.92.0 to 0.93.0 by @dependabot in https://github.com/google/trillian/pull/2800
-* Bump cloud.google.com/go/spanner from 1.36.0 to 1.37.0 by @dependabot in https://github.com/google/trillian/pull/2803
-* Bump google.golang.org/grpc from 1.48.0 to 1.49.0 by @dependabot in https://github.com/google/trillian/pull/2804
-* Bump google.golang.org/api from 0.93.0 to 0.94.0 by @dependabot in https://github.com/google/trillian/pull/2802
-* Bump cloud.google.com/go/spanner from 1.37.0 to 1.38.0 by @dependabot in https://github.com/google/trillian/pull/2806
-* Bump k8s.io/klog/v2 from 2.70.1 to 2.80.0 by @dependabot in https://github.com/google/trillian/pull/2807
-* Bump k8s.io/klog/v2 from 2.80.0 to 2.80.1 by @dependabot in https://github.com/google/trillian/pull/2808
-* Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 by @dependabot in https://github.com/google/trillian/pull/2809
-* Bump google.golang.org/api from 0.94.0 to 0.95.0 by @dependabot in https://github.com/google/trillian/pull/2810
-* Bump go.etcd.io/etcd/etcdctl/v3 from 3.5.4 to 3.5.5 by @dependabot in https://github.com/google/trillian/pull/2812
-* Bump go.etcd.io/etcd/v3 from 3.5.4 to 3.5.5 by @dependabot in https://github.com/google/trillian/pull/2816
-* Bump google.golang.org/api from 0.95.0 to 0.96.0 by @dependabot in https://github.com/google/trillian/pull/2813
-* Bump google.golang.org/api from 0.96.0 to 0.97.0 by @dependabot in https://github.com/google/trillian/pull/2819
-* Bump cloud.google.com/go/spanner from 1.38.0 to 1.39.0 by @dependabot in https://github.com/google/trillian/pull/2818
-* Bump google.golang.org/api from 0.97.0 to 0.98.0 by @dependabot in https://github.com/google/trillian/pull/2820
-* Bump google.golang.org/grpc from 1.49.0 to 1.50.0 by @dependabot in https://github.com/google/trillian/pull/2821
-* Bump google.golang.org/grpc from 1.50.0 to 1.50.1 by @dependabot in https://github.com/google/trillian/pull/2823
-* Bump google.golang.org/api from 0.98.0 to 0.99.0 by @dependabot in https://github.com/google/trillian/pull/2822
-* Bump google.golang.org/api from 0.99.0 to 0.100.0 by @dependabot in https://github.com/google/trillian/pull/2824
-* Bump github.com/prometheus/client_model from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2825
-* Bump golang.org/x/tools from 0.1.12 to 0.2.0 by @dependabot in https://github.com/google/trillian/pull/2826
-* Bump google.golang.org/api from 0.100.0 to 0.101.0 by @dependabot in https://github.com/google/trillian/pull/2827
-* Bump github.com/prometheus/client_golang from 1.13.0 to 1.13.1 by @dependabot in https://github.com/google/trillian/pull/2828
-* Bump golang.org/x/sys from 0.1.0 to 0.2.0 by @dependabot in https://github.com/google/trillian/pull/2829
-* Bump google.golang.org/api from 0.101.0 to 0.102.0 by @dependabot in https://github.com/google/trillian/pull/2830
-* Bump go.opencensus.io from 0.23.0 to 0.24.0 by @dependabot in https://github.com/google/trillian/pull/2832
-* Bump cloud.google.com/go/spanner from 1.39.0 to 1.40.0 by @dependabot in https://github.com/google/trillian/pull/2831
-* Bump github.com/prometheus/client_golang from 1.13.1 to 1.14.0 by @dependabot in https://github.com/google/trillian/pull/2838
-* Bump google.golang.org/api from 0.102.0 to 0.103.0 by @dependabot in https://github.com/google/trillian/pull/2839
-* Bump golang.org/x/crypto from 0.1.0 to 0.2.0 by @dependabot in https://github.com/google/trillian/pull/2841
-* Bump golang.org/x/tools from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2840
-* Dependabot: Also keep GitHub actions up-to-date by @JAORMX in https://github.com/google/trillian/pull/2842
-* Bump actions/upload-artifact from 3.1.0 to 3.1.1 by @dependabot in https://github.com/google/trillian/pull/2843
-* Bump golang.org/x/crypto from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2847
-* Bump google.golang.org/grpc from 1.50.1 to 1.51.0 by @dependabot in https://github.com/google/trillian/pull/2845
-* Bump github.com/cockroachdb/cockroach-go/v2 from 2.2.16 to 2.2.18 by @dependabot in https://github.com/google/trillian/pull/2846
-* Bump go.etcd.io/etcd/v3 from 3.5.5 to 3.5.6 by @dependabot in https://github.com/google/trillian/pull/2849
-* Bump github.com/cockroachdb/cockroach-go/v2 from 2.2.18 to 2.2.19 by @dependabot in https://github.com/google/trillian/pull/2856
-* Bump golang.org/x/sys from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2858
-* Bump cloud.google.com/go/spanner from 1.40.0 to 1.41.0 by @dependabot in https://github.com/google/trillian/pull/2857
-* Bump actions/setup-go from 3.3.1 to 3.4.0 by @dependabot in https://github.com/google/trillian/pull/2862
-* Bump github/codeql-action from 2.1.34 to 2.1.35 by @dependabot in https://github.com/google/trillian/pull/2861
-* Bump golangci/golangci-lint-action from 3.3.0 to 3.3.1 by @dependabot in https://github.com/google/trillian/pull/2860
-* Bump github.com/go-sql-driver/mysql from 1.6.0 to 1.7.0 by @dependabot in https://github.com/google/trillian/pull/2859
-* Bump qs, body-parser and express in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2867
-* Bump minimist from 1.2.0 to 1.2.7 in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2864
-* Bump axios and @slack/webhook in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2868
-* Bump json-bigint and google-auth-library in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2869
-* Bump node-fetch from 2.6.0 to 2.6.7 in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2866
-* Bump golang.org/x/tools from 0.3.0 to 0.4.0 by @dependabot in https://github.com/google/trillian/pull/2870
-* Bump github/codeql-action from 2.1.35 to 2.1.36 by @dependabot in https://github.com/google/trillian/pull/2874
-* Bump actions/checkout from 3.1.0 to 3.2.0 by @dependabot in https://github.com/google/trillian/pull/2873
-* Bump golang.org/x/crypto from 0.3.0 to 0.4.0 by @dependabot in https://github.com/google/trillian/pull/2872
-* Bump google.golang.org/api from 0.103.0 to 0.104.0 by @dependabot in https://github.com/google/trillian/pull/2871
-* Bump cloud.google.com/go/spanner from 1.41.0 to 1.42.0 by @dependabot in https://github.com/google/trillian/pull/2877
-
-
-## v1.5.0
-
-### Storage
-
-* Ephemeral nodes are no longer written for any tree by default (and have not been read since the v1.4.0 release); the corresponding `--tree_ids_with_no_ephemeral_nodes` flag is now deprecated (and will be removed in a future release).
-
-### Cleanup
-* Format code according to go1.19rc2 by @mhutchinson in https://github.com/google/trillian/pull/2785
-* Delete merkle package, use [github.com/transparency-dev/merkle](https://pkg.go.dev/github.com/transparency-dev/merkle) instead.
-
-### Misc
-* Fix order-dependent test by @hickford in https://github.com/google/trillian/pull/2792
-
-### Dependency updates
-* Updated golangci-lint to v1.47.3 (developers should update to this version) by @mhutchinson in https://github.com/google/trillian/pull/2791
-* Bump google.golang.org/api from 0.87.0 to 0.88.0 by @dependabot in https://github.com/google/trillian/pull/2783
-* Bump cloud.google.com/go/spanner from 1.35.0 to 1.36.0 by @dependabot in https://github.com/google/trillian/pull/2784
-* Bump google.golang.org/api from 0.88.0 to 0.90.0 by @dependabot in https://github.com/google/trillian/pull/2789
-* Bump golang.org/x/tools from 0.1.11 to 0.1.12 by @dependabot in https://github.com/google/trillian/pull/2790
-* Bump google.golang.org/protobuf from 1.28.0 to 1.28.1 by @dependabot in https://github.com/google/trillian/pull/2788
-* Bump google.golang.org/api from 0.90.0 to 0.91.0 by @dependabot in https://github.com/google/trillian/pull/2796
-* Bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 by @dependabot in https://github.com/google/trillian/pull/2795
-* Bump github.com/fullstorydev/grpcurl from 1.8.6 to 1.8.7 by @dependabot in https://github.com/google/trillian/pull/2794
-* Bump google.golang.org/api from 0.91.0 to 0.92.0 by @dependabot in https://github.com/google/trillian/pull/2798
-
-## v1.4.2
-
-* #2568: Allow disabling the writes of ephemeral nodes to storage via the
- `--tree_ids_with_no_ephemeral_nodes` flag to the sequencer.
-* #2748: `--cloudspanner_max_burst_sessions` deprecated (it hasn't had any
-  effect for a while; now it's more explicit)
-* #2768: update go.mod to use 1.17 compatibility from 1.13.
-
-### Dependency updates
-
-* Updated golangci-lint to v1.46.1 (developers should update to this version)
-* Removed dependency on certificate-transparency-go
-
-### Developer updates
-
-* #2765 copies the required protos from `googleapis` into `third_party` in this
-   repository. This simplifies the preconditions for compiling the proto
- definitions, and removes a big dependency on `$GOPATH/src` which was archaic;
- `$GOPATH/src/github.com/googleapis/googleapis` is no longer required.
-
-## v1.4.1
-
-* Added a `countFromInformationSchema` function to support MySQL 8.
-
-### Removals
-
- * #2710: Unused `storage/tools/dumplib` was removed. The useful storage format
- regression test moved to `integration/format`.
- * #2711: Unused `storage/tools/hasher` removed.
- * #2715: Packages under `merkle` are deprecated and will be removed. Use
- https://github.com/transparency-dev/merkle instead.
-
-### Misc improvements
-
- * #2712: Fix MySQL world-writable config warning.
- * #2726: Check the tile height invariant more strictly. No changes required.
-
-### Dependency updates
- * #2731: Update `protoc` from `v3.12.4` to `v3.20.1`
-
-## v1.4.0
-
-* Recommended go version for development: 1.17
- * This is the version used by the cloudbuild presubmits. Using a
- different version can lead to presubmits failing due to unexpected
- diffs.
-* GCP terraform script updated: GKE 1.19, and CPU type updated to E2.
-
-### Dependency updates
-Many dep updates, including:
- * Upgraded to etcd v3 in order to allow grpc to be upgraded (#2195)
- * etcd was `v0.5.0-alpha.5`, now `v3.5.0`
- * grpc upgraded from `v1.29.1` to `v1.40.0`
- * certificate-transparency-go from `v1.0.21` to
- `v1.1.2-0.20210512142713-bed466244fa6`
- * protobuf upgraded from `v1` to `v2`
- * MySQL driver from `1.5.0` to `1.6.0`
-
-### Cleanup
- * **Removed signatures from LogRoot and EntryTimestamps returned by RPCs** (reflecting that
-   there should not be a trust boundary between Trillian and the personality).
- * Removed the deprecated crypto.NewSHA256Signer function.
- * Finish removing the `LogMetadata.GetUnsequencedCounts()` method.
- * Removed the following APIs:
- - `TrillianLog.GetLeavesByHash`
- - `TrillianLog.GetLeavesByIndex`
- - `TrillianLog.QueueLeaves`
- * Removed the incomplete Postgres storage backend (#1298).
- * Deprecated `LogRootV1.Revision` field.
- * Moved `rfc6962` hasher one directory up to eliminate empty leftover package.
- * Removed unused `log_client` tool.
- * Various tidy-ups and improvements to merkle & proof generation code.
- * Remove some remnants of experimental map.
-
-### Storage refactoring
- * `NodeReader.GetMerkleNodes` does not accept revisions anymore. The
- implementations must use the transaction's `ReadRevision` instead.
- * `TreeStorage` migrated to using `compact.NodeID` type suitable for logs.
- * Removed the tree storage `ReadRevision` and `WriteRevision` methods.
- Revisions are now an implementation detail of the current storages. The
- change allows log implementations which don't need revisions.
- * Removed `Rollback` methods from storage interfaces, as `Close` is enough to
- cover the use-case.
- * Removed the unused `IsOpen` and `IsClosed` methods from transaction
- interfaces.
- * Removed the `ReadOnlyLogTX` interface, and put its only used
- `GetActiveLogIDs` method to `LogStorage`.
- * Inlined the `LogMetadata` interface to `ReadOnlyLogStorage`.
- * Inlined the `TreeStorage` interfaces to `LogStorage`.
- * Removed the need for the storage layer to return ephemeral node hashes. The
-   application layer always requests complete subtree nodes comprising the
- compact ranges corresponding to the requests.
- * Removed the single-tile callback from `SubtreeCache`, it uses only
- `GetSubtreesFunc` now.
- * Removed the `SetSubtreesFunc` callback from `SubtreeCache`; the tiles must
-   now be written by the caller.
-
-## v1.3.13
-[Published 2021-02-16](https://github.com/google/trillian/releases/tag/v1.3.13)
-
-### Cleanup
- * Removed the experimental map API.
-
-## v1.3.12
-[Published 2021-02-16](https://github.com/google/trillian/releases/tag/v1.3.12)
-
-### Misc improvements
-
- * Removed unused `PeekTokens` method from the `quota.Manager` interface.
- * Ensure goroutines never block in the subtree cache (#2272).
- * Breaking unnecessary dependencies for Trillian clients:
-  * Moved verifiers from `merkle` into `merkle/{log,map}verifier` sub-packages,
- reducing the amount of extra baggage inadvertently pulled in by clients.
- * Concrete hashers have been moved into subpackages, separating them from their
- registration code, allowing clients to directly pull just the hasher they're
- interested in and avoid the Trillian/hasher registry+protobuf deps.
- * Moved some packages intended for internal-only use into `internal` packages:
-   * InMemoryMerkleTree (intended to be used only by Trillian tests)
- * Removed wrapper for etcd client (#2288).
- * Moved `--quota_system` and `--storage_system` flags to `main.go` so that they
-   are initialised properly. This might break dependent builds relying on these
- flags. Suggested fix: add the flags to `main.go`.
- * Made signer tolerate mastership election failures [#1150].
- * `testdb` no longer accepts the `--test_mysql_uri` flag, and instead honours the
- `TEST_MYSQL_URI` ENV var. This makes it easier to blanket configure tests to use a
- specific test DB instance.
- * Removed experimental Skylog folder (#2297).
- * Fixed a race condition in the operation manager that should only affect tests
- (#2302).
- * Run gofumpt formatter on the whole repository (#2315).
- * Refactor signer operation loop (#2294).
-
-### Upgrades
- * Dockerfiles are now based on the Go 1.13 image.
- * etcd is now pinned to v3.4.12.
- * The golangci-lint suite is now at v1.36.0.
- * CI/CD has migrated from Travis to Google Cloud Build.
- * prometheus from 1.7.1 to 1.9.0 (#2239, #2270).
- * go-cmp from 0.5.2 to 0.5.4 (#2262).
- * apache/beam from 2.26.0+incompatible to 2.27.0+incompatible (#2273).
- * lib/pq from 1.8.0 to 1.9.0 (#2264).
- * go-redis from 6.15.8+incompatible to 6.15.9+incompatible (#2215).
-
-
-### Process
- * Recognise that we do not follow strict semantic versioning practices.
-
-## v1.3.11
-[Published 2020-10-06](https://github.com/google/trillian/releases/tag/v1.3.11)
-
-### Documentation
-
-Added docs which describe the Claimant Model of transparency, a useful
-framework for reasoning about the design and architecture of transparent
-systems.
-
-### Misc improvements
-
- * Fixed int to string conversion warnings for golang 1.15
- * Metric improvements for fetched leaf counts
- * Move tools.go into its own directory to help with dependencies
-
-### Dependency updates
- * go-grpc-middleware from 1.2.0 to 1.2.2 (#2219, #2229)
- * stackdriver from 0.13.2 to 0.13.4 (#2220, #2223)
- * Google api from 0.28.0 to 0.29.0 (#2193)
-
-
-## v1.3.10
-[Published 2020-07-02](https://github.com/google/trillian/releases/tag/v1.3.10)
-
-### Storage
-
-The StorageProvider type and helpers have been moved from the server package to
-storage. Aliases for the old types/functions are created for backward
-compatibility, but new code should not use them, as we will remove them with
-the next major version bump. The individual storage providers have been moved to
-the corresponding packages, and are now required to be imported explicitly by
-the main file in order to be registered. We are including only MySQL and
-cloudspanner providers by default, since these are the ones that we support.
-
-The cloudspanner storage is supported for logs only, while the Map storage API
-is being polished and decoupled from the log storage API. We may restore
-support once the new API is tested.
-
-Support for storage of Ed25519 signatures has been added to the mysql and
-postgres storage drivers (only applicable in new installations) and bugs
-preventing correct usage of that algorithm have been fixed.
-
-#### Storage TX Interfaces
-- `QueueLeaves` has been removed from the `LogTreeTX` interface because
- `QueueLeaves` is not transactional. All callers use the
- `QueueLeaves` function in the `LogStorage` interface.
-- `AddSequencedLeaves` has been removed from the `LogTreeTX`.
-
-
-### Log Changes
-
-#### Monitoring & Metrics
-
-The `queued_leaves` metric is removed, and replaced by `added_leaves` which
-covers both `QueueLeaves` and `AddSequencedLeaves`, and is labeled by log ID.
-
-#### MySQL Dequeueing Change #2159
-MySQL will now remove leaves from the queue inside `UpdateLeaves` rather
-than directly inside `Dequeue`.
-This change brings the behavior of the mysql storage implementation into line
-with the spanner implementation and makes consistent testing possible.
-
-
-### Map Changes
-
-**The verifiable map is still experimental.**
-APIs, such as SetLeaves, have been deprecated and will be deleted in the near
-future. The semantics of WriteLeaves have become stricter: now it always
-requires the caller to specify the write revision. These changes will not
-affect the Trillian module semantic version due to the experimental status of
-the Map.
-
-Map API has been extended with Layout, GetTiles and SetTiles calls which allow
-for more direct processing of sparse Merkle tree tiles in the application layer.
-Map storage implementations are simpler, and no longer use the SubtreeCache.
-
-The map client has been updated so that GetAndVerifyMapLeaves and
-GetAndVerifyMapLeavesByRevision return the MapRoot for the revision at which the
-leaves were fetched. Without this, callers of GetAndVerifyMapLeaves in particular
-were unable to reason about which map revision they were seeing. The
-SetAndVerifyMapLeaves method was deleted.
-
-
-
-## v1.3.9
-[Published 2020-06-22](https://github.com/google/trillian/releases/tag/v1.3.9)
-
-### Selected Dependency Updates
-* etcd from v3.3.18 to 3.4.7 (#2090)
-* etcd-operator from v0.9.1 to v0.9.4
-* upgraded protoc version to latest (#2088)
-* github.com/golang/protobuf to v1.4.1 (#2111)
-* google.golang.org/grpc from v1.26 to 1.29.1 (#2108)
-
-
-## v1.3.8
-[Published 2020-05-12](https://github.com/google/trillian/releases/tag/v1.3.8)
-
-### HTTP APIs
-
-The HTTP/JSON APIs have been removed in favor of a pure gRPC interface.
-[grpcurl](https://github.com/fullstorydev/grpcurl) is the recommended way
-of interacting with the gRPC API from the commandline.
-
-
-## v1.3.7
-[Published 2020-05-12](https://github.com/google/trillian/releases/tag/v1.3.7)
-
-### Server Binaries
-
-The `trillian_log_server`, `trillian_log_signer` and `trillian_map_server`
-binaries have moved from `github.com/google/trillian/server/` to
-`github.com/google/trillian/cmd`. A subset of the `server` package has also
-moved and has been split into `cmd/internal/serverutil`, `quota/etcd` and
-`quota/mysqlqm` packages.
-
-
-## v1.3.6
-[Published 2020-05-12](https://github.com/google/trillian/releases/tag/v1.3.6)
-
-### Deployments
-
-The Kubernetes configs will now provision 5 nodes for Trillian's Etcd cluster,
-instead of 3 nodes.
-[This makes the Etcd cluster more resilient](https://etcd.io/docs/v3.2.17/faq/#what-is-failure-tolerance)
-to nodes becoming temporarily unavailable, such as during updates (it can now
-tolerate 2 nodes being unavailable, instead of just 1).
-
-### Monitoring & Metrics
-
-A count of the total number of individual leaves the logserver attempts to
-fetch via the GetEntries.* API methods has been added.
-
-
-## v1.3.5
-[Published 2020-05-12](https://github.com/google/trillian/releases/tag/v1.3.5)
-
-### Log Changes
-
-#### Potential sequencer hang fixed
-A potential deadlock condition in the log sequencer when the process is
-attempting to exit has been addressed.
-
-### Quota
-
-#### New Features
-
-An experimental Redis-based `quota.Manager` implementation has been added.
-
-#### Behaviour Changes
-
-Quota used to be refunded for all failed requests. For quota intended to protect
-against abuse or to enforce fair utilization, this could allow infinite QPS in
-situations that really should have the requests throttled. Refunds are now only
-performed for tokens in `Global` buckets, which prevents tokens being leaked if
-duplicate leaves are queued.
-
-### Tools
-
-The `licenses` tool has been moved from "scripts/licenses" to [a dedicated
-repository](https://github.com/google/go-licenses).
-
-### Bazel Changes
-
-Python support is disabled unless we hear that the community cares about this
-being re-enabled. This was broken by a downstream change, and without a signal
-from the Trillian community to say this is needed, the pragmatic action is not
-to spend time investigating this issue.
-
-
-## v1.3.4 - Invalid release, do not use.
-[Published 2020-05-12](https://github.com/google/trillian/releases/tag/v1.3.4)
-
-
-## v1.3.3 - Module fixes
-
-Published 2019-10-31 17:30:00 +0000 UTC
-
-Patch release to address Go Module issue. Removes `replace` directives in our
-go.mod file now that our dependencies have fixed their invalid pseudo-version
-issues.
-
-## v1.3.2 - Module fixes
-
-Published 2019-09-05 17:30:00 +0000 UTC
-
-Patch release to address Go Module issue. Some dependencies use invalid
-pseudo-versions in their go.mod files that Go 1.13 rejects. We've added `replace`
-directives to our go.mod file to fix these invalid pseudo-versions.
-
-## v1.3.1 - Module and Bazel fixes
-
-Published 2019-08-16 15:00:00 +0000 UTC
-
-Patch release primarily to address Go Module issue. v1.3.0 declared a dependency
-on github.com/russross/blackfriday/v2 v2.0.1+incompatible which made downstream
-dependencies suffer.
-
-## v1.3.0
-
-Published 2019-07-17 15:00:00 +0000 UTC
-
-### Storage APIs GetSignedLogRoot / SetSignedLogRoot now take pointers
-
-This is at the storage layer and does not affect the log server API.
-This is part of work to fix proto buffer usages where they are passed
-by value or compared by generic code like `reflect.DeepEquals()`. Passing
-them by value creates shallow copies that can share internal state. As the
-generated structs contain additional exported `XXX_` fields, generic
-comparisons using all fields can produce incorrect results.
-
-### Storage Commit takes context.Context
-
-To support passing a context down to `NodeStorage.SetLeaves`, and remove various `context.TODO()`s,
-the following functions have been modified to accept a `context.Context` parameter:
-
-- `storage/cache.NodeStorage.SetLeaves`
-- `storage/cache.SetSubtreesFunc`
-- `storage/cache.SubtreeCache.Flush`
-- `storage.ReadonlyLogTX.Commit`
-
-### Go Module Support
-
-Go Module support has been enabled. Please use GO111MODULE=on to build Trillian.
-Updating dependencies no longer requires updating the vendor directory.
-
-### TrillianMapWrite API
-New API service for writing to the Trillian Map. This allows APIs such as
-GetLeavesByRevisionNoProof to be removed from the read API, and these methods to
-be tuned & provisioned differently for read vs write performance.
-
-### GetLeavesByRevisionNoProof API
-Allow map clients to forgo fetching inclusion proofs.
-This dramatically speeds things up for clients that don't need verifiability.
-This occurs in situations where a Trillian personality is
-interacting directly with the Trillian Map.
-
-### GetMapLeafByRevision API
-New GetMapLeafByRevision API for fetching a single map leaf. This allows there
-to be a separate API end point for fetching a single leaf vs. the batch
-GetMapLeavesByRevision API which is much slower when many leaves are requested.
-This supports separate monitoring and alerting for different traffic patterns.
-
-### Add Profiling Flags to Binaries
-
-The `trillian_log_server`, `trillian_log_signer` and `trillian_map_server`
-binaries now have CPU and heap profiling flags. Profiling is off by default.
-For more details see the
-[Go Blog](https://blog.golang.org/profiling-go-programs).
-
-### Map performance tweaks
-
-The map mode has had some performance tweaks added:
-* A workaround for locking issues which affect the map when it's used in
- single-transaction mode.
-
-### Introduce BatchInclusionProof function
-
-Added a batch version of the Merkle Tree InclusionProof function.
-
-Updated the map RPC for getLeaves to use the new batch function to improve
-efficiency.
-
-### Google Cloud Spanner support
-
-Google Cloud Spanner is now a supported storage backend for maps.
-
-The admin API calls to list trees backed by Cloud Spanner are fixed.
-
-### RPC Server Transaction Leaks Fixed
-
-There were some cases where the Log RPC server could leak storage transactions
-in error situations. These have now been fixed. If you have a custom storage
-implementation review the fixes made to the MySQL Log storage to see if they
-need to be applied to your code (`storage/mysql/log_storage.go`). The Map
-server had similar issues but these were fixed without requiring changes to
-storage code.
-
-### GetLatestSignedLogRoot With Consistency Proof
-
-`GetLatestSignedLogRoot` in the LogServer will return a consistency proof if
-`first_tree_size` > 0. This reduces the number of RPC calls from logClient from
-2 to 1 in `client.getAndVerifyLatestRoot`.
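-
-As an illustration, a minimal client sketch of the combined call; the endpoint,
-log ID and tree size below are made-up values, and connection options are kept
-deliberately simple:
-
-```go
-package main
-
-import (
-	"context"
-	"log"
-
-	"github.com/google/trillian"
-	"google.golang.org/grpc"
-)
-
-func main() {
-	// Assumed endpoint; use your log server's gRPC address.
-	conn, err := grpc.Dial("localhost:8090", grpc.WithInsecure())
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer conn.Close()
-
-	client := trillian.NewTrillianLogClient(conn)
-	// FirstTreeSize > 0 asks the server to bundle a consistency proof
-	// from that tree size with the latest signed log root.
-	resp, err := client.GetLatestSignedLogRoot(context.Background(),
-		&trillian.GetLatestSignedLogRootRequest{
-			LogId:         12345, // hypothetical tree ID
-			FirstTreeSize: 100,
-		})
-	if err != nil {
-		log.Fatal(err)
-	}
-	log.Printf("root: %d bytes, proof hashes: %d",
-		len(resp.SignedLogRoot.LogRoot), len(resp.Proof.GetHashes()))
-}
-```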
-
-### Testing
-
-Support has been added for testing against a locally running mysql docker image,
-in addition to a locally running mysql instance.
-
-### Deprecated Fields Removed From SignedLogRoot Proto
-
-*Important Note*: For use in Certificate Transparency this version of the
-logserver binary won't work properly with an older CTFE. Make sure to update the
-CTFE servers to a current version (built from a git checkout after March 20th
-2019) before deploying logservers that include this change or deploy them
-together with this release. Failure to do this can result in 5XX errors being
-returned to clients when the old handler code tries to access fields in
-responses that no longer exist.
-
-All the fields marked as deprecated in this proto have been removed. All the
-same fields are available via the TLS marshalled log root in the proto. Updating
-affected code is straightforward.
-
-Normally, clients will want to verify that the signed root is correctly signed.
-This is the preferred way to interact with the root data.
-
-There is a utility function provided that will verify the signature and unpack
-the TLS data. It works well in conjunction with a `LogVerifier`. The public key
-of the server is required.
-
-```go
-verifier := client.NewLogVerifier(rfc6962.DefaultHasher, pk, crypto.SHA256)
-root, err := crypto.VerifySignedLogRoot(verifier.PubKey, verifier.SigHash, resp.SignedLogRoot)
-if err == nil {
- // Signature verified and unmarshalled correctly. The struct may now
- // be used.
- if root.TreeSize > 0 {
- // Non empty tree.
- }
-}
-```
-
-### MySQL changes
-
-#### Configurable number of connections for MySQL
-
-Two new flags have been added that limit connections to MySQL database servers:
-
-- `--mysql_max_conns` - limits the total number of database connections
-- `--mysql_max_idle_conns` - limits the number of idle database connections
-
-By default, there is no maximum number of database connections. However, the
-database server will likely impose limits on the number of connections. The
-default limit on idle connections is controlled by
-[Go's `sql` package](https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns).
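-
-For context, a hedged sketch of what these limits correspond to in Go's
-`database/sql`; the flag names are reused for illustration, but the wiring
-below is not Trillian's actual code:
-
-```go
-package main
-
-import (
-	"database/sql"
-	"flag"
-	"log"
-
-	_ "github.com/go-sql-driver/mysql"
-)
-
-var (
-	maxConns     = flag.Int("mysql_max_conns", 0, "0 or less means no limit")
-	maxIdleConns = flag.Int("mysql_max_idle_conns", 2, "database/sql defaults to 2")
-)
-
-func main() {
-	flag.Parse()
-	// Placeholder DSN; point this at your own database.
-	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/trillian")
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer db.Close()
-
-	// These two calls are what such flags ultimately control.
-	db.SetMaxOpenConns(*maxConns)
-	db.SetMaxIdleConns(*maxIdleConns)
-}
-```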
-
-#### Enforced no concurrent use of MySQL tx
-
-Concurrently using a single MySQL transaction can cause the driver to error
-out, so we now attempt to prevent this from happening.
-
-### Removal of length limits for a tree's `display_name` and `description`
-
-Previously, these were restricted to 20 bytes and 200 bytes respectively. These
-limits have been removed. However, the underlying storage implementation may
-still impose its own limitations.
-
-### Server validation of leaf hashes
-
-The log server now checks that leaf hashes are the correct length and returns
-an InvalidArgument error if they are not. Previously, GetLeavesByHash would
-simply not return any matching leaves for invalid hashes, and
-GetInclusionProofByHash would return a NotFound error.
-
-### Map client
-
-A [MapClient](client/map_client.go) has been added to simplify interacting with
-the map server.
-
-### Database Schema
-
-This version includes a change to the MySQL and Postgres database schemas to add
-an index on the `SequencedLeafData` table. This improves performance for
-inclusion proof queries.
-
-### Deployments
-
-The Trillian Docker images now accept GOFLAGS and GO111MODULE arguments
-and set them as environment variables inside the Docker container.
-
-The [db\_server Docker image](examples/deployment/docker/db_server/Dockerfile)
-is now based on
-[the MySQL 5.7 image from the Google Cloud Marketplace](https://console.cloud.google.com/marketplace/details/google/mysql5),
-rather than the [official MySQL 5.7 image](https://hub.docker.com/_/mysql).
-This Dockerfile supersedes Dockerfile.db, which has been removed.
-
-There is now a [mysql.cnf file](examples/deployment/docker/db_server/mysql.cnf)
-alongside the Dockerfile that makes it easy to build the image with a custom
-configuration, e.g. to allow MySQL to use more memory.
-
-The `trillian-log-service` and `trillian-log-signer` Kubernetes services will
-now have load balancers configured for them that expose those services outside
-of the Kubernetes cluster. This makes it easier to access their APIs. When
-deployed on Google Cloud, these will be
-[Internal Load Balancers](https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing).
-Note that this change **cannot be applied to an existing deployment**; delete
-the existing Kubernetes services and redeploy them, otherwise you'll see an
-error similar to `The Service "trillian-log-service" is invalid: spec.clusterIP:
-Invalid value: "": field is immutable`.
-
-A working [Docker Compose](https://docs.docker.com/compose/) configuration is
-now available and can be used to bring up a local Trillian deployment for
-testing and experimental purposes:
-
-```shell
-docker-compose -f examples/deployment/docker-compose.yml up
-```
-
-Docker Compose v3.1 or higher is required.
-
-The Terraform, Kubernetes and Docker configuration files, as well as various
-scripts, all now use the same, consistently-named environment variables for
-MySQL-related data (e.g. `MYSQL_DATABASE`). The variable names are based on
-those for the
-[MySQL Docker image](https://hub.docker.com/_/mysql#environment-variables).
-
-Docker images have been upgraded from Go 1.9 to 1.11. They now use ["Distroless"
-base images](https://github.com/GoogleContainerTools/distroless).
-
-### Dropped metrics
-
-Quota metrics with specs of the form `users/<user>/read` and
-`users/<user>/write` are no longer exported by the Trillian binaries (as they
-lead to excessive storage requirements for Trillian metrics).
-
-### Resilience improvements in `log_signer`
-
-#### Add timeout to sequencing loop
-
-Added a timeout to the context in the sequencing loop, with a default of 60s.
-
-#### Fix Operation Loop Hang
-
-Resolved a bug that would hide errors and cause the `OperationLoop` to hang
-until process exit if any error occurred.
-
-### Linting toolchain migration
-
-gometalinter has been replaced with golangci-lint for improved performance and
-Go module support.
-
-### Compact Merkle tree data structures
-
-`CompactMerkleTree` has been removed from `github.com/google/trillian/merkle`,
-and a new package `github.com/google/trillian/merkle/compact` was introduced. A
-new powerful data structure named "compact range" has been added to that
-package, and is now used throughout the repository instead of the compact tree.
-It is a generalization of the previous structure, as it allows manipulating
-arbitrary sub-ranges of leaves rather than only prefixes.
-
-### Storage API changes
-
-The internal storage API is modified so that the ReadOnlyTreeTX.ReadRevision and
-TreeWriter.WriteRevision entrypoints take a context.Context parameter and return
-an optional error.
-
-The `SubtreeCache.GetNodeHash()` method is no longer exported.
-
-The memory storage provider has been refactored to make it more consistent with
-the other storage providers.
-
-The `LogMetadata.GetUnsequencedCounts()` method has been removed.
-
-`NodeReader.GetMerkleNodes` now must return `Node` objects in the same order as
-node IDs requested. Storage implementations known to us already adhere to this
-requirement.
-
-### Maphammer improvements
-
-The maphammer test tool for the experimental Trillian Map has been enhanced.
-
-### Default values changed for some signer flags
-
-The following flags for the signer have new default values:
-
-- `--sequencer_interval`: changed from 10 seconds to 100 milliseconds
-- `--batch_size`: changed from 50 to 1000
-
-These changes improve the signer's throughput and latency under typical
-conditions.
-
-### Master election refactoring
-
-The `--resign_odds` flag in `logsigner` is removed, in favor of a more generic
-`--master_hold_jitter` flag. Operators using this flag are advised to set the
-jitter to `master_check_interval * resign_odds * 2` to achieve similar behavior.
-
-The `--master_check_interval` flag is removed from `logsigner`.
-
-`logsigner` switched to using a new master election interface contained in
-`util/election2` package. The interfaces in `util/election` are removed.
-
-### `CONIKS_SHA256` hash strategy added
-
-Support has been added for a CONIKS sparse tree hasher with SHA256 as the hash
-algorithm. Set a tree's `hash_strategy` to `CONIKS_SHA256` to use it.
-
-### Performance
-
-The performance of `SetLeaves` requests on the Map has been slightly improved.
-The performance of `GetConsistencyProof` requests has been improved when using
-MySQL.
-
-### Logging
-
-Some warning-level logging has been removed from the sequencer in favour of
-returning the same information via the returned error. The caller may still
-choose to log this information. This allows storage implementations that retry
-transactions to suppress warnings when a transaction initially fails but a retry
-succeeds.
-
-Some incorrectly-formatted log messages have been fixed.
-
-### Documentation
-
-[API documentation in Markdown format](docs/api.md) is now available.
-
-### Other
-
-The `TimeSource` type (and other time utils) moved to a separate `util/clock`
-package, extended with a new `Timer` interface that allows mocking `time.Timer`.
-
-The `Sequencer.SignRoot()` method has been removed.
-
-## v1.2.1 - Map race fixed. TLS client support. LogClient improvements
-
-Published 2018-08-20 10:31:00 +0000 UTC
-
-### Servers
-
-A race condition was fixed that affected sparse Merkle trees as served by the
-map server.
-
-### Utilities / Binaries
-
-The `maphammer` uses a consistent empty check, fixing spurious failures in some
-tests.
-
-The `createtree` etc. set of utilities now support TLS via the `-tls-cert-file`
-flag. This support is also available as a client module.
-
-### Log Client
-
-`GetAndVerifyInclusionAtIndex` no longer updates the client's root on every
-access as this was an unexpected side effect. Clients now have explicit control
-of when the root is updated by calling `UpdateRoot`.
-
-A root parameter is now required when log clients are constructed.
-
-The client will now only retry requests that fail with the following errors:
-
-- Aborted
-- DeadlineExceeded
-- ResourceExhausted
-- Unavailable
-
-There is one exception: it will also retry InitLog/InitMap requests that fail
-due to a FailedPrecondition error.
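-
-A sketch of that retry policy as a predicate over gRPC status codes (the
-function is illustrative, not the client's actual implementation):
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-// retriable mirrors the policy described above.
-func retriable(err error, isInitReq bool) bool {
-	switch status.Code(err) {
-	case codes.Aborted, codes.DeadlineExceeded,
-		codes.ResourceExhausted, codes.Unavailable:
-		return true
-	case codes.FailedPrecondition:
-		// Only InitLog/InitMap requests are retried on FailedPrecondition.
-		return isInitReq
-	}
-	return false
-}
-
-func main() {
-	fmt.Println(retriable(status.Error(codes.Unavailable, "backend down"), false)) // true
-	fmt.Println(retriable(status.Error(codes.NotFound, "no tree"), false))         // false
-}
-```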
-
-### Other
-
-The Travis build script has been updated for newer versions of MySQL (5.7
-through MySQL 8) and will no longer work with 5.6.
-
-Commit
-[f3eaa887163bb4d2ea4b4458cb4e7c5c2f346bc6](https://api.github.com/repos/google/trillian/commits/f3eaa887163bb4d2ea4b4458cb4e7c5c2f346bc6)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.2.1)
-
-## v1.2.0 - Signer / Quota fixes. Error mapping fix. K8 improvements
-
-Published 2018-06-25 10:42:52 +0000 UTC
-
-The Log Signer now tries to avoid creating roots older than ones that already
-exist. This issue has been seen occurring on a test system. Important note: if
-you run this code in production, allowing clocks to drift out of sync between
-nodes can cause other problems, including for clustering and database
-replication.
-
-The Log Signer now publishes metrics for the logs that it is actively signing.
-In a clustered environment responsibility can be expected to move around between
-signer instances over time.
-
-The Log API now allows personalities to explicitly list a vector of identifiers
-which should be charged for `User` quota. This allows a more nuanced application
-of request rate limiting across multiple dimensions. Some fixes have also been
-made to quota handling, e.g. batch requests were not reserving the appropriate
-quota. Consult the corresponding PRs for more details.
-
-For the log RPC server APIs `GetLeavesByIndex` and `GetLeavesByRange` MySQL
-storage has been modified to return status codes that match CloudSpanner.
-Previously some requests with out of range parameters were receiving 5xx error
-status rather than 4xx when errors were mapped to the HTTP space by CTFE.
-
-The Kubernetes deployment scripts continue to evolve and improve.
-
-Commit
-[aef10347dba1bd86a0fcb152b47989d0b51ba1fa](https://api.github.com/repos/google/trillian/commits/aef10347dba1bd86a0fcb152b47989d0b51ba1fa)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.2.0)
-
-## v1.1.1 - CloudSpanner / Tracing / Health Checks
-
-Published 2018-05-08 12:55:34 +0000 UTC
-
-More improvements have been made to the CloudSpanner storage code. CloudSpanner
-storage has now been tested up to ~3.1 billion log entries.
-
-Explicit health checks have been added to the gRPC Log and Map servers (and the
-log signer). The HTTP endpoint must be enabled and the checks will serve on
-`/healthz` where a non-200 response means the server is unhealthy. The example
-Kubernetes deployment configuration has been updated to include them. Other
-improvements have been made to the Kubernetes deployment scripts and docs.
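-
-Probing the endpoint is a plain HTTP GET; a minimal checker, assuming the
-HTTP endpoint is exposed on port 8091:
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"net/http"
-)
-
-func main() {
-	resp, err := http.Get("http://localhost:8091/healthz")
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer resp.Body.Close()
-	// Anything other than 200 means the server is unhealthy.
-	if resp.StatusCode != http.StatusOK {
-		fmt.Println("unhealthy:", resp.Status)
-		return
-	}
-	fmt.Println("healthy")
-}
-```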
-
-The gRPC Log and Map servers have been instrumented for tracing with
-[OpenCensus](https://opencensus.io/). For GCP it just requires the `--tracing`
-flag to be added and results will be available in the GCP console under
-StackDriver -> Trace.
-
-Commit
-[3a68a845f0febdd36937c15f1d97a3a0f9509440](https://api.github.com/repos/google/trillian/commits/3a68a845f0febdd36937c15f1d97a3a0f9509440)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.1.1)
-
-## v1.1.0 - CloudSpanner Improvements & Log Root structure changes etc.
-
-Published 2018-04-17 08:02:50 +0000 UTC
-
-Changes are in progress (e.g. see #1037) to rework the internal signed root
-format used by the log RPC server to be more useful / interoperable. Currently
-they are mostly internal API changes to the log and map servers. However, the
-`signature` and `log_id` fields in SignedLogRoot have been deleted and users
-must unpack the serialized structure to access these now. This change is not
-backwards compatible.
-
-Changes have been made to log server APIs and CT frontends for when a request
-hits a server that has an earlier version of the tree than is needed to satisfy
-the request. In these cases the log server used to return an error but now
-returns an empty proof along with the current STH it has available. This allows
-clients to detect these cases and handle them appropriately.
-
-The CloudSpanner schema has changed. If you have a database instance you'll need
-to recreate it with the new schema. Performance has been noticeably improved
-since the previous release and we have tested it to approx one billion log
-entries. Note: This code is still being developed and further changes are
-possible.
-
-Support for `sqlite` in unit tests has been removed because of ongoing issues
-with flaky tests. These were caused by concurrent accesses to the same database,
-which sqlite doesn't support. The use of `sqlite` in production has never been
-supported and it should not be used for this.
-
-Commit
-[9a5dc6223bab0e1061b66b49757c2418c47b9f29](https://api.github.com/repos/google/trillian/commits/9a5dc6223bab0e1061b66b49757c2418c47b9f29)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.1.0)
-
-## v1.0.8 - Docker Updates / Freezing Logs / CloudSpanner Options
-
-Published 2018-03-08 13:42:11 +0000 UTC
-
-The Docker image files have been updated and the database has been changed to
-`MariaDB 10.1`.
-
-A `ReadOnlyStaleness` option has been added to the experimental CloudSpanner
-storage. This allows for tuning that might increase performance in some
-scenarios by issuing read transactions with the `exact_staleness` option set
-rather than `strong_read`. For more details see the
-[CloudSpanner TransactionOptions](https://cloud.google.com/spanner/docs/reference/rest/v1/TransactionOptions)
-documentation.
-
-The `LogVerifier` interface has been removed from the log client, though the
-functionality is still available. It is unlikely that there were implementations
-by third parties.
-
-A new `TreeState DRAINING` has been added for trees with `TreeType LOG`. This is
-to support logs being cleanly frozen. A log tree in this state will not accept
-new entries via `QueueLeaves` but will continue to integrate any that were
-previously queued. When the queue of pending entries has been emptied the tree
-can be set to the `FROZEN` state safely. For MySQL storage this requires a
-schema update to add `'DRAINING'` to the enum of valid states.
-
-A command line utility `updatetree` has been added to allow tree states to be
-changed. This is also to support cleanly freezing logs.
-
-A 'howto' document has been added that explains how to freeze a log tree using
-the features added in this release.
-
-Commit
-[0e6d950b872d19e42320f4714820f0fe793b9913](https://api.github.com/repos/google/trillian/commits/0e6d950b872d19e42320f4714820f0fe793b9913)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.8)
-
-## v1.0.7 - Storage API Changes, Schema Tweaks
-
-Published 2018-03-01 11:16:32 +0000 UTC
-
-Note: A large number of storage related API changes have been made in this
-release. These will probably only affect developers writing their own storage
-implementations.
-
-A new tree type `PREORDERED_LOG` has been added for upcoming mirror support. This
-requires a schema change before it can be used. This change can be made when
-convenient and can be deferred until the functionality is available and needed.
-The definition of the `TreeType` column enum should be changed to `ENUM('LOG',
-'MAP', 'PREORDERED_LOG') NOT NULL`.
-
-Some storage interfaces were removed in #977 as they only had one
-implementation. We think this won't cause any impact on third parties and are
-willing to reconsider this change if it does.
-
-The gRPC Log and Map server APIs have new methods `InitLog` and `InitMap` which
-prepare newly created trees for use. Attempting to use trees that have not been
-initialized will return the `FAILED_PRECONDITION` error
-`storage.ErrTreeNeedsInit`.
-
-The gRPC Log server API has new methods `AddSequencedLeaf` and
-`AddSequencedLeaves`. These are intended to support mirroring applications and
-are not yet implemented.
-
-Storage APIs have been added such as `ReadWriteTransaction` which allows the
-underlying storage to manage the transaction and optionally retry until success
-or timeout. This is a more natural fit for some types of storage API such as
-[CloudSpanner](https://cloud.google.com/spanner/docs/transactions) and possibly
-other environments with managed transactions.
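-
-A hedged sketch of the general shape of such an API (names, types and the
-retry policy here are illustrative, not Trillian's actual storage interface):
-
-```go
-package main
-
-import (
-	"context"
-	"errors"
-	"fmt"
-)
-
-var errTransient = errors.New("transient storage error")
-
-// treeTX stands in for a storage transaction; illustrative only.
-type treeTX struct{ attempts int }
-
-// readWriteTransaction runs fn, letting the storage layer retry on
-// transient failures until success or the context expires.
-func readWriteTransaction(ctx context.Context, fn func(context.Context, *treeTX) error) error {
-	tx := &treeTX{}
-	for {
-		tx.attempts++
-		err := fn(ctx, tx)
-		if err == nil {
-			return nil // the storage layer commits on success
-		}
-		if ctx.Err() != nil || !errors.Is(err, errTransient) {
-			return err // give up on timeout or permanent error
-		}
-		// transient error: loop and retry
-	}
-}
-
-func main() {
-	err := readWriteTransaction(context.Background(), func(ctx context.Context, tx *treeTX) error {
-		if tx.attempts < 2 {
-			return errTransient // first attempt fails; storage retries
-		}
-		fmt.Println("committed on attempt", tx.attempts)
-		return nil
-	})
-	if err != nil {
-		fmt.Println("failed:", err)
-	}
-}
-```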
-
-The older `BeginXXX` methods were removed from the APIs. It should be fairly
-easy to convert a custom storage implementation to the new API format as can be
-seen from the changes made to the MySQL storage.
-
-The `GetOpts` options are no longer used by storage. This fixed the strange
-situation of storage code having to pass manufactured dummy instances to
-`GetTree`, which was being called in all the layers involved in request
-processing. Various internal APIs were modified to take a `*trillian.Tree`
-instead of an `int64`.
-
-A new storage implementation has been added for CloudSpanner. This is currently
-experimental and does not yet support Map trees. We have also added Docker
-examples for running Trillian in Google Cloud with CloudSpanner.
-
-The maximum size of a `VARBINARY` column in MySQL is too small to properly
-support Map storage. The type has been changed in the schema to `MEDIUMBLOB`.
-This can be done in place with an `ALTER TABLE` command but this could be very
-slow for large databases as it is a change to the physical row layout. Note:
-There is no need to make this change to the database if you are only using it
-for Log storage e.g. for Certificate Transparency servers.
-
-The obsolete programs `queue_leaves` and `fetch_leaves` have been deleted.
-
-Commit
-[7d73671537ca2a4745dc94da3dc93d32d7ce91f1](https://api.github.com/repos/google/trillian/commits/7d73671537ca2a4745dc94da3dc93d32d7ce91f1)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.7)
-
-## v1.0.6 - GetLeavesByRange. 403 Permission Errors. Signer Metrics.
-
-Published 2018-02-05 16:00:26 +0000 UTC
-
-A new log server RPC API has been added to get leaves in a range. This is a more
-natural fit for CT type applications as it more closely follows the CT HTTP API.
-
-The server now returns 403 for permission denied where it used to return 500
-errors. This follows the behaviour of the C++ implementation.
-
-The log signer binary now reports metrics for the number it has signed and the
-number of errors that have occurred. This is intended to give more insight into
-the state of the queue and integration processing.
-
-Commit
-[b20b3109af7b68227c83c5d930271eaa4f0be771](https://api.github.com/repos/google/trillian/commits/b20b3109af7b68227c83c5d930271eaa4f0be771)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.6)
-
-## v1.0.5 - TLS, Merge Delay Metrics, Easier Admin Tests
-
-Published 2018-02-07 09:41:08 +0000 UTC
-
-The API protos have been rebuilt with gRPC 1.3.
-
-Timestamps have been added to the log leaves in the MySQL database. Before
-upgrading to this version you **must** make the following schema changes:
-
-* Add the following column to the `LeafData` table. If you have existing data
- in the queue you might have to remove the NOT NULL clause:
- `QueueTimestampNanos BIGINT NOT NULL`
-
-* Add the following column to the `SequencedLeafData` table:
- `IntegrateTimestampNanos BIGINT NOT NULL`
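-
-A hedged sketch of applying these changes with Go's `database/sql`; the DSN is
-a placeholder, and the statements assume an empty queue (drop `NOT NULL` as
-noted above if it is not):
-
-```go
-package main
-
-import (
-	"database/sql"
-	"log"
-
-	_ "github.com/go-sql-driver/mysql"
-)
-
-func main() {
-	// Placeholder DSN; point this at your Trillian database.
-	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/trillian")
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer db.Close()
-
-	for _, stmt := range []string{
-		"ALTER TABLE LeafData ADD COLUMN QueueTimestampNanos BIGINT NOT NULL",
-		"ALTER TABLE SequencedLeafData ADD COLUMN IntegrateTimestampNanos BIGINT NOT NULL",
-	} {
-		if _, err := db.Exec(stmt); err != nil {
-			log.Fatalf("%s: %v", stmt, err)
-		}
-	}
-}
-```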
-
-The above timestamps are used to export metrics via monitoring that give the
-merge delay for each tree that is in use. This is a good metric to use for
-alerting on.
-
-The Log and Map RPC servers now support TLS.
-
-AdminServer tests have been improved.
-
-Commit
-[dec673baf984c3d22d7b314011d809258ec36821](https://api.github.com/repos/google/trillian/commits/dec673baf984c3d22d7b314011d809258ec36821)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.5)
-
-## v1.0.4 - Fix election issue. Large vendor updates.
-
-Published 2018-02-05 15:42:25 +0000 UTC
-
-An issue has been fixed where the master for a log could resign from the
-election while it was in the process of integrating a batch of leaves. We do not
-believe this could cause any issues with data integrity because of the versioned
-tree storage.
-
-This release includes a large number of vendor commits merged to catch up with
-etcd 3.2.10 and gRPC v1.3.
-
-Commit
-[1713865ecca0dc8f7b4a8ed830a48ae250fd943b](https://api.github.com/repos/google/trillian/commits/1713865ecca0dc8f7b4a8ed830a48ae250fd943b)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.4)
-
-## v1.0.3 - Auth API. Interceptor fixes. Request validation + More
-
-Published 2018-02-05 15:33:08 +0000 UTC
-
-An authorization API has been added to the interceptors. This is intended for
-future development and integration.
-
-Issues where the interceptor would not time out on `PutTokens` have been fixed.
-This should make the quota system more robust.
-
-A bug has been fixed where the interceptor did not pass the context deadline
-through to other requests it made. This could cause failing requests to fail
-only after longer than the deadline, with a misleading reason in the log. It did
-not cause request failures if they would otherwise succeed.
-
-Metalinter has been added and the code has been cleaned up where appropriate.
-
-Docker and Kubernetes scripts have been available and images are now built with
-Go 1.9.
-
-Sqlite has been introduced for unit tests where possible. Note that it is not
-multi-threaded and cannot support all our testing scenarios. We still require
-MySQL for integration tests. Please note that Sqlite **must not** be used for
-production deployments as RPC servers are multi-threaded database clients.
-
-The Log RPC server now applies tighter validation to request parameters than
-before. It's possible that some requests will be rejected. This should not
-affect valid requests.
-
-The admin server will only create trees matching the server it is hosted in. For
-example, the admin server running in the Log server will not create Map trees.
-This may be reviewed in future as applications can legitimately use both tree
-types.
-
-Commit
-[9d08b330ab4270a8e984072076c0b3e84eb4601b](https://api.github.com/repos/google/trillian/commits/9d08b330ab4270a8e984072076c0b3e84eb4601b)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.3)
-
-## v1.0.2 - TreeGC, Go 1.9, Update Private Keys.
-
-Published 2018-02-05 15:18:40 +0000 UTC
-
-Go 1.9 is required.
-
-It is now possible to update private keys via the admin API and this was added
-to the available field masks. The key storage format has not changed so we
-believe this change is transparent.
-
-Deleted trees are now garbage collected after an interval. This hard deletes
-them and they cannot be recovered. Be aware of this before upgrading if you have
-any that are in a soft-deleted state.
-
-The Admin RPC API has been extended to allow trees to be undeleted - up to the
-point where they are hard deleted as set out above.
-
-Commit
-[442511ad82108654033c9daa4e72f8a79691dd32](https://api.github.com/repos/google/trillian/commits/442511ad82108654033c9daa4e72f8a79691dd32)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.2)
-
-## v1.0.1 - Batched Queue Option Added
-
-Published 2018-02-05 14:49:33 +0000 UTC
-
-Apart from fixes, this release includes the option of a batched queue. This has
-been reported to allow faster sequencing but is not enabled by default.
-
-If you want to switch to this you must build the code with the `--tags
-batched_queue` option. You must then also apply a schema change if you are
-running with a previous version of the database. Add the following column to the
-`Unsequenced` table:
-
-`QueueID VARBINARY(32) DEFAULT NULL`
-
-If you don't plan to switch to the `batched_queue` mode then you don't need to
-make the above change.
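-
-For illustration, this is how a Go build tag gates code at compile time; the
-file below is a standalone example, not Trillian's actual source layout:
-
-```go
-//go:build batched_queue
-// +build batched_queue
-
-// Compiled only when built with: go build -tags batched_queue
-// A sibling file guarded by !batched_queue would provide the default path.
-package main
-
-import "fmt"
-
-const batchedQueue = true
-
-func main() {
-	fmt.Println("batched queue enabled:", batchedQueue)
-}
-```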
-
-Commit
-[afd178f85c963f56ad2ae7d4721d139b1d6050b4](https://api.github.com/repos/google/trillian/commits/afd178f85c963f56ad2ae7d4721d139b1d6050b4)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0.1)
-
-## v1.0 - First Log version we believe was ready for use, to support CT.
-
-Published 2018-02-05 13:51:55 +0000 UTC
-
-Quota metrics published. Quota admin API and server implemented. Improvements to
-local / AWS deployment. Map fixes and further development. ECDSA key handling
-improvements. Key factory improvements. Code coverage added. Quota integration
-test added. Etcd quota support in log and map connected up. Incompatibility with
-C++ code fixed where consistency proof requests for first == second == 0 were
-rejected.
-
-Commit
-[a6546d092307f6e0d396068066033b434203824d](https://api.github.com/repos/google/trillian/commits/a6546d092307f6e0d396068066033b434203824d)
-Download [zip](https://api.github.com/repos/google/trillian/zipball/v1.0)
diff --git a/vendor/github.com/google/trillian/CODEOWNERS b/vendor/github.com/google/trillian/CODEOWNERS
deleted file mode 100644
index 48985d872..000000000
--- a/vendor/github.com/google/trillian/CODEOWNERS
+++ /dev/null
@@ -1,21 +0,0 @@
-# See https://help.github.com/articles/about-codeowners/
-# for more info about CODEOWNERS file
-
-# It uses the same pattern rules as the gitignore file
-# https://git-scm.com/docs/gitignore#_pattern_format
-#
-# These owners will be the default owners for everything in
-# the repo. Unless a later match takes precedence,
-# @google/trillian-team will be requested for
-# review when someone opens a pull request.
-* @google/trillian-team
-
-/*.proto @mhutchinson @AlCutter @pphaneuf
-/storage/storagepb/storage.proto @mhutchinson @AlCutter @pphaneuf
-
-# Mitigation for https://github.com/google/trillian/issues/1297
-# Folks to watch out for changes to DB schemas and ensure that
-# there's a note added in a sensible location about how to
-# upgrade schema instances.
-/storage/mysql/schema/* @mhutchinson @AlCutter @pphaneuf
-/storage/cloudspanner/spanner.sdl @mhutchinson @AlCutter @pphaneuf
diff --git a/vendor/github.com/google/trillian/CONTRIBUTING.md b/vendor/github.com/google/trillian/CONTRIBUTING.md
deleted file mode 100644
index 43de4c9d4..000000000
--- a/vendor/github.com/google/trillian/CONTRIBUTING.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# How to contribute #
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-
-## Contributor License Agreement ##
-
-Contributions to any Google project must be accompanied by a Contributor
-License Agreement. This is not a copyright **assignment**; it simply gives
-Google permission to use and redistribute your contributions as part of the
-project.
-
- * If you are an individual writing original source code and you're sure you
- own the intellectual property, then you'll need to sign an [individual
- CLA][].
-
- * If you work for a company that wants to allow you to contribute your work,
- then you'll need to sign a [corporate CLA][].
-
-You generally only need to submit a CLA once, so if you've already submitted
-one (even if it was for a different project), you probably don't need to do it
-again.
-
-[individual CLA]: https://developers.google.com/open-source/cla/individual
-[corporate CLA]: https://developers.google.com/open-source/cla/corporate
-
-Once your CLA is submitted (or if you already submitted one for
-another Google project), make a commit adding yourself to the
-[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
-of your first [pull request][].
-
-[AUTHORS]: AUTHORS
-[CONTRIBUTORS]: CONTRIBUTORS
-
-
-## Submitting a patch ##
-
- 1. It's generally best to start by opening a new issue describing the bug or
- feature you're intending to fix. Even if you think it's relatively minor,
- it's helpful to know what people are working on. Mention in the initial
- issue that you are planning to work on that bug or feature so that it can
- be assigned to you.
-
- 1. Follow the normal process of [forking][] the project, and set up a new
- branch to work in. It's important that each group of changes be done in
- separate branches in order to ensure that a pull request only includes the
- commits related to that bug or feature.
-
- 1. Do your best to have [well-formed commit messages][] for each change.
- This provides consistency throughout the project, and ensures that commit
- messages are able to be formatted properly by various git tools.
-
- 1. Finally, push the commits to your fork and submit a [pull request][].
-
-[forking]: https://help.github.com/articles/fork-a-repo
-[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
-[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/trillian/CONTRIBUTORS b/vendor/github.com/google/trillian/CONTRIBUTORS
deleted file mode 100644
index 680a58b98..000000000
--- a/vendor/github.com/google/trillian/CONTRIBUTORS
+++ /dev/null
@@ -1,39 +0,0 @@
-# People who have agreed to one of the CLAs and can contribute patches.
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, Google employees are listed here
-# but not in AUTHORS, because Google holds the copyright.
-#
-# Names should be added to this file only after verifying that
-# the individual or the individual's organization has agreed to
-# the appropriate Contributor License Agreement, found here:
-#
-# https://developers.google.com/open-source/cla/individual
-# https://developers.google.com/open-source/cla/corporate
-#
-# The agreement for individuals can be filled out on the web.
-#
-# When adding J Random Contributor's name to this file,
-# either J's name or J's organization's name should be
-# added to the AUTHORS file, depending on whether the
-# individual or corporate CLA was used.
-#
-# Names should be added to this file as:
-# Name <email address>
-#
-# Please keep the list sorted.
-
-Al Cutter <al@google.com> <al@9600.org>
-Alan Parra <alanparra@google.com>
-Antonio Marcedone <a.marcedone@gmail.com>
-Ben Laurie <benl@google.com> <ben@links.org>
-David Drysdale <drysdale@google.com>
-Gary Belvin <gbelvin@google.com>
-Roland Shoemaker <roland@letsencrypt.org>
-Martin Smith <mhs@google.com>
-Martin Hutchinson <mhutchinson@google.com> <mhutchinson@gmail.com>
-Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
-Pavel Kalinnikov <pkalinnikov@google.com> <pavelkalinnikov@gmail.com>
-Pierre Phaneuf <pphaneuf@google.com> <pphaneuf@gmail.com>
-Rob Percival <robpercival@google.com>
-Roger Ng <rogerng@google.com> <roger2hk@gmail.com>
-Vishal Kuo <vishalkuo@gmail.com>
diff --git a/vendor/github.com/google/trillian/LICENSE b/vendor/github.com/google/trillian/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/github.com/google/trillian/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/google/trillian/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/google/trillian/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index f72f59ff8..000000000
--- a/vendor/github.com/google/trillian/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,15 +0,0 @@
-<!---
-Describe your changes in detail here.
-If this fixes an issue, please write "Fixes #123", substituting the issue number.
--->
-
-### Checklist
-
-<!---
-Go over all the following points, and put an `x` in all the boxes that apply.
-Feel free to not tick any boxes that don't apply to this PR (e.g. refactoring may not need a CHANGELOG update).
-If you're unsure about any of these, don't hesitate to ask. We're here to help!
--->
-
-- [ ] I have updated the [CHANGELOG](CHANGELOG.md).
-- [ ] I have updated [documentation](docs/) accordingly (including the [feature implementation matrix](docs/Feature_Implementation_Matrix.md)).
diff --git a/vendor/github.com/google/trillian/README.md b/vendor/github.com/google/trillian/README.md
deleted file mode 100644
index 9ebb3a2de..000000000
--- a/vendor/github.com/google/trillian/README.md
+++ /dev/null
@@ -1,318 +0,0 @@
-# Trillian: General Transparency
-
-[![Go Report Card](https://goreportcard.com/badge/github.com/google/trillian)](https://goreportcard.com/report/github.com/google/trillian)
-[![codecov](https://codecov.io/gh/google/trillian/branch/master/graph/badge.svg?token=QwofUwmvAs)](https://codecov.io/gh/google/trillian)
-[![GoDoc](https://godoc.org/github.com/google/trillian?status.svg)](https://godoc.org/github.com/google/trillian)
-[![Slack Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/)
-
- - [Overview](#overview)
- - [Support](#support)
- - [Using the Code](#using-the-code)
- - [MySQL Setup](#mysql-setup)
- - [Integration Tests](#integration-tests)
- - [Working on the Code](#working-on-the-code)
- - [Rebuilding Generated Code](#rebuilding-generated-code)
- - [Updating Dependencies](#updating-dependencies)
- - [Running Codebase Checks](#running-codebase-checks)
- - [Design](#design)
- - [Design Overview](#design-overview)
- - [Personalities](#personalities)
- - [Log Mode](#log-mode)
- - [Use Cases](#use-cases)
- - [Certificate Transparency Log](#certificate-transparency-log)
-
-
-## Overview
-
-Trillian is an implementation of the concepts described in the
-[Verifiable Data Structures](docs/papers/VerifiableDataStructures.pdf) white paper,
-which in turn is an extension and generalisation of the ideas which underpin
-[Certificate Transparency](https://certificate-transparency.org).
-
-Trillian implements a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree)
-whose contents are served from a data storage layer, to allow scalability to
-extremely large trees. On top of this Merkle tree, Trillian provides the
-following:
-
- - An append-only **Log** mode, analogous to the original
- [Certificate Transparency](https://certificate-transparency.org) logs. In
- this mode, the Merkle tree is effectively filled up from the left, giving a
- *dense* Merkle tree.
-
-Note that Trillian requires particular applications to provide their own
-[personalities](#personalities) on top of the core transparent data store
-functionality.
-
-[Certificate Transparency (CT)](https://tools.ietf.org/html/rfc6962)
-is the most well-known and widely deployed transparency application, and an implementation of CT as a Trillian personality is available in the
-[certificate-transparency-go repo](https://github.com/google/certificate-transparency-go/blob/master/trillian).
-
-Other examples of Trillian personalities are available in the
-[trillian-examples](https://github.com/google/trillian-examples) repo.
-
-
-## Support
-
-- Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency
-- Slack: https://gtrillian.slack.com/ ([invitation](https://join.slack.com/t/gtrillian/shared_invite/enQtNDM3NTE3NjA4NDcwLTMwYzVlMDUxMDQ2MGU5MjcyZGIxMmVmZGNlNzdhMzRlOGFjMWJkNzc0MGY1Y2QyNWQyMWM4NzJlOGMxNTZkZGU))
-
-
-## Using the Code
-
-The Trillian codebase is stable and is used in production by multiple
-organizations, including many large-scale
-[Certificate Transparency](https://certificate.transparency.dev) log
-operators.
-
-Given this, we do not plan to add any new features to this version of Trillian,
-and will try to avoid any further incompatible code and schema changes, but
-cannot guarantee that they will never be necessary.
-
-The current state of feature implementation is recorded in the
-[Feature implementation matrix](docs/Feature_Implementation_Matrix.md).
-
-To build and test Trillian you need:
-
- - Go 1.17 or later (go 1.17 matches cloudbuild, and is preferred for developers
-   who will be submitting PRs to this project).
-
-To run many of the tests (and production deployment) you need:
-
- - [MySQL](https://www.mysql.com/) or [MariaDB](https://mariadb.org/) to provide
- the data storage layer; see the [MySQL Setup](#mysql-setup) section.
-
-Note that this repository uses Go modules to manage dependencies; Go will fetch
-and install them automatically upon build/test.
-
-To fetch the code, dependencies, and build Trillian, run the following:
-
-```bash
-git clone https://github.com/google/trillian.git
-cd trillian
-
-go build ./...
-```
-
-To build and run tests, use:
-
-```bash
-go test ./...
-```
-
-
-The repository also includes multi-process integration tests, described in the
-[Integration Tests](#integration-tests) section below.
-
-
-### MySQL Setup
-
-To run Trillian's integration tests you need to have an instance of MySQL
-running and configured to:
-
- - listen on the standard MySQL port 3306 (so `mysql --host=127.0.0.1
- --port=3306` connects OK)
- - not require a password for the `root` user
-
-You can then set up the [expected tables](storage/mysql/schema/storage.sql) in a
-`test` database like so:
-
-```bash
-./scripts/resetdb.sh
-Warning: about to destroy and reset database 'test'
-Are you sure? y
-> Resetting DB...
-> Reset Complete
-```
-
-### Integration Tests
-
-Trillian includes an integration test suite to confirm basic end-to-end
-functionality, which can be run with:
-
-```bash
-./integration/integration_test.sh
-```
-
-This runs a multi-process test:
-
- - A [test](integration/log_integration_test.go) that starts a Trillian server
- in Log mode, together with a signer, logs many leaves, and checks they are
- integrated correctly.
-
-### Deployment
-
-You can find instructions on how to deploy Trillian in the [deployment](/deployment)
-and [examples/deployment](/examples/deployment) directories.
-
-## Working on the Code
-
-Developers who want to make changes to the Trillian codebase need some
-additional dependencies and tools, described in the following sections. The
-[Cloud Build configuration](cloudbuild.yaml) and the scripts it depends on are
-also a useful reference for the required tools and scripts, as it may be more
-up-to-date than this document.
-
-### Rebuilding Generated Code
-
-Some of the Trillian Go code is autogenerated from other files:
-
- - [gRPC](http://www.grpc.io/) message structures are originally provided as
- [protocol buffer](https://developers.google.com/protocol-buffers/) message
-   definitions. See also https://grpc.io/docs/protoc-installation/.
- - Some unit tests use mock implementations of interfaces; these are created
- from the real implementations by [GoMock](https://github.com/golang/mock).
- - Some enums have string-conversion methods (satisfying the `fmt.Stringer`
- interface) created using the
- [stringer](https://godoc.org/golang.org/x/tools/cmd/stringer) tool (`go get
- golang.org/x/tools/cmd/stringer`).
-
-Re-generating mock or protocol buffer files is only needed if you're changing
-the original files; if you do, you'll need to install the prerequisites:
-
- - a series of tools, using `go install` to ensure that the versions are
- compatible and tested:
-
- ```
- cd $(go list -f '{{ .Dir }}' github.com/google/trillian); \
- go install github.com/golang/mock/mockgen; \
- go install google.golang.org/protobuf/proto; \
- go install google.golang.org/protobuf/cmd/protoc-gen-go; \
- go install google.golang.org/grpc/cmd/protoc-gen-go-grpc; \
- go install github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc; \
- go install golang.org/x/tools/cmd/stringer
- ```
-
-and run the following:
-
-```bash
-go generate -x ./... # hunts for //go:generate comments and runs them
-```
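-
-As an illustration (not taken from this repository), here is a minimal sketch
-of how a `//go:generate` comment drives the `stringer` tool for a hypothetical
-enum; running `go generate` then emits a `color_string.go` file whose
-`String()` method satisfies `fmt.Stringer`:
-
-```go
-package color
-
-//go:generate stringer -type=Color
-
-// Color is a hypothetical enum used only for this example.
-type Color int
-
-const (
-	Red Color = iota
-	Green
-	Blue
-)
-```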
-
-### Updating Dependencies
-
-The Trillian codebase uses go.mod to declare fixed versions of its dependencies.
-With Go modules, updating a dependency simply involves running `go get`:
-```
-export GO111MODULE=on
-go get package/path # Fetch the latest published version
-go get package/path@X.Y.Z # Fetch a specific published version
-go get package/path@HEAD # Fetch the latest commit
-```
-
-To update ALL dependencies to the latest version, run `go get -u`.
-Be warned, however, that this may undo any selected versions that resolve issues in other non-module repos.
-
-While running `go build` and `go test`, go will add any ambiguous transitive dependencies to `go.mod`.
-To clean these up, run:
-```
-go mod tidy
-```
-
-### Running Codebase Checks
-
-The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
-and tests over the codebase.
-
-#### Install [golangci-lint](https://github.com/golangci/golangci-lint#local-installation).
-```bash
-go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.47.3
-```
-
-#### Run code generation, build, test and linters
-```bash
-./scripts/presubmit.sh
-```
-
-#### Or just run the linters alone
-```bash
-golangci-lint run
-```
-
-
-## Design
-
-### Design Overview
-
-Trillian is primarily implemented as a
-[gRPC service](http://www.grpc.io/docs/guides/concepts.html#service-definition);
-this service receives get/set requests over gRPC and retrieves the corresponding
-Merkle tree data from a separate storage layer (currently using MySQL), ensuring
-that the cryptographic properties of the tree are preserved along the way.
-
-The Trillian service is multi-tenanted – a single Trillian installation can
-support multiple Merkle trees in parallel, distinguished by their `TreeId` – and
-each tree operates in one of two modes:
-
- - **Log** mode: an append-only collection of items; this has two sub-modes:
- - normal Log mode, where the Trillian service assigns sequence numbers to
- new tree entries as they arrive
- - 'preordered' Log mode, where the unique sequence number for entries in
- the Merkle tree is externally specified
-
-In either case, Trillian's key transparency property is that cryptographic
-proofs of inclusion/consistency are available for data items added to the
-service.
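-
-As a minimal sketch of interacting with this service (assuming a log server
-listening on localhost:8090 and an already-created tree; the address and tree
-ID are illustrative, not from this repository), queueing a leaf into one
-tenant tree looks like the following:
-
-```go
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-
-	"github.com/google/trillian"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials/insecure"
-)
-
-func main() {
-	// Connect to the Trillian log server's gRPC endpoint.
-	conn, err := grpc.Dial("localhost:8090",
-		grpc.WithTransportCredentials(insecure.NewCredentials()))
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer conn.Close()
-
-	client := trillian.NewTrillianLogClient(conn)
-	// The LogId field selects which tenant tree receives the entry.
-	resp, err := client.QueueLeaf(context.Background(), &trillian.QueueLeafRequest{
-		LogId: 1234, // illustrative tree ID
-		Leaf:  &trillian.LogLeaf{LeafValue: []byte("hello transparency")},
-	})
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println(resp.QueuedLeaf.GetStatus())
-}
-```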
-
-
-### Personalities
-
-To build a complete transparent application, the Trillian core service needs
-to be paired with additional code, known as a *personality*, that provides
-functionality that is specific to the particular application.
-
-In particular, the personality is responsible for:
-
- * **Admission Criteria** – ensuring that submissions comply with the
- overall purpose of the application.
- * **Canonicalization** – ensuring that equivalent versions of the same
- data get the same canonical identifier, so they can be de-duplicated by
- the Trillian core service.
- * **External Interface** – providing an API for external users,
- including any practical constraints (ACLs, load-balancing, DoS protection,
- etc.)
-
-This is
-[described in more detail in a separate document](docs/Personalities.md).
-General
-[design considerations for transparent Log applications](docs/TransparentLogging.md)
-are also discussed separately.
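-
-As a hedged sketch of the admission-criteria role (the names below are
-hypothetical and not part of Trillian or any real personality), a personality
-might gate submissions like this before handing them to the core service:
-
-```go
-package personality
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/google/trillian"
-)
-
-// validate is a hypothetical application-specific admission check.
-func validate(data []byte) error {
-	if len(data) == 0 {
-		return fmt.Errorf("empty submissions are not admitted")
-	}
-	return nil
-}
-
-// Submit enforces the admission criteria, then queues the entry into the
-// Trillian log identified by logID.
-func Submit(ctx context.Context, client trillian.TrillianLogClient, logID int64, data []byte) error {
-	if err := validate(data); err != nil {
-		return err
-	}
-	_, err := client.QueueLeaf(ctx, &trillian.QueueLeafRequest{
-		LogId: logID,
-		Leaf:  &trillian.LogLeaf{LeafValue: data},
-	})
-	return err
-}
-```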
-
-### Log Mode
-
-When running in Log mode, Trillian provides a gRPC API whose operations are
-similar to those available for Certificate Transparency logs
-(cf. [RFC 6962](https://tools.ietf.org/html/rfc6962)). These include:
-
- - `GetLatestSignedLogRoot` returns information about the current root of the
- Merkle tree for the log, including the tree size, hash value, timestamp and
- signature.
- - `GetLeavesByRange` returns leaf information for particular leaves,
- specified by their index in the log.
- - `QueueLeaf` requests inclusion of the specified item into the log.
- - For a pre-ordered log, `AddSequencedLeaves` requests the inclusion of
- specified items into the log at specified places in the tree.
- - `GetInclusionProof`, `GetInclusionProofByHash` and `GetConsistencyProof`
- return inclusion and consistency proof data.
-
-In Log mode (whether normal or pre-ordered), Trillian includes an additional
-Signer component; this component periodically processes pending items and
-adds them to the Merkle tree, creating a new signed tree head as a result.
-
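-A minimal sketch of reading from a log via two of these RPCs (a hedged
-example, reusing the imports and gRPC connection from the earlier sketch):
-
-```go
-// readLog fetches the latest signed tree head and the first few leaves.
-func readLog(ctx context.Context, conn *grpc.ClientConn, logID int64) error {
-	client := trillian.NewTrillianLogClient(conn)
-
-	root, err := client.GetLatestSignedLogRoot(ctx,
-		&trillian.GetLatestSignedLogRootRequest{LogId: logID})
-	if err != nil {
-		return err
-	}
-	// LogRoot is a serialized log root; verifying clients parse and check it.
-	fmt.Printf("signed log root: %x\n", root.SignedLogRoot.LogRoot)
-
-	leaves, err := client.GetLeavesByRange(ctx,
-		&trillian.GetLeavesByRangeRequest{LogId: logID, StartIndex: 0, Count: 10})
-	if err != nil {
-		return err
-	}
-	for _, leaf := range leaves.Leaves {
-		fmt.Printf("leaf %d: %q\n", leaf.LeafIndex, leaf.LeafValue)
-	}
-	return nil
-}
-```
-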
-![Log components](docs/images/LogDesign.png)
-
-(Note that each of the components in this diagram can be
-[distributed](https://github.com/google/certificate-transparency-go/blob/master/trillian/docs/ManualDeployment.md#distribution),
-for scalability and resilience.)
-
-
-## Use Cases
-
-### Certificate Transparency Log
-
-The most obvious application for Trillian in Log mode is to provide a
-Certificate Transparency (RFC 6962) Log. To do this, the CT Log personality
-needs to include all of the certificate-specific processing – in particular,
-checking that an item that has been suggested for inclusion is indeed a valid
-certificate that chains to an accepted root.
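-
-An illustrative (hypothetical) admission check of that shape, using only the
-Go standard library, might look like this:
-
-```go
-package ctpersonality
-
-import "crypto/x509"
-
-// chainsToAcceptedRoot reports whether cert verifies against the log's
-// accepted roots, using any supplied intermediates. A real CT personality
-// performs considerably more validation than this sketch.
-func chainsToAcceptedRoot(cert *x509.Certificate, intermediates, roots *x509.CertPool) error {
-	_, err := cert.Verify(x509.VerifyOptions{
-		Roots:         roots,
-		Intermediates: intermediates,
-	})
-	return err
-}
-```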
-
diff --git a/vendor/github.com/google/trillian/cloudbuild.yaml b/vendor/github.com/google/trillian/cloudbuild.yaml
deleted file mode 100644
index b1ee8f780..000000000
--- a/vendor/github.com/google/trillian/cloudbuild.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-# This file contains Google Cloud Build configuration for presubmit checks, unit
-# and integration tests, triggered by pull requests and commits to branches.
-
-timeout: 1800s
-substitutions:
- _CODECOV_TOKEN: "" # The auth token for uploading coverage to Codecov.
-options:
- machineType: E2_HIGHCPU_32
- volumes:
- # A shared volume for caching Go modules between steps.
- - name: go-modules
- path: /go
- env:
- - GO111MODULE=on
- - GOPATH=/go
- - GOLANG_PROTOBUF_REGISTRATION_CONFLICT=ignore # Temporary work-around for the "v1.proto already registered" error.
- - DOCKER_CLIENT_TIMEOUT=120
- - COMPOSE_HTTP_TIMEOUT=120
-
-# Cache the testbase image in Container Registry, to be reused by subsequent
-# builds. The technique is described here:
-# https://cloud.google.com/cloud-build/docs/speeding-up-builds#using_a_cached_docker_image
-#
-# TODO(pavelkalinnikov): Consider pushing this image only on commits to master.
-images: ['gcr.io/$PROJECT_ID/trillian_testbase:latest']
-
-# Cloud Build logs sent to GCS bucket
-logsBucket: 'gs://trillian-cloudbuild-logs'
-
-steps:
-
-# Try to pull the testbase image from Container Registry.
-- name: 'gcr.io/cloud-builders/docker'
- entrypoint: 'bash'
- args: ['-c', 'docker pull gcr.io/$PROJECT_ID/trillian_testbase:latest || exit 0']
-# Build the testbase image reusing as much of the cached image as possible.
-- name: 'gcr.io/cloud-builders/docker'
- args: [
- 'build',
- '-t', 'gcr.io/$PROJECT_ID/trillian_testbase:latest',
- '--cache-from', 'gcr.io/$PROJECT_ID/trillian_testbase:latest',
- '-f', './integration/cloudbuild/testbase/Dockerfile',
- '.'
- ]
-
-# Set up tools and any other common steps which should not be part of Docker image.
-- id: prepare
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/prepare.sh
-
-# Run lint and porcelain checks; make sure the diff is empty and no files need
-# to be updated. This includes gofmt, golangci-lint, go mod tidy, go
-# generate and a few more.
-- id: lint
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./scripts/presubmit.sh
- args:
- - --no-build
- - --fix
- - --no-mod-tidy
- - --empty-diff
- waitFor:
- - prepare
-
-# Presubmit
-- id: presubmit
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_presubmit.sh
- args:
- - --no-linters
- - --no-generate
- env:
- - GOFLAGS=-race
- - GO_TEST_TIMEOUT=20m
- waitFor:
- - lint
-
-# Codecov
-- id: codecov
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_presubmit.sh
- args:
- - --coverage
- - --no-linters
- - --no-generate
- env:
- - GOFLAGS=-race
- - GO_TEST_TIMEOUT=20m
- - CODECOV_TOKEN=${_CODECOV_TOKEN}
- waitFor:
- - lint
-
-# Presubmit (Batched queue)
-- id: presubmit_batched
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_presubmit.sh
- args:
- - --no-linters
- - --no-generate
- env:
- - GOFLAGS=-race --tags=batched_queue
- - GO_TEST_TIMEOUT=20m
- waitFor:
- - lint
-
-# Presubmit (PKCS11)
-- id: presubmit_pkcs11
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_presubmit.sh
- args:
- - --no-linters
- - --no-generate
- env:
- - GOFLAGS=-race --tags=pkcs11
- - GO_TEST_TIMEOUT=20m
- waitFor:
- - lint
-
-# Try to spread the load a bit: we'll wait for all the presubmit.* steps
-# to finish before starting the integration.* ones.
-# Having too many "big" things running concurrently leads to problems
-# with timeouts and MySQL issues.
-- id: presubmits_done
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: /bin/true
- waitFor:
- - codecov
- - presubmit
- - presubmit_batched
- - presubmit_pkcs11
-
-# Integration
-- id: integration
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_integration.sh
- env:
- - GO_TEST_TIMEOUT=20m
- waitFor:
- - presubmits_done
-
-# Integration (Docker)
-- id: integration_docker
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/docker_compose_integration_test.sh
- waitFor:
- - presubmits_done
-
-# Integration (etcd)
-- id: integration_etcd
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_integration.sh
- env:
- - ETCD_DIR=/go/bin
- - GOFLAGS=-race
- - GO_TEST_TIMEOUT=20m
- waitFor:
- - presubmits_done
-
-# Integration (Batched queue)
-- id: integration_batched
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_integration.sh
- env:
- - GOFLAGS=-race -tags=batched_queue
- - GO_TEST_TIMEOUT=20m
- waitFor:
- - presubmits_done
-
-# Integration (PKCS11)
-- id: integration_pkcs11
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_integration.sh
- env:
- - GOFLAGS=-race -tags=pkcs11
- - GO_TEST_TIMEOUT=20m
- waitFor:
- - presubmits_done
-
-# Integration (MariaDB)
-- id: integration_mariadb
- name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
- entrypoint: ./integration/cloudbuild/run_integration.sh
- env:
- - GO_TEST_TIMEOUT=20m
- - MYSQLD_IMAGE=mariadb:10.3
- waitFor:
- - presubmits_done
diff --git a/vendor/github.com/google/trillian/cloudbuild_master.yaml b/vendor/github.com/google/trillian/cloudbuild_master.yaml
deleted file mode 100644
index 751478890..000000000
--- a/vendor/github.com/google/trillian/cloudbuild_master.yaml
+++ /dev/null
@@ -1,165 +0,0 @@
-timeout: 1800s
-substitutions:
- _CLUSTER_NAME: trillian-opensource-ci
- _MASTER_ZONE: us-central1-a
- _MYSQL_TAG: "5.7"
- _MYSQL_ROOT_PASSWORD: ""
- _MYSQL_PASSWORD: ""
-options:
- machineType: E2_HIGHCPU_32
-steps:
-- id: pull_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - pull
- - marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
-- id: tag_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - tag
- - marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- - gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
- waitFor:
- - pull_mysql
-- id: push_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - push
- - gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
- waitFor:
- - tag_mysql
-- id: build_db_server
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/db_server/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/db_server:${COMMIT_SHA}
- - --destination=gcr.io/${PROJECT_ID}/db_server:latest
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor:
- - push_mysql
-- id: build_log_server
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/log_server/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/log_server:${COMMIT_SHA}
- - --destination=gcr.io/${PROJECT_ID}/log_server:latest
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor: ["-"]
-- id: build_log_signer
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/log_signer/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/log_signer:${COMMIT_SHA}
- - --destination=gcr.io/${PROJECT_ID}/log_signer:latest
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor: ["-"]
-- id: build_envsubst
- name: gcr.io/cloud-builders/docker
- args:
- - build
- - examples/deployment/docker/envsubst
- - -t
- - envsubst
- waitFor: ["-"]
-# etcd-operator requires that a ClusterRole has been created for it already.
-# Do this manually using examples/deployment/kubernetes/etcd-role*.yaml.
-- id: apply_k8s_cfgs_for_clusterwide_etcd_operator
- name: gcr.io/cloud-builders/kubectl
- args:
- - apply
- - -f=examples/deployment/kubernetes/etcd-deployment.yaml
- env:
- - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
- waitFor: ["-"]
-- id: copy_k8s_cfgs_for_spanner
- name: busybox
- entrypoint: cp
- args:
- - -r
- - examples/deployment/kubernetes/
- - envsubst-spanner/
- waitFor: ['-']
-- id: envsubst_k8s_cfgs_for_spanner
- name: envsubst
- args:
- - envsubst-spanner/etcd-cluster.yaml
- - envsubst-spanner/trillian-ci-spanner.yaml
- - envsubst-spanner/trillian-log-deployment.yaml
- - envsubst-spanner/trillian-log-service.yaml
- - envsubst-spanner/trillian-log-signer-deployment.yaml
- - envsubst-spanner/trillian-log-signer-service.yaml
- env:
- - PROJECT_ID=${PROJECT_ID}
- - IMAGE_TAG=${COMMIT_SHA}
- waitFor:
- - build_envsubst
- - copy_k8s_cfgs_for_spanner
-- id: apply_k8s_cfgs_for_spanner
- name: gcr.io/cloud-builders/kubectl
- args:
- - apply
- - -f=envsubst-spanner/etcd-cluster.yaml
- - -f=envsubst-spanner/trillian-ci-spanner.yaml
- - -f=envsubst-spanner/trillian-log-deployment.yaml
- - -f=envsubst-spanner/trillian-log-service.yaml
- - -f=envsubst-spanner/trillian-log-signer-deployment.yaml
- - -f=envsubst-spanner/trillian-log-signer-service.yaml
- env:
- - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
- waitFor:
- - envsubst_k8s_cfgs_for_spanner
- - build_log_server
- - build_log_signer
-- id: copy_k8s_cfgs_for_mysql
- name: busybox
- entrypoint: cp
- args:
- - -r
- - examples/deployment/kubernetes/
- - envsubst-mysql/
- waitFor: ['-']
-- id: envsubst_k8s_cfgs_for_mysql
- name: envsubst
- args:
- - envsubst-mysql/etcd-cluster.yaml
- - envsubst-mysql/trillian-ci-mysql.yaml
- - envsubst-mysql/trillian-mysql.yaml
- - envsubst-mysql/trillian-log-deployment.yaml
- - envsubst-mysql/trillian-log-service.yaml
- - envsubst-mysql/trillian-log-signer-deployment.yaml
- - envsubst-mysql/trillian-log-signer-service.yaml
- env:
- - PROJECT_ID=${PROJECT_ID}
- - IMAGE_TAG=${COMMIT_SHA}
- - MYSQL_ROOT_PASSWORD=${_MYSQL_ROOT_PASSWORD}
- - MYSQL_USER=trillian
- - MYSQL_PASSWORD=${_MYSQL_PASSWORD}
- - MYSQL_DATABASE=trillian
- waitFor:
- - build_envsubst
- - copy_k8s_cfgs_for_mysql
-- id: apply_k8s_cfgs_for_mysql
- name: gcr.io/cloud-builders/kubectl
- args:
- - apply
- - --namespace=mysql
- - -f=envsubst-mysql/etcd-cluster.yaml
- - -f=envsubst-mysql/trillian-ci-mysql.yaml
- - -f=envsubst-mysql/trillian-mysql.yaml
- - -f=envsubst-mysql/trillian-log-deployment.yaml
- - -f=envsubst-mysql/trillian-log-service.yaml
- - -f=envsubst-mysql/trillian-log-signer-deployment.yaml
- - -f=envsubst-mysql/trillian-log-signer-service.yaml
- env:
- - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
- waitFor:
- - envsubst_k8s_cfgs_for_mysql
- - build_db_server
- - build_log_server
- - build_log_signer
diff --git a/vendor/github.com/google/trillian/cloudbuild_pr.yaml b/vendor/github.com/google/trillian/cloudbuild_pr.yaml
deleted file mode 100644
index 2309e5615..000000000
--- a/vendor/github.com/google/trillian/cloudbuild_pr.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-# This file contains configuration for Cloud Builds triggered by pull requests
-# to this repository.
-
-timeout: 1800s
-substitutions:
- _CLUSTER_NAME: trillian-opensource-ci
- _MASTER_ZONE: us-central1-a
- _MYSQL_TAG: "5.7"
- _MYSQL_ROOT_PASSWORD: ""
- _MYSQL_PASSWORD: ""
-options:
- machineType: E2_HIGHCPU_32
-
-steps:
-
-- id: pull_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - pull
- - marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
-- id: tag_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - tag
- - marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- - gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
- waitFor:
- - pull_mysql
-- id: push_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - push
- - gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
- waitFor:
- - tag_mysql
-
-- id: build_db_server
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/db_server/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/db_server:${COMMIT_SHA}
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry.
- waitFor:
- - push_mysql
-
-- id: build_log_server
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/log_server/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/log_server:${COMMIT_SHA}
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor: ['-']
-- id: build_log_signer
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/log_signer/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/log_signer:${COMMIT_SHA}
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor: ['-']
-
-- id: build_envsubst
- name: gcr.io/cloud-builders/docker
- args:
- - build
- - examples/deployment/docker/envsubst
- - -t
- - envsubst
- waitFor: ["-"]
-- id: apply_k8s_cfgs_for_clusterwide_etcd_operator_dryrun
- name: gcr.io/cloud-builders/kubectl
- args:
- - apply
- - --dry-run=server
- - -f=examples/deployment/kubernetes/etcd-deployment.yaml
- env:
- - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
- waitFor: ['-']
-- id: copy_k8s_cfgs_for_spanner
- name: busybox
- entrypoint: cp
- args:
- - -r
- - examples/deployment/kubernetes/
- - envsubst-spanner/
- waitFor: ['-']
-- id: envsubst_k8s_cfgs_for_spanner
- name: envsubst
- args:
- - envsubst-spanner/etcd-cluster.yaml
- - envsubst-spanner/trillian-ci-spanner.yaml
- - envsubst-spanner/trillian-log-deployment.yaml
- - envsubst-spanner/trillian-log-service.yaml
- - envsubst-spanner/trillian-log-signer-deployment.yaml
- - envsubst-spanner/trillian-log-signer-service.yaml
- env:
- - PROJECT_ID=${PROJECT_ID}
- - IMAGE_TAG=${COMMIT_SHA}
- waitFor:
- - build_envsubst
- - copy_k8s_cfgs_for_spanner
-- id: apply_k8s_cfgs_for_spanner_dryrun
- name: gcr.io/cloud-builders/kubectl
- args:
- - apply
- - --dry-run=server
- - -f=envsubst-spanner/etcd-cluster.yaml
- - -f=envsubst-spanner/trillian-ci-spanner.yaml
- - -f=envsubst-spanner/trillian-log-deployment.yaml
- - -f=envsubst-spanner/trillian-log-service.yaml
- - -f=envsubst-spanner/trillian-log-signer-deployment.yaml
- - -f=envsubst-spanner/trillian-log-signer-service.yaml
- - --prune
- - --all
- - --prune-whitelist=core/v1/ConfigMap
- env:
- - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
- waitFor:
- - envsubst_k8s_cfgs_for_spanner
- - build_log_server
- - build_log_signer
-- id: copy_k8s_cfgs_for_mysql
- name: busybox
- entrypoint: cp
- args:
- - -r
- - examples/deployment/kubernetes/
- - envsubst-mysql/
- waitFor: ['-']
-- id: envsubst_k8s_cfgs_for_mysql
- name: envsubst
- args:
- - envsubst-mysql/etcd-cluster.yaml
- - envsubst-mysql/trillian-ci-mysql.yaml
- - envsubst-mysql/trillian-mysql.yaml
- - envsubst-mysql/trillian-log-deployment.yaml
- - envsubst-mysql/trillian-log-service.yaml
- - envsubst-mysql/trillian-log-signer-deployment.yaml
- - envsubst-mysql/trillian-log-signer-service.yaml
- env:
- - PROJECT_ID=${PROJECT_ID}
- - IMAGE_TAG=${COMMIT_SHA}
- - MYSQL_ROOT_PASSWORD=${_MYSQL_ROOT_PASSWORD}
- - MYSQL_PASSWORD=${_MYSQL_PASSWORD}
- waitFor:
- - build_envsubst
- - copy_k8s_cfgs_for_mysql
-- id: apply_k8s_cfgs_for_mysql_dryrun
- name: gcr.io/cloud-builders/kubectl
- args:
- - apply
- - --dry-run=server
- - --namespace=mysql
- - -f=envsubst-mysql/etcd-cluster.yaml
- - -f=envsubst-mysql/trillian-ci-mysql.yaml
- - -f=envsubst-mysql/trillian-mysql.yaml
- - -f=envsubst-mysql/trillian-log-deployment.yaml
- - -f=envsubst-mysql/trillian-log-service.yaml
- - -f=envsubst-mysql/trillian-log-signer-deployment.yaml
- - -f=envsubst-mysql/trillian-log-signer-service.yaml
- - --prune
- - --all
- - --prune-whitelist=core/v1/ConfigMap
- env:
- - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
- waitFor:
- - envsubst_k8s_cfgs_for_mysql
- - build_db_server
- - build_log_server
- - build_log_signer
diff --git a/vendor/github.com/google/trillian/cloudbuild_tag.yaml b/vendor/github.com/google/trillian/cloudbuild_tag.yaml
deleted file mode 100644
index 3455a9e4a..000000000
--- a/vendor/github.com/google/trillian/cloudbuild_tag.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-timeout: 1800s
-substitutions:
- _MYSQL_TAG: "5.7"
-options:
- machineType: E2_HIGHCPU_32
-steps:
-- id: pull_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - pull
- - marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
-- id: tag_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - tag
- - marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- - gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
- waitFor:
- - pull_mysql
-- id: push_mysql
- name: gcr.io/cloud-builders/docker
- args:
- - push
- - gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
- waitFor:
- - tag_mysql
-- id: build_db_server
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/db_server/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/db_server:${TAG_NAME}
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor:
- - push_mysql
-- id: build_log_server
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/log_server/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/log_server:${TAG_NAME}
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor: ["-"]
-- id: build_log_signer
- name: gcr.io/kaniko-project/executor:v1.6.0
- args:
- - --dockerfile=examples/deployment/docker/log_signer/Dockerfile
- - --destination=gcr.io/${PROJECT_ID}/log_signer:${TAG_NAME}
- - --cache=true
- - --cache-dir= # Cache is in Google Container Registry
- waitFor: ["-"]
diff --git a/vendor/github.com/google/trillian/codecov.yml b/vendor/github.com/google/trillian/codecov.yml
deleted file mode 100644
index cae92b04e..000000000
--- a/vendor/github.com/google/trillian/codecov.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Customizations to codecov for the Trillian repo. This will be merged into
-# the team / default codecov yaml file.
-#
-# Validate changes with:
-# curl --data-binary @codecov.yml https://codecov.io/validate
-
-# Exclude code that's for testing, demos or utilities that aren't really
-# part of production releases.
-ignore:
- - "**/mock_*.go"
- - "**/testonly"
- - "docs"
- - "examples"
- - "integration"
- - "testonly"
-
-coverage:
- status:
- project:
- default:
- # Allow 1% coverage drop without complaining, to avoid being too noisy.
- threshold: 1%
diff --git a/vendor/github.com/google/trillian/gen.go b/vendor/github.com/google/trillian/gen.go
deleted file mode 100644
index 09fff99fa..000000000
--- a/vendor/github.com/google/trillian/gen.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package trillian contains the generated protobuf code for the Trillian API.
-package trillian
-
-//go:generate protoc -I=. -I=third_party/googleapis --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:. --go-grpc_opt=require_unimplemented_servers=false trillian_log_api.proto trillian_admin_api.proto trillian.proto --doc_out=markdown,api.md:./docs/
-//go:generate protoc -I=. --go_out=paths=source_relative:. crypto/keyspb/keyspb.proto
-
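-// The mockgen directives below generate the gRPC server mocks used by the
-// unit tests in testonly/tmock: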
-//go:generate mockgen -package tmock -destination testonly/tmock/mock_log_server.go github.com/google/trillian TrillianLogServer
-//go:generate mockgen -package tmock -destination testonly/tmock/mock_admin_server.go github.com/google/trillian TrillianAdminServer
diff --git a/vendor/github.com/google/trillian/trillian.pb.go b/vendor/github.com/google/trillian/trillian.pb.go
deleted file mode 100644
index 6855aca39..000000000
--- a/vendor/github.com/google/trillian/trillian.pb.go
+++ /dev/null
@@ -1,806 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.1
-// protoc v3.20.1
-// source: trillian.proto
-
-package trillian
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- anypb "google.golang.org/protobuf/types/known/anypb"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// LogRootFormat specifies the fields that are covered by the
-// SignedLogRoot signature, as well as their ordering and formats.
-type LogRootFormat int32
-
-const (
- LogRootFormat_LOG_ROOT_FORMAT_UNKNOWN LogRootFormat = 0
- LogRootFormat_LOG_ROOT_FORMAT_V1 LogRootFormat = 1
-)
-
-// Enum value maps for LogRootFormat.
-var (
- LogRootFormat_name = map[int32]string{
- 0: "LOG_ROOT_FORMAT_UNKNOWN",
- 1: "LOG_ROOT_FORMAT_V1",
- }
- LogRootFormat_value = map[string]int32{
- "LOG_ROOT_FORMAT_UNKNOWN": 0,
- "LOG_ROOT_FORMAT_V1": 1,
- }
-)
-
-func (x LogRootFormat) Enum() *LogRootFormat {
- p := new(LogRootFormat)
- *p = x
- return p
-}
-
-func (x LogRootFormat) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (LogRootFormat) Descriptor() protoreflect.EnumDescriptor {
- return file_trillian_proto_enumTypes[0].Descriptor()
-}
-
-func (LogRootFormat) Type() protoreflect.EnumType {
- return &file_trillian_proto_enumTypes[0]
-}
-
-func (x LogRootFormat) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use LogRootFormat.Descriptor instead.
-func (LogRootFormat) EnumDescriptor() ([]byte, []int) {
- return file_trillian_proto_rawDescGZIP(), []int{0}
-}
-
-// Defines the way empty / node / leaf hashes are constructed, incorporating
-// preimage protection, which can be application specific.
-type HashStrategy int32
-
-const (
- // Hash strategy cannot be determined. Included to enable detection of
- // mismatched proto versions being used. Represents an invalid value.
- HashStrategy_UNKNOWN_HASH_STRATEGY HashStrategy = 0
- // Certificate Transparency strategy: leaf hash prefix = 0x00, node prefix =
- // 0x01, empty hash is digest([]byte{}), as defined in the specification.
- HashStrategy_RFC6962_SHA256 HashStrategy = 1
- // Sparse Merkle Tree strategy: leaf hash prefix = 0x00, node prefix = 0x01,
- // empty branch is recursively computed from empty leaf nodes.
- // NOT secure in a multi tree environment. For testing only.
- HashStrategy_TEST_MAP_HASHER HashStrategy = 2
- // Append-only log strategy where leaf nodes are defined as the ObjectHash.
- // All other properties are equal to RFC6962_SHA256.
- HashStrategy_OBJECT_RFC6962_SHA256 HashStrategy = 3
- // The CONIKS sparse tree hasher with SHA512_256 as the hash algorithm.
- HashStrategy_CONIKS_SHA512_256 HashStrategy = 4
- // The CONIKS sparse tree hasher with SHA256 as the hash algorithm.
- HashStrategy_CONIKS_SHA256 HashStrategy = 5
-)
-
-// Enum value maps for HashStrategy.
-var (
- HashStrategy_name = map[int32]string{
- 0: "UNKNOWN_HASH_STRATEGY",
- 1: "RFC6962_SHA256",
- 2: "TEST_MAP_HASHER",
- 3: "OBJECT_RFC6962_SHA256",
- 4: "CONIKS_SHA512_256",
- 5: "CONIKS_SHA256",
- }
- HashStrategy_value = map[string]int32{
- "UNKNOWN_HASH_STRATEGY": 0,
- "RFC6962_SHA256": 1,
- "TEST_MAP_HASHER": 2,
- "OBJECT_RFC6962_SHA256": 3,
- "CONIKS_SHA512_256": 4,
- "CONIKS_SHA256": 5,
- }
-)
-
-func (x HashStrategy) Enum() *HashStrategy {
- p := new(HashStrategy)
- *p = x
- return p
-}
-
-func (x HashStrategy) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (HashStrategy) Descriptor() protoreflect.EnumDescriptor {
- return file_trillian_proto_enumTypes[1].Descriptor()
-}
-
-func (HashStrategy) Type() protoreflect.EnumType {
- return &file_trillian_proto_enumTypes[1]
-}
-
-func (x HashStrategy) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use HashStrategy.Descriptor instead.
-func (HashStrategy) EnumDescriptor() ([]byte, []int) {
- return file_trillian_proto_rawDescGZIP(), []int{1}
-}
-
-// State of the tree.
-type TreeState int32
-
-const (
- // Tree state cannot be determined. Included to enable detection of
- // mismatched proto versions being used. Represents an invalid value.
- TreeState_UNKNOWN_TREE_STATE TreeState = 0
- // Active trees are able to respond to both read and write requests.
- TreeState_ACTIVE TreeState = 1
- // Frozen trees are only able to respond to read requests; writing to a frozen
- // tree is forbidden. Trees should not be frozen when there are entries
- // in the queue that have not yet been integrated. See the DRAINING
- // state for this case.
- TreeState_FROZEN TreeState = 2
- // Deprecated: now tracked in Tree.deleted.
- //
- // Deprecated: Do not use.
- TreeState_DEPRECATED_SOFT_DELETED TreeState = 3
- // Deprecated: now tracked in Tree.deleted.
- //
- // Deprecated: Do not use.
- TreeState_DEPRECATED_HARD_DELETED TreeState = 4
- // A tree that is draining will continue to integrate queued entries.
- // No new entries should be accepted.
- TreeState_DRAINING TreeState = 5
-)
-
-// Enum value maps for TreeState.
-var (
- TreeState_name = map[int32]string{
- 0: "UNKNOWN_TREE_STATE",
- 1: "ACTIVE",
- 2: "FROZEN",
- 3: "DEPRECATED_SOFT_DELETED",
- 4: "DEPRECATED_HARD_DELETED",
- 5: "DRAINING",
- }
- TreeState_value = map[string]int32{
- "UNKNOWN_TREE_STATE": 0,
- "ACTIVE": 1,
- "FROZEN": 2,
- "DEPRECATED_SOFT_DELETED": 3,
- "DEPRECATED_HARD_DELETED": 4,
- "DRAINING": 5,
- }
-)
-
-func (x TreeState) Enum() *TreeState {
- p := new(TreeState)
- *p = x
- return p
-}
-
-func (x TreeState) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (TreeState) Descriptor() protoreflect.EnumDescriptor {
- return file_trillian_proto_enumTypes[2].Descriptor()
-}
-
-func (TreeState) Type() protoreflect.EnumType {
- return &file_trillian_proto_enumTypes[2]
-}
-
-func (x TreeState) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use TreeState.Descriptor instead.
-func (TreeState) EnumDescriptor() ([]byte, []int) {
- return file_trillian_proto_rawDescGZIP(), []int{2}
-}
-
-// Type of the tree.
-type TreeType int32
-
-const (
- // Tree type cannot be determined. Included to enable detection of mismatched
- // proto versions being used. Represents an invalid value.
- TreeType_UNKNOWN_TREE_TYPE TreeType = 0
- // Tree represents a verifiable log.
- TreeType_LOG TreeType = 1
- // Tree represents a verifiable pre-ordered log, i.e., a log whose entries are
- // placed according to sequence numbers assigned outside of Trillian.
- TreeType_PREORDERED_LOG TreeType = 3
-)
-
-// Enum value maps for TreeType.
-var (
- TreeType_name = map[int32]string{
- 0: "UNKNOWN_TREE_TYPE",
- 1: "LOG",
- 3: "PREORDERED_LOG",
- }
- TreeType_value = map[string]int32{
- "UNKNOWN_TREE_TYPE": 0,
- "LOG": 1,
- "PREORDERED_LOG": 3,
- }
-)
-
-func (x TreeType) Enum() *TreeType {
- p := new(TreeType)
- *p = x
- return p
-}
-
-func (x TreeType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (TreeType) Descriptor() protoreflect.EnumDescriptor {
- return file_trillian_proto_enumTypes[3].Descriptor()
-}
-
-func (TreeType) Type() protoreflect.EnumType {
- return &file_trillian_proto_enumTypes[3]
-}
-
-func (x TreeType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use TreeType.Descriptor instead.
-func (TreeType) EnumDescriptor() ([]byte, []int) {
- return file_trillian_proto_rawDescGZIP(), []int{3}
-}
-
-// Represents a tree.
-// Readonly attributes are assigned at tree creation, after which they may not
-// be modified.
-//
-// Note: Many APIs within the rest of the code require these objects to
-// be provided. For safety they should be obtained via Admin API calls and
-// not created dynamically.
-type Tree struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the tree.
- // Readonly.
- TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // State of the tree.
- // Trees are ACTIVE after creation. At any point the tree may transition
- // between ACTIVE, DRAINING and FROZEN states.
- TreeState TreeState `protobuf:"varint,2,opt,name=tree_state,json=treeState,proto3,enum=trillian.TreeState" json:"tree_state,omitempty"`
- // Type of the tree.
- // Readonly after Tree creation. Exception: Can be switched from
- // PREORDERED_LOG to LOG if the Tree is and remains in the FROZEN state.
- TreeType TreeType `protobuf:"varint,3,opt,name=tree_type,json=treeType,proto3,enum=trillian.TreeType" json:"tree_type,omitempty"`
- // Display name of the tree.
- // Optional.
- DisplayName string `protobuf:"bytes,8,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
-	// Description of the tree.
- // Optional.
- Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"`
- // Storage-specific settings.
- // Varies according to the storage implementation backing Trillian.
- StorageSettings *anypb.Any `protobuf:"bytes,13,opt,name=storage_settings,json=storageSettings,proto3" json:"storage_settings,omitempty"`
- // Interval after which a new signed root is produced even if there have been
-	// no submissions. If zero, this behavior is disabled.
- MaxRootDuration *durationpb.Duration `protobuf:"bytes,15,opt,name=max_root_duration,json=maxRootDuration,proto3" json:"max_root_duration,omitempty"`
- // Time of tree creation.
- // Readonly.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Time of last tree update.
- // Readonly (automatically assigned on updates).
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
- // If true, the tree has been deleted.
- // Deleted trees may be undeleted during a certain time window, after which
- // they're permanently deleted (and unrecoverable).
- // Readonly.
- Deleted bool `protobuf:"varint,19,opt,name=deleted,proto3" json:"deleted,omitempty"`
- // Time of tree deletion, if any.
- // Readonly.
- DeleteTime *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
-}
-
-func (x *Tree) Reset() {
- *x = Tree{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Tree) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Tree) ProtoMessage() {}
-
-func (x *Tree) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Tree.ProtoReflect.Descriptor instead.
-func (*Tree) Descriptor() ([]byte, []int) {
- return file_trillian_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Tree) GetTreeId() int64 {
- if x != nil {
- return x.TreeId
- }
- return 0
-}
-
-func (x *Tree) GetTreeState() TreeState {
- if x != nil {
- return x.TreeState
- }
- return TreeState_UNKNOWN_TREE_STATE
-}
-
-func (x *Tree) GetTreeType() TreeType {
- if x != nil {
- return x.TreeType
- }
- return TreeType_UNKNOWN_TREE_TYPE
-}
-
-func (x *Tree) GetDisplayName() string {
- if x != nil {
- return x.DisplayName
- }
- return ""
-}
-
-func (x *Tree) GetDescription() string {
- if x != nil {
- return x.Description
- }
- return ""
-}
-
-func (x *Tree) GetStorageSettings() *anypb.Any {
- if x != nil {
- return x.StorageSettings
- }
- return nil
-}
-
-func (x *Tree) GetMaxRootDuration() *durationpb.Duration {
- if x != nil {
- return x.MaxRootDuration
- }
- return nil
-}
-
-func (x *Tree) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *Tree) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
-}
-
-func (x *Tree) GetDeleted() bool {
- if x != nil {
- return x.Deleted
- }
- return false
-}
-
-func (x *Tree) GetDeleteTime() *timestamppb.Timestamp {
- if x != nil {
- return x.DeleteTime
- }
- return nil
-}
-
-// SignedLogRoot represents a commitment by a Log to a particular tree.
-//
-// Note that the signature itself is no longer provided by Trillian since
-// https://github.com/google/trillian/pull/2452 .
-// This functionality was intended to support a niche use case but added
-// significant complexity and was prone to causing confusion and
-// misunderstanding for personality authors.
-type SignedLogRoot struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // log_root holds the TLS-serialization of the following structure (described
- // in RFC5246 notation):
- //
- // enum { v1(1), (65535)} Version;
- // struct {
- // uint64 tree_size;
- // opaque root_hash<0..128>;
- // uint64 timestamp_nanos;
- // uint64 revision;
- // opaque metadata<0..65535>;
- // } LogRootV1;
- // struct {
- // Version version;
- // select(version) {
- // case v1: LogRootV1;
- // }
- // } LogRoot;
- //
- // A serialized v1 log root will therefore be laid out as:
- //
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
- // | ver=1 | tree_size |len| root_hash |
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
- //
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
- // | timestamp_nanos | revision |
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
- //
- // +---+---+---+---+---+-....---+
- // | len | metadata |
- // +---+---+---+---+---+-....---+
- //
- // (with all integers encoded big-endian).
- LogRoot []byte `protobuf:"bytes,8,opt,name=log_root,json=logRoot,proto3" json:"log_root,omitempty"`
-}
-
-func (x *SignedLogRoot) Reset() {
- *x = SignedLogRoot{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SignedLogRoot) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SignedLogRoot) ProtoMessage() {}
-
-func (x *SignedLogRoot) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SignedLogRoot.ProtoReflect.Descriptor instead.
-func (*SignedLogRoot) Descriptor() ([]byte, []int) {
- return file_trillian_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *SignedLogRoot) GetLogRoot() []byte {
- if x != nil {
- return x.LogRoot
- }
- return nil
-}
-
-// Proof holds a consistency or inclusion proof for a Merkle tree, as returned
-// by the API.
-type Proof struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // leaf_index indicates the requested leaf index when this message is used for
- // a leaf inclusion proof. This field is set to zero when this message is
- // used for a consistency proof.
- LeafIndex int64 `protobuf:"varint,1,opt,name=leaf_index,json=leafIndex,proto3" json:"leaf_index,omitempty"`
- Hashes [][]byte `protobuf:"bytes,3,rep,name=hashes,proto3" json:"hashes,omitempty"`
-}
-
-func (x *Proof) Reset() {
- *x = Proof{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Proof) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Proof) ProtoMessage() {}
-
-func (x *Proof) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Proof.ProtoReflect.Descriptor instead.
-func (*Proof) Descriptor() ([]byte, []int) {
- return file_trillian_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Proof) GetLeafIndex() int64 {
- if x != nil {
- return x.LeafIndex
- }
- return 0
-}
-
-func (x *Proof) GetHashes() [][]byte {
- if x != nil {
- return x.Hashes
- }
- return nil
-}
-
-var File_trillian_proto protoreflect.FileDescriptor
-
-var file_trillian_proto_rawDesc = []byte{
- 0x0a, 0x0e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x08, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf1, 0x05, 0x0a, 0x04, 0x54, 0x72, 0x65, 0x65, 0x12,
- 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x0a, 0x74, 0x72, 0x65, 0x65,
- 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x74,
- 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x52, 0x09, 0x74, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x09,
- 0x74, 0x72, 0x65, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x54,
- 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a,
- 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65,
- 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
- 0x6e, 0x79, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x12, 0x45, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f,
- 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x52, 0x6f,
- 0x6f, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18,
- 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x3b,
- 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10,
- 0x08, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08,
- 0x12, 0x10, 0x13, 0x52, 0x1e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x65, 0x70,
- 0x6f, 0x63, 0x68, 0x52, 0x10, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x61, 0x6c, 0x67, 0x6f,
- 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x73, 0x74, 0x72, 0x61,
- 0x74, 0x65, 0x67, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65,
- 0x79, 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x52, 0x13, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74,
- 0x68, 0x6d, 0x52, 0x16, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x69,
- 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x1e, 0x75, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x5f, 0x73,
- 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x9d, 0x01, 0x0a, 0x0d, 0x53,
- 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x19, 0x0a, 0x08,
- 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07,
- 0x6c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x08, 0x4a, 0x04, 0x08,
- 0x09, 0x10, 0x0a, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x6c,
- 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x52, 0x12, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f,
- 0x68, 0x61, 0x73, 0x68, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
- 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
- 0x52, 0x0d, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x50, 0x0a, 0x05, 0x50, 0x72,
- 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65,
- 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x64,
- 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03,
- 0x28, 0x0c, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03,
- 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2a, 0x44, 0x0a, 0x0d,
- 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a,
- 0x17, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x4f, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54,
- 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x4f,
- 0x47, 0x5f, 0x52, 0x4f, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x56, 0x31,
- 0x10, 0x01, 0x2a, 0x97, 0x01, 0x0a, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x53, 0x74, 0x72, 0x61, 0x74,
- 0x65, 0x67, 0x79, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x48,
- 0x41, 0x53, 0x48, 0x5f, 0x53, 0x54, 0x52, 0x41, 0x54, 0x45, 0x47, 0x59, 0x10, 0x00, 0x12, 0x12,
- 0x0a, 0x0e, 0x52, 0x46, 0x43, 0x36, 0x39, 0x36, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
- 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x48,
- 0x41, 0x53, 0x48, 0x45, 0x52, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x42, 0x4a, 0x45, 0x43,
- 0x54, 0x5f, 0x52, 0x46, 0x43, 0x36, 0x39, 0x36, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
- 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x49, 0x4b, 0x53, 0x5f, 0x53, 0x48, 0x41,
- 0x35, 0x31, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4e,
- 0x49, 0x4b, 0x53, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x2a, 0x8b, 0x01, 0x0a,
- 0x09, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x52, 0x45, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45,
- 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x0a,
- 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x17, 0x44, 0x45,
- 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x4f, 0x46, 0x54, 0x5f, 0x44, 0x45,
- 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x44,
- 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x44,
- 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0c, 0x0a, 0x08,
- 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x2a, 0x49, 0x0a, 0x08, 0x54, 0x72,
- 0x65, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
- 0x4e, 0x5f, 0x54, 0x52, 0x45, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a,
- 0x03, 0x4c, 0x4f, 0x47, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x52, 0x45, 0x4f, 0x52, 0x44,
- 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x4f, 0x47, 0x10, 0x03, 0x22, 0x04, 0x08, 0x02, 0x10, 0x02,
- 0x2a, 0x03, 0x4d, 0x41, 0x50, 0x42, 0x48, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x42, 0x0d, 0x54, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x50, 0x01, 0x5a, 0x1a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_trillian_proto_rawDescOnce sync.Once
- file_trillian_proto_rawDescData = file_trillian_proto_rawDesc
-)
-
-func file_trillian_proto_rawDescGZIP() []byte {
- file_trillian_proto_rawDescOnce.Do(func() {
- file_trillian_proto_rawDescData = protoimpl.X.CompressGZIP(file_trillian_proto_rawDescData)
- })
- return file_trillian_proto_rawDescData
-}
-
-var file_trillian_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
-var file_trillian_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_trillian_proto_goTypes = []interface{}{
- (LogRootFormat)(0), // 0: trillian.LogRootFormat
- (HashStrategy)(0), // 1: trillian.HashStrategy
- (TreeState)(0), // 2: trillian.TreeState
- (TreeType)(0), // 3: trillian.TreeType
- (*Tree)(nil), // 4: trillian.Tree
- (*SignedLogRoot)(nil), // 5: trillian.SignedLogRoot
- (*Proof)(nil), // 6: trillian.Proof
- (*anypb.Any)(nil), // 7: google.protobuf.Any
- (*durationpb.Duration)(nil), // 8: google.protobuf.Duration
- (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
-}
-var file_trillian_proto_depIdxs = []int32{
- 2, // 0: trillian.Tree.tree_state:type_name -> trillian.TreeState
- 3, // 1: trillian.Tree.tree_type:type_name -> trillian.TreeType
- 7, // 2: trillian.Tree.storage_settings:type_name -> google.protobuf.Any
- 8, // 3: trillian.Tree.max_root_duration:type_name -> google.protobuf.Duration
- 9, // 4: trillian.Tree.create_time:type_name -> google.protobuf.Timestamp
- 9, // 5: trillian.Tree.update_time:type_name -> google.protobuf.Timestamp
- 9, // 6: trillian.Tree.delete_time:type_name -> google.protobuf.Timestamp
- 7, // [7:7] is the sub-list for method output_type
- 7, // [7:7] is the sub-list for method input_type
- 7, // [7:7] is the sub-list for extension type_name
- 7, // [7:7] is the sub-list for extension extendee
- 0, // [0:7] is the sub-list for field type_name
-}
-
-func init() { file_trillian_proto_init() }
-func file_trillian_proto_init() {
- if File_trillian_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_trillian_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Tree); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SignedLogRoot); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Proof); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_trillian_proto_rawDesc,
- NumEnums: 4,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_trillian_proto_goTypes,
- DependencyIndexes: file_trillian_proto_depIdxs,
- EnumInfos: file_trillian_proto_enumTypes,
- MessageInfos: file_trillian_proto_msgTypes,
- }.Build()
- File_trillian_proto = out.File
- file_trillian_proto_rawDesc = nil
- file_trillian_proto_goTypes = nil
- file_trillian_proto_depIdxs = nil
-}
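
Note on the log_root encoding: the TLS-serialized LogRootV1 layout documented in the deleted SignedLogRoot comment above is complete enough to decode by hand. The sketch below is for illustration only (callers would normally prefer the unmarshalling support in github.com/google/trillian/types); the logRootV1 struct and parseLogRoot name are mine, not part of this change.

    package main

    import (
    	"encoding/binary"
    	"errors"
    	"fmt"
    )

    // logRootV1 mirrors the fields named in the LogRootV1 comment above.
    type logRootV1 struct {
    	TreeSize       uint64
    	RootHash       []byte
    	TimestampNanos uint64
    	Revision       uint64
    	Metadata       []byte
    }

    // parseLogRoot decodes the big-endian layout described above:
    // ver(2) | tree_size(8) | len(1) | root_hash | timestamp_nanos(8) |
    // revision(8) | len(2) | metadata.
    func parseLogRoot(b []byte) (*logRootV1, error) {
    	if len(b) < 11 {
    		return nil, errors.New("log root too short")
    	}
    	if v := binary.BigEndian.Uint16(b[0:2]); v != 1 {
    		return nil, fmt.Errorf("unsupported log root version %d", v)
    	}
    	r := &logRootV1{TreeSize: binary.BigEndian.Uint64(b[2:10])}
    	hashLen := int(b[10])
    	b = b[11:]
    	if len(b) < hashLen+18 {
    		return nil, errors.New("log root truncated")
    	}
    	r.RootHash, b = b[:hashLen], b[hashLen:]
    	r.TimestampNanos = binary.BigEndian.Uint64(b[0:8])
    	r.Revision = binary.BigEndian.Uint64(b[8:16])
    	metaLen := int(binary.BigEndian.Uint16(b[16:18]))
    	if len(b[18:]) != metaLen {
    		return nil, errors.New("metadata length mismatch")
    	}
    	r.Metadata = b[18:]
    	return r, nil
    }

    func main() {
    	// Example: an empty v1 root (version=1, all other fields zero).
    	raw := []byte{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
    	root, err := parseLogRoot(raw)
    	fmt.Println(root, err)
    }
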
diff --git a/vendor/github.com/google/trillian/trillian.proto b/vendor/github.com/google/trillian/trillian.proto
deleted file mode 100644
index 78869ba96..000000000
--- a/vendor/github.com/google/trillian/trillian.proto
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-option java_multiple_files = true;
-option java_package = "com.google.trillian.proto";
-option java_outer_classname = "TrillianProto";
-option go_package = "github.com/google/trillian";
-
-package trillian;
-
-import "google/protobuf/any.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/timestamp.proto";
-
-// LogRootFormat specifies the fields that are covered by the
-// SignedLogRoot signature, as well as their ordering and formats.
-enum LogRootFormat {
- LOG_ROOT_FORMAT_UNKNOWN = 0;
- LOG_ROOT_FORMAT_V1 = 1;
-}
-
-// This file defines the types that are exposed through the public Trillian APIs.
-
-// Defines the way empty / node / leaf hashes are constructed incorporating
-// preimage protection, which can be application specific.
-enum HashStrategy {
- // Hash strategy cannot be determined. Included to enable detection of
- // mismatched proto versions being used. Represents an invalid value.
- UNKNOWN_HASH_STRATEGY = 0;
-
- // Certificate Transparency strategy: leaf hash prefix = 0x00, node prefix =
- // 0x01, empty hash is digest([]byte{}), as defined in the specification.
- RFC6962_SHA256 = 1;
-
- // Sparse Merkle Tree strategy: leaf hash prefix = 0x00, node prefix = 0x01,
- // empty branch is recursively computed from empty leaf nodes.
-  // NOT secure in a multi-tree environment. For testing only.
- TEST_MAP_HASHER = 2;
-
- // Append-only log strategy where leaf nodes are defined as the ObjectHash.
- // All other properties are equal to RFC6962_SHA256.
- OBJECT_RFC6962_SHA256 = 3;
-
- // The CONIKS sparse tree hasher with SHA512_256 as the hash algorithm.
- CONIKS_SHA512_256 = 4;
-
- // The CONIKS sparse tree hasher with SHA256 as the hash algorithm.
- CONIKS_SHA256 = 5;
-}
-
-// State of the tree.
-enum TreeState {
- // Tree state cannot be determined. Included to enable detection of
- // mismatched proto versions being used. Represents an invalid value.
- UNKNOWN_TREE_STATE = 0;
-
- // Active trees are able to respond to both read and write requests.
- ACTIVE = 1;
-
-  // Frozen trees are only able to respond to read requests; writing to a frozen
- // tree is forbidden. Trees should not be frozen when there are entries
- // in the queue that have not yet been integrated. See the DRAINING
- // state for this case.
- FROZEN = 2;
-
- // Deprecated: now tracked in Tree.deleted.
- DEPRECATED_SOFT_DELETED = 3 [deprecated = true];
-
- // Deprecated: now tracked in Tree.deleted.
- DEPRECATED_HARD_DELETED = 4 [deprecated = true];
-
- // A tree that is draining will continue to integrate queued entries.
- // No new entries should be accepted.
- DRAINING = 5;
-}
-
-// Type of the tree.
-enum TreeType {
- // Tree type cannot be determined. Included to enable detection of mismatched
- // proto versions being used. Represents an invalid value.
- UNKNOWN_TREE_TYPE = 0;
-
- // Tree represents a verifiable log.
- LOG = 1;
-
- // Tree represents a verifiable pre-ordered log, i.e., a log whose entries are
- // placed according to sequence numbers assigned outside of Trillian.
- PREORDERED_LOG = 3;
-
- reserved 2;
- reserved "MAP";
-}
-
-// Represents a tree.
-// Readonly attributes are assigned at tree creation, after which they may not
-// be modified.
-//
-// Note: Many APIs within the rest of the code require these objects to
-// be provided. For safety they should be obtained via Admin API calls and
-// not created dynamically.
-message Tree {
- // ID of the tree.
- // Readonly.
- int64 tree_id = 1;
-
- // State of the tree.
- // Trees are ACTIVE after creation. At any point the tree may transition
- // between ACTIVE, DRAINING and FROZEN states.
- TreeState tree_state = 2;
-
- // Type of the tree.
- // Readonly after Tree creation. Exception: Can be switched from
- // PREORDERED_LOG to LOG if the Tree is and remains in the FROZEN state.
- TreeType tree_type = 3;
-
- // Display name of the tree.
- // Optional.
- string display_name = 8;
-
-  // Description of the tree.
- // Optional.
- string description = 9;
-
- // Storage-specific settings.
- // Varies according to the storage implementation backing Trillian.
- google.protobuf.Any storage_settings = 13;
-
- // Interval after which a new signed root is produced even if there have been
-  // no submissions. If zero, this behavior is disabled.
- google.protobuf.Duration max_root_duration = 15;
-
- // Time of tree creation.
- // Readonly.
- google.protobuf.Timestamp create_time = 16;
-
- // Time of last tree update.
- // Readonly (automatically assigned on updates).
- google.protobuf.Timestamp update_time = 17;
-
- // If true, the tree has been deleted.
- // Deleted trees may be undeleted during a certain time window, after which
- // they're permanently deleted (and unrecoverable).
- // Readonly.
- bool deleted = 19;
-
- // Time of tree deletion, if any.
- // Readonly.
- google.protobuf.Timestamp delete_time = 20;
-
- reserved 4 to 7, 10 to 12, 14, 18;
- reserved "create_time_millis_since_epoch";
- reserved "duplicate_policy";
- reserved "hash_algorithm";
- reserved "hash_strategy";
- reserved "private_key";
- reserved "public_key";
- reserved "signature_algorithm";
- reserved "signature_cipher_suite";
- reserved "update_time_millis_since_epoch";
-}
-
-// SignedLogRoot represents a commitment by a Log to a particular tree.
-//
-// Note that the signature itself is no longer provided by Trillian since
-// https://github.com/google/trillian/pull/2452 .
-// This functionality was intended to support a niche use case but added
-// significant complexity and was prone to causing confusion and
-// misunderstanding for personality authors.
-message SignedLogRoot {
- // log_root holds the TLS-serialization of the following structure (described
- // in RFC5246 notation):
- //
- // enum { v1(1), (65535)} Version;
- // struct {
- // uint64 tree_size;
- // opaque root_hash<0..128>;
- // uint64 timestamp_nanos;
- // uint64 revision;
- // opaque metadata<0..65535>;
- // } LogRootV1;
- // struct {
- // Version version;
- // select(version) {
- // case v1: LogRootV1;
- // }
- // } LogRoot;
- //
- // A serialized v1 log root will therefore be laid out as:
- //
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
- // | ver=1 | tree_size |len| root_hash |
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
- //
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
- // | timestamp_nanos | revision |
- // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
- //
- // +---+---+---+---+---+-....---+
- // | len | metadata |
- // +---+---+---+---+---+-....---+
- //
- // (with all integers encoded big-endian).
- bytes log_root = 8;
-
- reserved 1 to 7, 9;
- reserved "key_hint";
- reserved "log_id";
- reserved "log_root_signature";
- reserved "root_hash";
- reserved "signature";
- reserved "timestamp_nanos";
- reserved "tree_revision";
- reserved "tree_size";
-}
-
-// Proof holds a consistency or inclusion proof for a Merkle tree, as returned
-// by the API.
-message Proof {
- // leaf_index indicates the requested leaf index when this message is used for
- // a leaf inclusion proof. This field is set to zero when this message is
- // used for a consistency proof.
- int64 leaf_index = 1;
- repeated bytes hashes = 3;
-
- reserved 2;
- reserved "proof_node";
-}
diff --git a/vendor/github.com/google/trillian/trillian_admin_api.pb.go b/vendor/github.com/google/trillian/trillian_admin_api.pb.go
deleted file mode 100644
index 2d97bbf1c..000000000
--- a/vendor/github.com/google/trillian/trillian_admin_api.pb.go
+++ /dev/null
@@ -1,621 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.1
-// protoc v3.20.1
-// source: trillian_admin_api.proto
-
-package trillian
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// ListTrees request.
-// No filters or pagination options are provided.
-type ListTreesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // If true, deleted trees are included in the response.
- ShowDeleted bool `protobuf:"varint,1,opt,name=show_deleted,json=showDeleted,proto3" json:"show_deleted,omitempty"`
-}
-
-func (x *ListTreesRequest) Reset() {
- *x = ListTreesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_admin_api_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListTreesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTreesRequest) ProtoMessage() {}
-
-func (x *ListTreesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_admin_api_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTreesRequest.ProtoReflect.Descriptor instead.
-func (*ListTreesRequest) Descriptor() ([]byte, []int) {
- return file_trillian_admin_api_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *ListTreesRequest) GetShowDeleted() bool {
- if x != nil {
- return x.ShowDeleted
- }
- return false
-}
-
-// ListTrees response.
-// No pagination is provided; all trees the requester has access to are
-// returned.
-type ListTreesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Trees matching the list request filters.
- Tree []*Tree `protobuf:"bytes,1,rep,name=tree,proto3" json:"tree,omitempty"`
-}
-
-func (x *ListTreesResponse) Reset() {
- *x = ListTreesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_admin_api_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListTreesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTreesResponse) ProtoMessage() {}
-
-func (x *ListTreesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_admin_api_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTreesResponse.ProtoReflect.Descriptor instead.
-func (*ListTreesResponse) Descriptor() ([]byte, []int) {
- return file_trillian_admin_api_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *ListTreesResponse) GetTree() []*Tree {
- if x != nil {
- return x.Tree
- }
- return nil
-}
-
-// GetTree request.
-type GetTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the tree to retrieve.
- TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
-}
-
-func (x *GetTreeRequest) Reset() {
- *x = GetTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_admin_api_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetTreeRequest) ProtoMessage() {}
-
-func (x *GetTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_admin_api_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetTreeRequest.ProtoReflect.Descriptor instead.
-func (*GetTreeRequest) Descriptor() ([]byte, []int) {
- return file_trillian_admin_api_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *GetTreeRequest) GetTreeId() int64 {
- if x != nil {
- return x.TreeId
- }
- return 0
-}
-
-// CreateTree request.
-type CreateTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Tree to be created. See Tree and CreateTree for more details.
- Tree *Tree `protobuf:"bytes,1,opt,name=tree,proto3" json:"tree,omitempty"`
-}
-
-func (x *CreateTreeRequest) Reset() {
- *x = CreateTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_admin_api_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateTreeRequest) ProtoMessage() {}
-
-func (x *CreateTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_admin_api_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateTreeRequest.ProtoReflect.Descriptor instead.
-func (*CreateTreeRequest) Descriptor() ([]byte, []int) {
- return file_trillian_admin_api_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *CreateTreeRequest) GetTree() *Tree {
- if x != nil {
- return x.Tree
- }
- return nil
-}
-
-// UpdateTree request.
-type UpdateTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Tree to be updated.
- Tree *Tree `protobuf:"bytes,1,opt,name=tree,proto3" json:"tree,omitempty"`
- // Fields modified by the update request.
- // For example: "tree_state", "display_name", "description".
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateTreeRequest) Reset() {
- *x = UpdateTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_admin_api_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateTreeRequest) ProtoMessage() {}
-
-func (x *UpdateTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_admin_api_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateTreeRequest.ProtoReflect.Descriptor instead.
-func (*UpdateTreeRequest) Descriptor() ([]byte, []int) {
- return file_trillian_admin_api_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *UpdateTreeRequest) GetTree() *Tree {
- if x != nil {
- return x.Tree
- }
- return nil
-}
-
-func (x *UpdateTreeRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.UpdateMask
- }
- return nil
-}
-
-// DeleteTree request.
-type DeleteTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the tree to delete.
- TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
-}
-
-func (x *DeleteTreeRequest) Reset() {
- *x = DeleteTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_admin_api_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteTreeRequest) ProtoMessage() {}
-
-func (x *DeleteTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_admin_api_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteTreeRequest.ProtoReflect.Descriptor instead.
-func (*DeleteTreeRequest) Descriptor() ([]byte, []int) {
- return file_trillian_admin_api_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *DeleteTreeRequest) GetTreeId() int64 {
- if x != nil {
- return x.TreeId
- }
- return 0
-}
-
-// UndeleteTree request.
-type UndeleteTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the tree to undelete.
- TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
-}
-
-func (x *UndeleteTreeRequest) Reset() {
- *x = UndeleteTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_admin_api_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UndeleteTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UndeleteTreeRequest) ProtoMessage() {}
-
-func (x *UndeleteTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_admin_api_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UndeleteTreeRequest.ProtoReflect.Descriptor instead.
-func (*UndeleteTreeRequest) Descriptor() ([]byte, []int) {
- return file_trillian_admin_api_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *UndeleteTreeRequest) GetTreeId() int64 {
- if x != nil {
- return x.TreeId
- }
- return 0
-}
-
-var File_trillian_admin_api_proto protoreflect.FileDescriptor
-
-var file_trillian_admin_api_proto_rawDesc = []byte{
- 0x0a, 0x18, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x5f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x74, 0x72, 0x69, 0x6c,
- 0x6c, 0x69, 0x61, 0x6e, 0x1a, 0x0e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72,
- 0x65, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x68,
- 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0b, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x37, 0x0a,
- 0x11, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65,
- 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x29, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x65,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49,
- 0x64, 0x22, 0x47, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e,
- 0x54, 0x72, 0x65, 0x65, 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x74, 0x0a, 0x11, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x22, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x52, 0x04, 0x74,
- 0x72, 0x65, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61,
- 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b,
- 0x22, 0x2c, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x22, 0x2e,
- 0x0a, 0x13, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x32, 0x86,
- 0x03, 0x0a, 0x0d, 0x54, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x41, 0x64, 0x6d, 0x69, 0x6e,
- 0x12, 0x46, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65, 0x65, 0x73, 0x12, 0x1a, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65,
- 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x72, 0x69, 0x6c,
- 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65, 0x65, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x35, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x54,
- 0x72, 0x65, 0x65, 0x12, 0x18, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12,
- 0x3b, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1b, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
- 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69,
- 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1b, 0x2e, 0x74, 0x72, 0x69,
- 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
- 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1b, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
- 0x61, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e,
- 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0c, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1d, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61,
- 0x6e, 0x2e, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e,
- 0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x42, 0x50, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x15, 0x54, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x41, 0x64,
- 0x6d, 0x69, 0x6e, 0x41, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1a, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
-}
-
-var (
- file_trillian_admin_api_proto_rawDescOnce sync.Once
- file_trillian_admin_api_proto_rawDescData = file_trillian_admin_api_proto_rawDesc
-)
-
-func file_trillian_admin_api_proto_rawDescGZIP() []byte {
- file_trillian_admin_api_proto_rawDescOnce.Do(func() {
- file_trillian_admin_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_trillian_admin_api_proto_rawDescData)
- })
- return file_trillian_admin_api_proto_rawDescData
-}
-
-var file_trillian_admin_api_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
-var file_trillian_admin_api_proto_goTypes = []interface{}{
- (*ListTreesRequest)(nil), // 0: trillian.ListTreesRequest
- (*ListTreesResponse)(nil), // 1: trillian.ListTreesResponse
- (*GetTreeRequest)(nil), // 2: trillian.GetTreeRequest
- (*CreateTreeRequest)(nil), // 3: trillian.CreateTreeRequest
- (*UpdateTreeRequest)(nil), // 4: trillian.UpdateTreeRequest
- (*DeleteTreeRequest)(nil), // 5: trillian.DeleteTreeRequest
- (*UndeleteTreeRequest)(nil), // 6: trillian.UndeleteTreeRequest
- (*Tree)(nil), // 7: trillian.Tree
- (*fieldmaskpb.FieldMask)(nil), // 8: google.protobuf.FieldMask
-}
-var file_trillian_admin_api_proto_depIdxs = []int32{
- 7, // 0: trillian.ListTreesResponse.tree:type_name -> trillian.Tree
- 7, // 1: trillian.CreateTreeRequest.tree:type_name -> trillian.Tree
- 7, // 2: trillian.UpdateTreeRequest.tree:type_name -> trillian.Tree
- 8, // 3: trillian.UpdateTreeRequest.update_mask:type_name -> google.protobuf.FieldMask
- 0, // 4: trillian.TrillianAdmin.ListTrees:input_type -> trillian.ListTreesRequest
- 2, // 5: trillian.TrillianAdmin.GetTree:input_type -> trillian.GetTreeRequest
- 3, // 6: trillian.TrillianAdmin.CreateTree:input_type -> trillian.CreateTreeRequest
- 4, // 7: trillian.TrillianAdmin.UpdateTree:input_type -> trillian.UpdateTreeRequest
- 5, // 8: trillian.TrillianAdmin.DeleteTree:input_type -> trillian.DeleteTreeRequest
- 6, // 9: trillian.TrillianAdmin.UndeleteTree:input_type -> trillian.UndeleteTreeRequest
- 1, // 10: trillian.TrillianAdmin.ListTrees:output_type -> trillian.ListTreesResponse
- 7, // 11: trillian.TrillianAdmin.GetTree:output_type -> trillian.Tree
- 7, // 12: trillian.TrillianAdmin.CreateTree:output_type -> trillian.Tree
- 7, // 13: trillian.TrillianAdmin.UpdateTree:output_type -> trillian.Tree
- 7, // 14: trillian.TrillianAdmin.DeleteTree:output_type -> trillian.Tree
- 7, // 15: trillian.TrillianAdmin.UndeleteTree:output_type -> trillian.Tree
- 10, // [10:16] is the sub-list for method output_type
- 4, // [4:10] is the sub-list for method input_type
- 4, // [4:4] is the sub-list for extension type_name
- 4, // [4:4] is the sub-list for extension extendee
- 0, // [0:4] is the sub-list for field type_name
-}
-
-func init() { file_trillian_admin_api_proto_init() }
-func file_trillian_admin_api_proto_init() {
- if File_trillian_admin_api_proto != nil {
- return
- }
- file_trillian_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_trillian_admin_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListTreesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_admin_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListTreesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_admin_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_admin_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_admin_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_admin_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_admin_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UndeleteTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_trillian_admin_api_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 7,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_trillian_admin_api_proto_goTypes,
- DependencyIndexes: file_trillian_admin_api_proto_depIdxs,
- MessageInfos: file_trillian_admin_api_proto_msgTypes,
- }.Build()
- File_trillian_admin_api_proto = out.File
- file_trillian_admin_api_proto_rawDesc = nil
- file_trillian_admin_api_proto_goTypes = nil
- file_trillian_admin_api_proto_depIdxs = nil
-}
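
Note on update_mask usage: the UpdateTreeRequest deleted above pairs a Tree with a google.protobuf.FieldMask whose paths name the fields to change. A minimal sketch of freezing a tree via the generated client, assuming a reachable admin endpoint; the address and tree ID below are placeholders, not taken from this change.

    package main

    import (
    	"context"
    	"log"

    	"github.com/google/trillian"
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    	"google.golang.org/protobuf/types/known/fieldmaskpb"
    )

    func main() {
    	// Placeholder admin endpoint, not taken from this change.
    	conn, err := grpc.Dial("localhost:8090",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	admin := trillian.NewTrillianAdminClient(conn)

    	// Freeze tree 123: the FieldMask paths name exactly the Tree fields
    	// to modify ("tree_state" here), per the update_mask comment above.
    	updated, err := admin.UpdateTree(context.Background(), &trillian.UpdateTreeRequest{
    		Tree:       &trillian.Tree{TreeId: 123, TreeState: trillian.TreeState_FROZEN},
    		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"tree_state"}},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("tree %d is now %s", updated.GetTreeId(), updated.GetTreeState())
    }
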
diff --git a/vendor/github.com/google/trillian/trillian_admin_api.proto b/vendor/github.com/google/trillian/trillian_admin_api.proto
deleted file mode 100644
index 39aac0f8e..000000000
--- a/vendor/github.com/google/trillian/trillian_admin_api.proto
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-option java_multiple_files = true;
-option java_package = "com.google.trillian.proto";
-option java_outer_classname = "TrillianAdminApiProto";
-option go_package = "github.com/google/trillian";
-
-package trillian;
-
-import "trillian.proto";
-import "google/protobuf/field_mask.proto";
-
-// ListTrees request.
-// No filters or pagination options are provided.
-message ListTreesRequest {
- // If true, deleted trees are included in the response.
- bool show_deleted = 1;
-}
-
-// ListTrees response.
-// No pagination is provided; all trees the requester has access to are
-// returned.
-message ListTreesResponse {
- // Trees matching the list request filters.
- repeated Tree tree = 1;
-}
-
-// GetTree request.
-message GetTreeRequest {
- // ID of the tree to retrieve.
- int64 tree_id = 1;
-}
-
-// CreateTree request.
-message CreateTreeRequest {
- // Tree to be created. See Tree and CreateTree for more details.
- Tree tree = 1;
-
- reserved 2;
- reserved "key_spec";
-}
-
-// UpdateTree request.
-message UpdateTreeRequest {
- // Tree to be updated.
- Tree tree = 1;
-
- // Fields modified by the update request.
- // For example: "tree_state", "display_name", "description".
- google.protobuf.FieldMask update_mask = 2;
-}
-
-// DeleteTree request.
-message DeleteTreeRequest {
- // ID of the tree to delete.
- int64 tree_id = 1;
-}
-
-// UndeleteTree request.
-message UndeleteTreeRequest {
- // ID of the tree to undelete.
- int64 tree_id = 1;
-}
-
-// Trillian Administrative interface.
-// Allows creation and management of Trillian trees.
-service TrillianAdmin {
- // Lists all trees the requester has access to.
- rpc ListTrees(ListTreesRequest) returns (ListTreesResponse) {}
-
- // Retrieves a tree by ID.
- rpc GetTree(GetTreeRequest) returns (Tree) {}
-
- // Creates a new tree.
- // System-generated fields are not required and will be ignored if present,
- // e.g.: tree_id, create_time and update_time.
- // Returns the created tree, with all system-generated fields assigned.
- rpc CreateTree(CreateTreeRequest) returns (Tree) {}
-
- // Updates a tree.
- // See Tree for details. Readonly fields cannot be updated.
- rpc UpdateTree(UpdateTreeRequest) returns (Tree) {}
-
- // Soft-deletes a tree.
- // A soft-deleted tree may be undeleted for a certain period, after which
- // it'll be permanently deleted.
- rpc DeleteTree(DeleteTreeRequest) returns (Tree) {}
-
-  // Undeletes a soft-deleted tree.
- // A soft-deleted tree may be undeleted for a certain period, after which
- // it'll be permanently deleted.
- rpc UndeleteTree(UndeleteTreeRequest) returns (Tree) {}
-}
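
Note on CreateTree: per the service comment above, system-generated fields (tree_id, create_time, update_time) are ignored on input and assigned by the server. A minimal sketch of a well-formed request; the package, function and display name are mine, and ACTIVE/LOG reflect the usual values for a new log tree rather than anything mandated by this change.

    package trillianexample

    import (
    	"context"
    	"time"

    	"github.com/google/trillian"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    // createLogTree issues CreateTree as documented above: tree_id,
    // create_time and update_time are left unset and come back populated
    // on the returned Tree.
    func createLogTree(ctx context.Context, admin trillian.TrillianAdminClient) (*trillian.Tree, error) {
    	return admin.CreateTree(ctx, &trillian.CreateTreeRequest{
    		Tree: &trillian.Tree{
    			TreeType:        trillian.TreeType_LOG,
    			TreeState:       trillian.TreeState_ACTIVE,
    			DisplayName:     "example-log", // placeholder name
    			MaxRootDuration: durationpb.New(time.Hour),
    		},
    	})
    }
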
diff --git a/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go b/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go
deleted file mode 100644
index 6253c0309..000000000
--- a/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.1
-// source: trillian_admin_api.proto
-
-package trillian
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-// TrillianAdminClient is the client API for TrillianAdmin service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type TrillianAdminClient interface {
- // Lists all trees the requester has access to.
- ListTrees(ctx context.Context, in *ListTreesRequest, opts ...grpc.CallOption) (*ListTreesResponse, error)
- // Retrieves a tree by ID.
- GetTree(ctx context.Context, in *GetTreeRequest, opts ...grpc.CallOption) (*Tree, error)
- // Creates a new tree.
- // System-generated fields are not required and will be ignored if present,
- // e.g.: tree_id, create_time and update_time.
- // Returns the created tree, with all system-generated fields assigned.
- CreateTree(ctx context.Context, in *CreateTreeRequest, opts ...grpc.CallOption) (*Tree, error)
- // Updates a tree.
- // See Tree for details. Readonly fields cannot be updated.
- UpdateTree(ctx context.Context, in *UpdateTreeRequest, opts ...grpc.CallOption) (*Tree, error)
- // Soft-deletes a tree.
- // A soft-deleted tree may be undeleted for a certain period, after which
- // it'll be permanently deleted.
- DeleteTree(ctx context.Context, in *DeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error)
-	// Undeletes a soft-deleted tree.
- // A soft-deleted tree may be undeleted for a certain period, after which
- // it'll be permanently deleted.
- UndeleteTree(ctx context.Context, in *UndeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error)
-}
-
-type trillianAdminClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewTrillianAdminClient(cc grpc.ClientConnInterface) TrillianAdminClient {
- return &trillianAdminClient{cc}
-}
-
-func (c *trillianAdminClient) ListTrees(ctx context.Context, in *ListTreesRequest, opts ...grpc.CallOption) (*ListTreesResponse, error) {
- out := new(ListTreesResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/ListTrees", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianAdminClient) GetTree(ctx context.Context, in *GetTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
- out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/GetTree", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianAdminClient) CreateTree(ctx context.Context, in *CreateTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
- out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/CreateTree", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianAdminClient) UpdateTree(ctx context.Context, in *UpdateTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
- out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/UpdateTree", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianAdminClient) DeleteTree(ctx context.Context, in *DeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
- out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/DeleteTree", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianAdminClient) UndeleteTree(ctx context.Context, in *UndeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
- out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/UndeleteTree", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// TrillianAdminServer is the server API for TrillianAdmin service.
-// All implementations should embed UnimplementedTrillianAdminServer
-// for forward compatibility
-type TrillianAdminServer interface {
- // Lists all trees the requester has access to.
- ListTrees(context.Context, *ListTreesRequest) (*ListTreesResponse, error)
- // Retrieves a tree by ID.
- GetTree(context.Context, *GetTreeRequest) (*Tree, error)
- // Creates a new tree.
- // System-generated fields are not required and will be ignored if present,
- // e.g.: tree_id, create_time and update_time.
- // Returns the created tree, with all system-generated fields assigned.
- CreateTree(context.Context, *CreateTreeRequest) (*Tree, error)
- // Updates a tree.
- // See Tree for details. Readonly fields cannot be updated.
- UpdateTree(context.Context, *UpdateTreeRequest) (*Tree, error)
- // Soft-deletes a tree.
- // A soft-deleted tree may be undeleted for a certain period, after which
- // it'll be permanently deleted.
- DeleteTree(context.Context, *DeleteTreeRequest) (*Tree, error)
-	// Undeletes a soft-deleted tree.
- // A soft-deleted tree may be undeleted for a certain period, after which
- // it'll be permanently deleted.
- UndeleteTree(context.Context, *UndeleteTreeRequest) (*Tree, error)
-}
-
-// UnimplementedTrillianAdminServer should be embedded to have forward compatible implementations.
-type UnimplementedTrillianAdminServer struct {
-}
-
-func (UnimplementedTrillianAdminServer) ListTrees(context.Context, *ListTreesRequest) (*ListTreesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListTrees not implemented")
-}
-func (UnimplementedTrillianAdminServer) GetTree(context.Context, *GetTreeRequest) (*Tree, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetTree not implemented")
-}
-func (UnimplementedTrillianAdminServer) CreateTree(context.Context, *CreateTreeRequest) (*Tree, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateTree not implemented")
-}
-func (UnimplementedTrillianAdminServer) UpdateTree(context.Context, *UpdateTreeRequest) (*Tree, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateTree not implemented")
-}
-func (UnimplementedTrillianAdminServer) DeleteTree(context.Context, *DeleteTreeRequest) (*Tree, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteTree not implemented")
-}
-func (UnimplementedTrillianAdminServer) UndeleteTree(context.Context, *UndeleteTreeRequest) (*Tree, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UndeleteTree not implemented")
-}
-
-// UnsafeTrillianAdminServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as methods added to TrillianAdminServer will
-// result in compilation errors.
-type UnsafeTrillianAdminServer interface {
- mustEmbedUnimplementedTrillianAdminServer()
-}
-
-func RegisterTrillianAdminServer(s grpc.ServiceRegistrar, srv TrillianAdminServer) {
- s.RegisterService(&TrillianAdmin_ServiceDesc, srv)
-}
-
-func _TrillianAdmin_ListTrees_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListTreesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianAdminServer).ListTrees(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianAdmin/ListTrees",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianAdminServer).ListTrees(ctx, req.(*ListTreesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianAdmin_GetTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetTreeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianAdminServer).GetTree(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianAdmin/GetTree",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianAdminServer).GetTree(ctx, req.(*GetTreeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianAdmin_CreateTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateTreeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianAdminServer).CreateTree(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianAdmin/CreateTree",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianAdminServer).CreateTree(ctx, req.(*CreateTreeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianAdmin_UpdateTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateTreeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianAdminServer).UpdateTree(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianAdmin/UpdateTree",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianAdminServer).UpdateTree(ctx, req.(*UpdateTreeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianAdmin_DeleteTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteTreeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianAdminServer).DeleteTree(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianAdmin/DeleteTree",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianAdminServer).DeleteTree(ctx, req.(*DeleteTreeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianAdmin_UndeleteTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UndeleteTreeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianAdminServer).UndeleteTree(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianAdmin/UndeleteTree",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianAdminServer).UndeleteTree(ctx, req.(*UndeleteTreeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// TrillianAdmin_ServiceDesc is the grpc.ServiceDesc for the TrillianAdmin service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy).
-var TrillianAdmin_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "trillian.TrillianAdmin",
- HandlerType: (*TrillianAdminServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "ListTrees",
- Handler: _TrillianAdmin_ListTrees_Handler,
- },
- {
- MethodName: "GetTree",
- Handler: _TrillianAdmin_GetTree_Handler,
- },
- {
- MethodName: "CreateTree",
- Handler: _TrillianAdmin_CreateTree_Handler,
- },
- {
- MethodName: "UpdateTree",
- Handler: _TrillianAdmin_UpdateTree_Handler,
- },
- {
- MethodName: "DeleteTree",
- Handler: _TrillianAdmin_DeleteTree_Handler,
- },
- {
- MethodName: "UndeleteTree",
- Handler: _TrillianAdmin_UndeleteTree_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "trillian_admin_api.proto",
-}
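On the server side, a sketch of the registration hook above; adminServer is a hypothetical stub, and embedding UnimplementedTrillianAdminServer keeps it compiling if methods are later added to the service, as the comment above recommends.

	package example

	import (
		"context"
		"net"

		"github.com/google/trillian"
		"google.golang.org/grpc"
	)

	// adminServer is a hypothetical stub; the embedded Unimplemented type
	// answers every method it does not override with codes.Unimplemented.
	type adminServer struct {
		trillian.UnimplementedTrillianAdminServer
	}

	// GetTree overrides a single method as a placeholder implementation.
	func (s *adminServer) GetTree(ctx context.Context, req *trillian.GetTreeRequest) (*trillian.Tree, error) {
		return &trillian.Tree{TreeId: req.TreeId}, nil
	}

	func serve() error {
		lis, err := net.Listen("tcp", ":8090") // placeholder port
		if err != nil {
			return err
		}
		s := grpc.NewServer()
		trillian.RegisterTrillianAdminServer(s, &adminServer{})
		return s.Serve(lis)
	}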
diff --git a/vendor/github.com/google/trillian/trillian_log_api.pb.go b/vendor/github.com/google/trillian/trillian_log_api.pb.go
deleted file mode 100644
index 738e46e43..000000000
--- a/vendor/github.com/google/trillian/trillian_log_api.pb.go
+++ /dev/null
@@ -1,2070 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.1
-// protoc v3.20.1
-// source: trillian_log_api.proto
-
-package trillian
-
-import (
- status "google.golang.org/genproto/googleapis/rpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// ChargeTo describes the user(s) associated with the request whose quota should
-// be checked and charged.
-type ChargeTo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // user is a list of personality-defined strings.
- // Trillian will treat them as /User/%{user}/... keys when checking and
- // charging quota.
- // If one or more of the specified users has insufficient quota, the
- // request will be denied.
- //
- // As an example, a Certificate Transparency frontend might set the following
- // user strings when sending a QueueLeaf request to the Trillian log:
- // - The requesting IP address.
- // This would limit the number of requests per IP.
- // - The "intermediate-<hash>" for each of the intermediate certificates in
- // the submitted chain.
- // This would have the effect of limiting the rate of submissions under
- // a given intermediate/root.
- User []string `protobuf:"bytes,1,rep,name=user,proto3" json:"user,omitempty"`
-}
-
-func (x *ChargeTo) Reset() {
- *x = ChargeTo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ChargeTo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ChargeTo) ProtoMessage() {}
-
-func (x *ChargeTo) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ChargeTo.ProtoReflect.Descriptor instead.
-func (*ChargeTo) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *ChargeTo) GetUser() []string {
- if x != nil {
- return x.User
- }
- return nil
-}
-
-type QueueLeafRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- Leaf *LogLeaf `protobuf:"bytes,2,opt,name=leaf,proto3" json:"leaf,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,3,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *QueueLeafRequest) Reset() {
- *x = QueueLeafRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueueLeafRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueueLeafRequest) ProtoMessage() {}
-
-func (x *QueueLeafRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueueLeafRequest.ProtoReflect.Descriptor instead.
-func (*QueueLeafRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *QueueLeafRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *QueueLeafRequest) GetLeaf() *LogLeaf {
- if x != nil {
- return x.Leaf
- }
- return nil
-}
-
-func (x *QueueLeafRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
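To make the ChargeTo quota comment above concrete, a hypothetical helper that charges a QueueLeaf submission to the requesting IP and one intermediate hash, as in the Certificate Transparency example; the log ID and user strings are placeholders.

	package example

	import "github.com/google/trillian"

	// buildQueueLeafRequest is a hypothetical helper: per the ChargeTo comment
	// above, each User entry is treated as a /User/%{user}/... quota key, so
	// this submission is charged against both the client IP and one intermediate.
	func buildQueueLeafRequest(logID int64, certBytes []byte) *trillian.QueueLeafRequest {
		return &trillian.QueueLeafRequest{
			LogId: logID,
			Leaf:  &trillian.LogLeaf{LeafValue: certBytes},
			ChargeTo: &trillian.ChargeTo{
				User: []string{"198.51.100.7", "intermediate-deadbeef"}, // placeholders
			},
		}
	}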
-type QueueLeafResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // queued_leaf describes the leaf which is or will be incorporated into the
- // Log. If the submitted leaf was already present in the Log (as indicated by
- // its leaf identity hash), then the returned leaf will be the pre-existing
- // leaf entry rather than the submitted leaf.
- QueuedLeaf *QueuedLogLeaf `protobuf:"bytes,2,opt,name=queued_leaf,json=queuedLeaf,proto3" json:"queued_leaf,omitempty"`
-}
-
-func (x *QueueLeafResponse) Reset() {
- *x = QueueLeafResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueueLeafResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueueLeafResponse) ProtoMessage() {}
-
-func (x *QueueLeafResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueueLeafResponse.ProtoReflect.Descriptor instead.
-func (*QueueLeafResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *QueueLeafResponse) GetQueuedLeaf() *QueuedLogLeaf {
- if x != nil {
- return x.QueuedLeaf
- }
- return nil
-}
-
-type GetInclusionProofRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- LeafIndex int64 `protobuf:"varint,2,opt,name=leaf_index,json=leafIndex,proto3" json:"leaf_index,omitempty"`
- TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,4,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *GetInclusionProofRequest) Reset() {
- *x = GetInclusionProofRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetInclusionProofRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetInclusionProofRequest) ProtoMessage() {}
-
-func (x *GetInclusionProofRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetInclusionProofRequest.ProtoReflect.Descriptor instead.
-func (*GetInclusionProofRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *GetInclusionProofRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *GetInclusionProofRequest) GetLeafIndex() int64 {
- if x != nil {
- return x.LeafIndex
- }
- return 0
-}
-
-func (x *GetInclusionProofRequest) GetTreeSize() int64 {
- if x != nil {
- return x.TreeSize
- }
- return 0
-}
-
-func (x *GetInclusionProofRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-type GetInclusionProofResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The proof field may be empty if the requested tree_size was larger
- // than that available at the server (e.g. because there is skew between
- // server instances, and an earlier client request was processed by a
- // more up-to-date instance). In this case, the signed_log_root
- // field will indicate the tree size that the server is aware of, and
- // the proof field will be empty.
- Proof *Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
- SignedLogRoot *SignedLogRoot `protobuf:"bytes,3,opt,name=signed_log_root,json=signedLogRoot,proto3" json:"signed_log_root,omitempty"`
-}
-
-func (x *GetInclusionProofResponse) Reset() {
- *x = GetInclusionProofResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetInclusionProofResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetInclusionProofResponse) ProtoMessage() {}
-
-func (x *GetInclusionProofResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetInclusionProofResponse.ProtoReflect.Descriptor instead.
-func (*GetInclusionProofResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *GetInclusionProofResponse) GetProof() *Proof {
- if x != nil {
- return x.Proof
- }
- return nil
-}
-
-func (x *GetInclusionProofResponse) GetSignedLogRoot() *SignedLogRoot {
- if x != nil {
- return x.SignedLogRoot
- }
- return nil
-}
-
-type GetInclusionProofByHashRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- // The leaf hash field provides the Merkle tree hash of the leaf entry
- // to be retrieved.
- LeafHash []byte `protobuf:"bytes,2,opt,name=leaf_hash,json=leafHash,proto3" json:"leaf_hash,omitempty"`
- TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"`
- OrderBySequence bool `protobuf:"varint,4,opt,name=order_by_sequence,json=orderBySequence,proto3" json:"order_by_sequence,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,5,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *GetInclusionProofByHashRequest) Reset() {
- *x = GetInclusionProofByHashRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetInclusionProofByHashRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetInclusionProofByHashRequest) ProtoMessage() {}
-
-func (x *GetInclusionProofByHashRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetInclusionProofByHashRequest.ProtoReflect.Descriptor instead.
-func (*GetInclusionProofByHashRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *GetInclusionProofByHashRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *GetInclusionProofByHashRequest) GetLeafHash() []byte {
- if x != nil {
- return x.LeafHash
- }
- return nil
-}
-
-func (x *GetInclusionProofByHashRequest) GetTreeSize() int64 {
- if x != nil {
- return x.TreeSize
- }
- return 0
-}
-
-func (x *GetInclusionProofByHashRequest) GetOrderBySequence() bool {
- if x != nil {
- return x.OrderBySequence
- }
- return false
-}
-
-func (x *GetInclusionProofByHashRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-type GetInclusionProofByHashResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Logs can potentially contain leaves with duplicate hashes so it's possible
- // for this to return multiple proofs. If the leaf index for a particular
- // instance of the requested Merkle leaf hash is beyond the requested tree
- // size, the corresponding proof entry will be missing.
- Proof []*Proof `protobuf:"bytes,2,rep,name=proof,proto3" json:"proof,omitempty"`
- SignedLogRoot *SignedLogRoot `protobuf:"bytes,3,opt,name=signed_log_root,json=signedLogRoot,proto3" json:"signed_log_root,omitempty"`
-}
-
-func (x *GetInclusionProofByHashResponse) Reset() {
- *x = GetInclusionProofByHashResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetInclusionProofByHashResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetInclusionProofByHashResponse) ProtoMessage() {}
-
-func (x *GetInclusionProofByHashResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetInclusionProofByHashResponse.ProtoReflect.Descriptor instead.
-func (*GetInclusionProofByHashResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *GetInclusionProofByHashResponse) GetProof() []*Proof {
- if x != nil {
- return x.Proof
- }
- return nil
-}
-
-func (x *GetInclusionProofByHashResponse) GetSignedLogRoot() *SignedLogRoot {
- if x != nil {
- return x.SignedLogRoot
- }
- return nil
-}
-
-type GetConsistencyProofRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- FirstTreeSize int64 `protobuf:"varint,2,opt,name=first_tree_size,json=firstTreeSize,proto3" json:"first_tree_size,omitempty"`
- SecondTreeSize int64 `protobuf:"varint,3,opt,name=second_tree_size,json=secondTreeSize,proto3" json:"second_tree_size,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,4,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *GetConsistencyProofRequest) Reset() {
- *x = GetConsistencyProofRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetConsistencyProofRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetConsistencyProofRequest) ProtoMessage() {}
-
-func (x *GetConsistencyProofRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetConsistencyProofRequest.ProtoReflect.Descriptor instead.
-func (*GetConsistencyProofRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *GetConsistencyProofRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *GetConsistencyProofRequest) GetFirstTreeSize() int64 {
- if x != nil {
- return x.FirstTreeSize
- }
- return 0
-}
-
-func (x *GetConsistencyProofRequest) GetSecondTreeSize() int64 {
- if x != nil {
- return x.SecondTreeSize
- }
- return 0
-}
-
-func (x *GetConsistencyProofRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-type GetConsistencyProofResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The proof field may be empty if the requested tree_size was larger
- // than that available at the server (e.g. because there is skew between
- // server instances, and an earlier client request was processed by a
- // more up-to-date instance). In this case, the signed_log_root
- // field will indicate the tree size that the server is aware of, and
- // the proof field will be empty.
- Proof *Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
- SignedLogRoot *SignedLogRoot `protobuf:"bytes,3,opt,name=signed_log_root,json=signedLogRoot,proto3" json:"signed_log_root,omitempty"`
-}
-
-func (x *GetConsistencyProofResponse) Reset() {
- *x = GetConsistencyProofResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetConsistencyProofResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetConsistencyProofResponse) ProtoMessage() {}
-
-func (x *GetConsistencyProofResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetConsistencyProofResponse.ProtoReflect.Descriptor instead.
-func (*GetConsistencyProofResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *GetConsistencyProofResponse) GetProof() *Proof {
- if x != nil {
- return x.Proof
- }
- return nil
-}
-
-func (x *GetConsistencyProofResponse) GetSignedLogRoot() *SignedLogRoot {
- if x != nil {
- return x.SignedLogRoot
- }
- return nil
-}
-
-type GetLatestSignedLogRootRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,2,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
- // If first_tree_size is non-zero, the response will include a consistency
- // proof between first_tree_size and the new tree size (if not smaller).
- FirstTreeSize int64 `protobuf:"varint,3,opt,name=first_tree_size,json=firstTreeSize,proto3" json:"first_tree_size,omitempty"`
-}
-
-func (x *GetLatestSignedLogRootRequest) Reset() {
- *x = GetLatestSignedLogRootRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetLatestSignedLogRootRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetLatestSignedLogRootRequest) ProtoMessage() {}
-
-func (x *GetLatestSignedLogRootRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetLatestSignedLogRootRequest.ProtoReflect.Descriptor instead.
-func (*GetLatestSignedLogRootRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *GetLatestSignedLogRootRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *GetLatestSignedLogRootRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-func (x *GetLatestSignedLogRootRequest) GetFirstTreeSize() int64 {
- if x != nil {
- return x.FirstTreeSize
- }
- return 0
-}
-
-type GetLatestSignedLogRootResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- SignedLogRoot *SignedLogRoot `protobuf:"bytes,2,opt,name=signed_log_root,json=signedLogRoot,proto3" json:"signed_log_root,omitempty"`
- // proof is filled in with a consistency proof if first_tree_size in
- // GetLatestSignedLogRootRequest is non-zero (and within the tree size
- // available at the server).
- Proof *Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"`
-}
-
-func (x *GetLatestSignedLogRootResponse) Reset() {
- *x = GetLatestSignedLogRootResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetLatestSignedLogRootResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetLatestSignedLogRootResponse) ProtoMessage() {}
-
-func (x *GetLatestSignedLogRootResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetLatestSignedLogRootResponse.ProtoReflect.Descriptor instead.
-func (*GetLatestSignedLogRootResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *GetLatestSignedLogRootResponse) GetSignedLogRoot() *SignedLogRoot {
- if x != nil {
- return x.SignedLogRoot
- }
- return nil
-}
-
-func (x *GetLatestSignedLogRootResponse) GetProof() *Proof {
- if x != nil {
- return x.Proof
- }
- return nil
-}
-
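A sketch of the first_tree_size behaviour documented above: a non-zero value asks the server to return a consistency proof from a previously seen size alongside the latest root. The helper is hypothetical; TrillianLogClient is the generated log client elsewhere in this package.

	package example

	import (
		"context"

		"github.com/google/trillian"
	)

	// latestRootWithProof requests the newest signed log root and, because
	// FirstTreeSize is set, a consistency proof from lastSeenSize (when the
	// server's tree is at least that large).
	func latestRootWithProof(ctx context.Context, c trillian.TrillianLogClient, logID, lastSeenSize int64) (*trillian.GetLatestSignedLogRootResponse, error) {
		return c.GetLatestSignedLogRoot(ctx, &trillian.GetLatestSignedLogRootRequest{
			LogId:         logID,
			FirstTreeSize: lastSeenSize,
		})
	}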
-type GetEntryAndProofRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- LeafIndex int64 `protobuf:"varint,2,opt,name=leaf_index,json=leafIndex,proto3" json:"leaf_index,omitempty"`
- TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,4,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *GetEntryAndProofRequest) Reset() {
- *x = GetEntryAndProofRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetEntryAndProofRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetEntryAndProofRequest) ProtoMessage() {}
-
-func (x *GetEntryAndProofRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetEntryAndProofRequest.ProtoReflect.Descriptor instead.
-func (*GetEntryAndProofRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *GetEntryAndProofRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *GetEntryAndProofRequest) GetLeafIndex() int64 {
- if x != nil {
- return x.LeafIndex
- }
- return 0
-}
-
-func (x *GetEntryAndProofRequest) GetTreeSize() int64 {
- if x != nil {
- return x.TreeSize
- }
- return 0
-}
-
-func (x *GetEntryAndProofRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-type GetEntryAndProofResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Proof *Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
- Leaf *LogLeaf `protobuf:"bytes,3,opt,name=leaf,proto3" json:"leaf,omitempty"`
- SignedLogRoot *SignedLogRoot `protobuf:"bytes,4,opt,name=signed_log_root,json=signedLogRoot,proto3" json:"signed_log_root,omitempty"`
-}
-
-func (x *GetEntryAndProofResponse) Reset() {
- *x = GetEntryAndProofResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetEntryAndProofResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetEntryAndProofResponse) ProtoMessage() {}
-
-func (x *GetEntryAndProofResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetEntryAndProofResponse.ProtoReflect.Descriptor instead.
-func (*GetEntryAndProofResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *GetEntryAndProofResponse) GetProof() *Proof {
- if x != nil {
- return x.Proof
- }
- return nil
-}
-
-func (x *GetEntryAndProofResponse) GetLeaf() *LogLeaf {
- if x != nil {
- return x.Leaf
- }
- return nil
-}
-
-func (x *GetEntryAndProofResponse) GetSignedLogRoot() *SignedLogRoot {
- if x != nil {
- return x.SignedLogRoot
- }
- return nil
-}
-
-type InitLogRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,2,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *InitLogRequest) Reset() {
- *x = InitLogRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *InitLogRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*InitLogRequest) ProtoMessage() {}
-
-func (x *InitLogRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use InitLogRequest.ProtoReflect.Descriptor instead.
-func (*InitLogRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *InitLogRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *InitLogRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-type InitLogResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Created *SignedLogRoot `protobuf:"bytes,1,opt,name=created,proto3" json:"created,omitempty"`
-}
-
-func (x *InitLogResponse) Reset() {
- *x = InitLogResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *InitLogResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*InitLogResponse) ProtoMessage() {}
-
-func (x *InitLogResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use InitLogResponse.ProtoReflect.Descriptor instead.
-func (*InitLogResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *InitLogResponse) GetCreated() *SignedLogRoot {
- if x != nil {
- return x.Created
- }
- return nil
-}
-
-type AddSequencedLeavesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- Leaves []*LogLeaf `protobuf:"bytes,2,rep,name=leaves,proto3" json:"leaves,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,4,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *AddSequencedLeavesRequest) Reset() {
- *x = AddSequencedLeavesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddSequencedLeavesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddSequencedLeavesRequest) ProtoMessage() {}
-
-func (x *AddSequencedLeavesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddSequencedLeavesRequest.ProtoReflect.Descriptor instead.
-func (*AddSequencedLeavesRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *AddSequencedLeavesRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *AddSequencedLeavesRequest) GetLeaves() []*LogLeaf {
- if x != nil {
- return x.Leaves
- }
- return nil
-}
-
-func (x *AddSequencedLeavesRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-type AddSequencedLeavesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Same number and order as in the corresponding request.
- Results []*QueuedLogLeaf `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
-}
-
-func (x *AddSequencedLeavesResponse) Reset() {
- *x = AddSequencedLeavesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddSequencedLeavesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddSequencedLeavesResponse) ProtoMessage() {}
-
-func (x *AddSequencedLeavesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddSequencedLeavesResponse.ProtoReflect.Descriptor instead.
-func (*AddSequencedLeavesResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *AddSequencedLeavesResponse) GetResults() []*QueuedLogLeaf {
- if x != nil {
- return x.Results
- }
- return nil
-}
-
-type GetLeavesByRangeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- LogId int64 `protobuf:"varint,1,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
- StartIndex int64 `protobuf:"varint,2,opt,name=start_index,json=startIndex,proto3" json:"start_index,omitempty"`
- Count int64 `protobuf:"varint,3,opt,name=count,proto3" json:"count,omitempty"`
- ChargeTo *ChargeTo `protobuf:"bytes,4,opt,name=charge_to,json=chargeTo,proto3" json:"charge_to,omitempty"`
-}
-
-func (x *GetLeavesByRangeRequest) Reset() {
- *x = GetLeavesByRangeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetLeavesByRangeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetLeavesByRangeRequest) ProtoMessage() {}
-
-func (x *GetLeavesByRangeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetLeavesByRangeRequest.ProtoReflect.Descriptor instead.
-func (*GetLeavesByRangeRequest) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *GetLeavesByRangeRequest) GetLogId() int64 {
- if x != nil {
- return x.LogId
- }
- return 0
-}
-
-func (x *GetLeavesByRangeRequest) GetStartIndex() int64 {
- if x != nil {
- return x.StartIndex
- }
- return 0
-}
-
-func (x *GetLeavesByRangeRequest) GetCount() int64 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-func (x *GetLeavesByRangeRequest) GetChargeTo() *ChargeTo {
- if x != nil {
- return x.ChargeTo
- }
- return nil
-}
-
-type GetLeavesByRangeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Returned log leaves starting from the `start_index` of the request, in
- // order. There may be fewer than `request.count` leaves returned, if the
- // requested range extended beyond the size of the tree or if the server opted
- // to return fewer leaves than requested.
- Leaves []*LogLeaf `protobuf:"bytes,1,rep,name=leaves,proto3" json:"leaves,omitempty"`
- SignedLogRoot *SignedLogRoot `protobuf:"bytes,2,opt,name=signed_log_root,json=signedLogRoot,proto3" json:"signed_log_root,omitempty"`
-}
-
-func (x *GetLeavesByRangeResponse) Reset() {
- *x = GetLeavesByRangeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetLeavesByRangeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetLeavesByRangeResponse) ProtoMessage() {}
-
-func (x *GetLeavesByRangeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetLeavesByRangeResponse.ProtoReflect.Descriptor instead.
-func (*GetLeavesByRangeResponse) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *GetLeavesByRangeResponse) GetLeaves() []*LogLeaf {
- if x != nil {
- return x.Leaves
- }
- return nil
-}
-
-func (x *GetLeavesByRangeResponse) GetSignedLogRoot() *SignedLogRoot {
- if x != nil {
- return x.SignedLogRoot
- }
- return nil
-}
-
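Because the comment above lets the server return fewer leaves than requested, a hypothetical fetch loop keeps requesting until the range is exhausted or the tree ends; TrillianLogClient is again the generated log client from this package.

	package example

	import (
		"context"

		"github.com/google/trillian"
	)

	// fetchRange accumulates leaves for [start, start+count), re-requesting the
	// remainder whenever the server returns a short batch.
	func fetchRange(ctx context.Context, c trillian.TrillianLogClient, logID, start, count int64) ([]*trillian.LogLeaf, error) {
		var leaves []*trillian.LogLeaf
		for int64(len(leaves)) < count {
			resp, err := c.GetLeavesByRange(ctx, &trillian.GetLeavesByRangeRequest{
				LogId:      logID,
				StartIndex: start + int64(len(leaves)),
				Count:      count - int64(len(leaves)),
			})
			if err != nil {
				return nil, err
			}
			if len(resp.Leaves) == 0 {
				break // the requested range extends beyond the current tree size
			}
			leaves = append(leaves, resp.Leaves...)
		}
		return leaves, nil
	}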
-// QueuedLogLeaf provides the result of submitting an entry to the log.
-// TODO(pavelkalinnikov): Consider renaming it to AddLogLeafResult or the like.
-type QueuedLogLeaf struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The leaf as it was stored by Trillian. Empty unless `status.code` is:
- // - `google.rpc.OK`: the `leaf` data is the same as in the request.
-	//   - `google.rpc.ALREADY_EXISTS` or `google.rpc.FAILED_PRECONDITION`: the
- // `leaf` is the conflicting one already in the log.
- Leaf *LogLeaf `protobuf:"bytes,1,opt,name=leaf,proto3" json:"leaf,omitempty"`
- // The status of adding the leaf.
- // - `google.rpc.OK`: successfully added.
- // - `google.rpc.ALREADY_EXISTS`: the leaf is a duplicate of an already
- // existing one. Either `leaf_identity_hash` is the same in the `LOG`
- // mode, or `leaf_index` in the `PREORDERED_LOG`.
- // - `google.rpc.FAILED_PRECONDITION`: A conflicting entry is already
- // present in the log, e.g., same `leaf_index` but different `leaf_data`.
- Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
-}
-
-func (x *QueuedLogLeaf) Reset() {
- *x = QueuedLogLeaf{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueuedLogLeaf) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueuedLogLeaf) ProtoMessage() {}
-
-func (x *QueuedLogLeaf) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueuedLogLeaf.ProtoReflect.Descriptor instead.
-func (*QueuedLogLeaf) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *QueuedLogLeaf) GetLeaf() *LogLeaf {
- if x != nil {
- return x.Leaf
- }
- return nil
-}
-
-func (x *QueuedLogLeaf) GetStatus() *status.Status {
- if x != nil {
- return x.Status
- }
- return nil
-}
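A hypothetical helper reflecting the status semantics documented above, where google.rpc.ALREADY_EXISTS marks the returned leaf as the pre-existing duplicate rather than the one just submitted.

	package example

	import (
		"github.com/google/trillian"
		"google.golang.org/grpc/codes"
	)

	// isDuplicate reports whether a queued leaf came back as a duplicate of an
	// entry already in the log.
	func isDuplicate(q *trillian.QueuedLogLeaf) bool {
		s := q.GetStatus()
		return s != nil && codes.Code(s.GetCode()) == codes.AlreadyExists
	}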
-
-// LogLeaf describes a leaf in the Log's Merkle tree, corresponding to a single log entry.
-// Each leaf has a unique leaf index in the scope of this tree. Clients submitting new
-// leaf entries should only set the following fields:
-// - leaf_value
-// - extra_data (optionally)
-// - leaf_identity_hash (optionally)
-// - leaf_index (iff the log is a PREORDERED_LOG)
-type LogLeaf struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // merkle_leaf_hash holds the Merkle leaf hash over leaf_value. This is
- // calculated by the Trillian server when leaves are added to the tree, using
- // the defined hashing algorithm and strategy for the tree; as such, the client
- // does not need to set it on leaf submissions.
- MerkleLeafHash []byte `protobuf:"bytes,1,opt,name=merkle_leaf_hash,json=merkleLeafHash,proto3" json:"merkle_leaf_hash,omitempty"`
- // leaf_value holds the data that forms the value of the Merkle tree leaf.
- // The client should set this field on all leaf submissions, and is
- // responsible for ensuring its validity (the Trillian server treats it as an
- // opaque blob).
- LeafValue []byte `protobuf:"bytes,2,opt,name=leaf_value,json=leafValue,proto3" json:"leaf_value,omitempty"`
- // extra_data holds additional data associated with the Merkle tree leaf.
- // The client may set this data on leaf submissions, and the Trillian server
- // will return it on subsequent read operations. However, the contents of
- // this field are not covered by and do not affect the Merkle tree hash
- // calculations.
- ExtraData []byte `protobuf:"bytes,3,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"`
- // leaf_index indicates the index of this leaf in the Merkle tree.
- // This field is returned on all read operations, but should only be
- // set for leaf submissions in PREORDERED_LOG mode (for a normal log
- // the leaf index is assigned by Trillian when the submitted leaf is
- // integrated into the Merkle tree).
- LeafIndex int64 `protobuf:"varint,4,opt,name=leaf_index,json=leafIndex,proto3" json:"leaf_index,omitempty"`
- // leaf_identity_hash provides a hash value that indicates the client's
- // concept of which leaf entries should be considered identical.
- //
- // This mechanism allows the client personality to indicate that two leaves
- // should be considered "duplicates" even though their `leaf_value`s differ.
- //
- // If this is not set on leaf submissions, the Trillian server will take its
- // value to be the same as merkle_leaf_hash (and thus only leaves with
- // identical leaf_value contents will be considered identical).
- //
- // For example, in Certificate Transparency each certificate submission is
- // associated with a submission timestamp, but subsequent submissions of the
- // same certificate should be considered identical. This is achieved
- // by setting the leaf identity hash to a hash over (just) the certificate,
- // whereas the Merkle leaf hash encompasses both the certificate and its
- // submission time -- allowing duplicate certificates to be detected.
-	//
- // Continuing the CT example, for a CT mirror personality (which must allow
- // dupes since the source log could contain them), the part of the
- // personality which fetches and submits the entries might set
- // `leaf_identity_hash` to `H(leaf_index||cert)`.
- //
- // TODO(pavelkalinnikov): Consider instead using `H(cert)` and allowing
- // identity hash dupes in `PREORDERED_LOG` mode, for it can later be
- // upgraded to `LOG` which will need to correctly detect duplicates with
- // older entries when new ones get queued.
- LeafIdentityHash []byte `protobuf:"bytes,5,opt,name=leaf_identity_hash,json=leafIdentityHash,proto3" json:"leaf_identity_hash,omitempty"`
- // queue_timestamp holds the time at which this leaf was queued for
- // inclusion in the Log, or zero if the entry was submitted without
- // queuing. Clients should not set this field on submissions.
- QueueTimestamp *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=queue_timestamp,json=queueTimestamp,proto3" json:"queue_timestamp,omitempty"`
- // integrate_timestamp holds the time at which this leaf was integrated into
- // the tree. Clients should not set this field on submissions.
- IntegrateTimestamp *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=integrate_timestamp,json=integrateTimestamp,proto3" json:"integrate_timestamp,omitempty"`
-}
-
-func (x *LogLeaf) Reset() {
- *x = LogLeaf{}
- if protoimpl.UnsafeEnabled {
- mi := &file_trillian_log_api_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *LogLeaf) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*LogLeaf) ProtoMessage() {}
-
-func (x *LogLeaf) ProtoReflect() protoreflect.Message {
- mi := &file_trillian_log_api_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use LogLeaf.ProtoReflect.Descriptor instead.
-func (*LogLeaf) Descriptor() ([]byte, []int) {
- return file_trillian_log_api_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *LogLeaf) GetMerkleLeafHash() []byte {
- if x != nil {
- return x.MerkleLeafHash
- }
- return nil
-}
-
-func (x *LogLeaf) GetLeafValue() []byte {
- if x != nil {
- return x.LeafValue
- }
- return nil
-}
-
-func (x *LogLeaf) GetExtraData() []byte {
- if x != nil {
- return x.ExtraData
- }
- return nil
-}
-
-func (x *LogLeaf) GetLeafIndex() int64 {
- if x != nil {
- return x.LeafIndex
- }
- return 0
-}
-
-func (x *LogLeaf) GetLeafIdentityHash() []byte {
- if x != nil {
- return x.LeafIdentityHash
- }
- return nil
-}
-
-func (x *LogLeaf) GetQueueTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.QueueTimestamp
- }
- return nil
-}
-
-func (x *LogLeaf) GetIntegrateTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.IntegrateTimestamp
- }
- return nil
-}
-
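Tying the LogLeaf field comments together, a hypothetical constructor that sets only the client-settable fields for a normal (LOG mode) tree; all arguments are placeholders supplied by the personality.

	package example

	import "github.com/google/trillian"

	// newSubmissionLeaf populates just the fields a client should set per the
	// LogLeaf comment above; leaf_index is omitted because the log is not in
	// PREORDERED_LOG mode.
	func newSubmissionLeaf(payload, extra, identityHash []byte) *trillian.LogLeaf {
		return &trillian.LogLeaf{
			LeafValue:        payload,      // required: the opaque data to log
			ExtraData:        extra,        // optional; not covered by the tree hash
			LeafIdentityHash: identityHash, // optional; defaults to the Merkle leaf hash
		}
	}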
-var File_trillian_log_api_proto protoreflect.FileDescriptor
-
-var file_trillian_log_api_proto_rawDesc = []byte{
- 0x0a, 0x16, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x61,
- 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
- 0x61, 0x6e, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1e, 0x0a, 0x08,
- 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x81, 0x01, 0x0a,
- 0x10, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x04, 0x6c, 0x65, 0x61, 0x66,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61,
- 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x04, 0x6c, 0x65, 0x61, 0x66, 0x12,
- 0x2f, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68,
- 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x08, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f,
- 0x22, 0x4d, 0x0a, 0x11, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f,
- 0x6c, 0x65, 0x61, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x69,
- 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x4c,
- 0x65, 0x61, 0x66, 0x52, 0x0a, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4c, 0x65, 0x61, 0x66, 0x22,
- 0x9e, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e,
- 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06,
- 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x6f,
- 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65,
- 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x64,
- 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12,
- 0x2f, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68,
- 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x08, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f,
- 0x22, 0x83, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f,
- 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
- 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05,
- 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x3f, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f,
- 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64,
- 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c,
- 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0xce, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x49, 0x6e,
- 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x79, 0x48, 0x61,
- 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64,
- 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6c, 0x65, 0x61, 0x66, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a,
- 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x72,
- 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x53, 0x65,
- 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65,
- 0x5f, 0x74, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c,
- 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x08, 0x63,
- 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x22, 0x89, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x49,
- 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x79, 0x48,
- 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x70,
- 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x69,
- 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05, 0x70, 0x72, 0x6f,
- 0x6f, 0x66, 0x12, 0x3f, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67,
- 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67,
- 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52,
- 0x6f, 0x6f, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x69,
- 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x66, 0x69, 0x72,
- 0x73, 0x74, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x72, 0x65, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x5f, 0x74, 0x72, 0x65, 0x65,
- 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x65, 0x63,
- 0x6f, 0x6e, 0x64, 0x54, 0x72, 0x65, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63,
- 0x68, 0x61, 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65,
- 0x54, 0x6f, 0x52, 0x08, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x22, 0x85, 0x01, 0x0a,
- 0x1b, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x50,
- 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05,
- 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05, 0x70, 0x72,
- 0x6f, 0x6f, 0x66, 0x12, 0x3f, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6c, 0x6f,
- 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74,
- 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f,
- 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67,
- 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x8f, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65,
- 0x73, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x2f, 0x0a,
- 0x09, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x72,
- 0x67, 0x65, 0x54, 0x6f, 0x52, 0x08, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x12, 0x26,
- 0x0a, 0x0f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x72,
- 0x65, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4c, 0x61,
- 0x74, 0x65, 0x73, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0f, 0x73, 0x69, 0x67,
- 0x6e, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x0d, 0x73, 0x69, 0x67,
- 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x72,
- 0x6f, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x69, 0x6c,
- 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f,
- 0x66, 0x22, 0x9d, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x41, 0x6e,
- 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a,
- 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c,
- 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x64,
- 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x65, 0x61, 0x66, 0x49, 0x6e,
- 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, 0x53, 0x69, 0x7a, 0x65,
- 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43,
- 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x08, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54,
- 0x6f, 0x22, 0xa9, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x41, 0x6e,
- 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
- 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05,
- 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x25, 0x0a, 0x04, 0x6c, 0x65, 0x61, 0x66, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x4c,
- 0x6f, 0x67, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x04, 0x6c, 0x65, 0x61, 0x66, 0x12, 0x3f, 0x0a, 0x0f,
- 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e,
- 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x0d,
- 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x58, 0x0a,
- 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x15, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65,
- 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c,
- 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x08, 0x63,
- 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x22, 0x44, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x4c,
- 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x63, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67,
- 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x8e, 0x01,
- 0x0a, 0x19, 0x41, 0x64, 0x64, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x4c, 0x65,
- 0x61, 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6c,
- 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x6f, 0x67,
- 0x49, 0x64, 0x12, 0x29, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x4c, 0x6f,
- 0x67, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x12, 0x2f, 0x0a,
- 0x09, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x72,
- 0x67, 0x65, 0x54, 0x6f, 0x52, 0x08, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x22, 0x4f,
- 0x0a, 0x1a, 0x41, 0x64, 0x64, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x4c, 0x65,
- 0x61, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07,
- 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4c,
- 0x6f, 0x67, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22,
- 0x98, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x42, 0x79, 0x52,
- 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6c,
- 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x6f, 0x67,
- 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65,
- 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x49, 0x6e,
- 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x68, 0x61,
- 0x72, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74,
- 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f,
- 0x52, 0x08, 0x63, 0x68, 0x61, 0x72, 0x67, 0x65, 0x54, 0x6f, 0x22, 0x86, 0x01, 0x0a, 0x18, 0x47,
- 0x65, 0x74, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x76, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
- 0x61, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x76,
- 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67,
- 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67,
- 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52,
- 0x6f, 0x6f, 0x74, 0x22, 0x62, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x4c, 0x6f, 0x67,
- 0x4c, 0x65, 0x61, 0x66, 0x12, 0x25, 0x0a, 0x04, 0x6c, 0x65, 0x61, 0x66, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x4c, 0x6f,
- 0x67, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x04, 0x6c, 0x65, 0x61, 0x66, 0x12, 0x2a, 0x0a, 0x06, 0x73,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x4c,
- 0x65, 0x61, 0x66, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x6c, 0x65,
- 0x61, 0x66, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6d,
- 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1d, 0x0a,
- 0x0a, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x09, 0x6c, 0x65, 0x61, 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
- 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x6c,
- 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x09, 0x6c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65,
- 0x61, 0x66, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x49, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x43, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x75,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x71,
- 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x4b, 0x0a,
- 0x13, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x12, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74,
- 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x32, 0xdb, 0x06, 0x0a, 0x0b, 0x54,
- 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x4c, 0x6f, 0x67, 0x12, 0x46, 0x0a, 0x09, 0x51, 0x75,
- 0x65, 0x75, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x12, 0x1a, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
- 0x61, 0x6e, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x51,
- 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69,
- 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x22, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
- 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50,
- 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73,
- 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69,
- 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x28, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c,
- 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
- 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50,
- 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x69,
- 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x24, 0x2e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73,
- 0x74, 0x65, 0x6e, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74,
- 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x6f, 0x66,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x47, 0x65,
- 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67,
- 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e,
- 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c,
- 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65,
- 0x73, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x47, 0x65, 0x74,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x21, 0x2e,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x22, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x07, 0x49, 0x6e, 0x69, 0x74, 0x4c, 0x6f,
- 0x67, 0x12, 0x18, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x49, 0x6e, 0x69,
- 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x74, 0x72,
- 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, 0x12, 0x41, 0x64, 0x64, 0x53,
- 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x12, 0x23,
- 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x71,
- 0x75, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x41,
- 0x64, 0x64, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x4c, 0x65, 0x61, 0x76, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x47,
- 0x65, 0x74, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x21, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x65,
- 0x61, 0x76, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4e, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x13, 0x54, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x4c,
- 0x6f, 0x67, 0x41, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1a, 0x67, 0x69,
- 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_trillian_log_api_proto_rawDescOnce sync.Once
- file_trillian_log_api_proto_rawDescData = file_trillian_log_api_proto_rawDesc
-)
-
-func file_trillian_log_api_proto_rawDescGZIP() []byte {
- file_trillian_log_api_proto_rawDescOnce.Do(func() {
- file_trillian_log_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_trillian_log_api_proto_rawDescData)
- })
- return file_trillian_log_api_proto_rawDescData
-}
-
-var file_trillian_log_api_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
-var file_trillian_log_api_proto_goTypes = []interface{}{
- (*ChargeTo)(nil), // 0: trillian.ChargeTo
- (*QueueLeafRequest)(nil), // 1: trillian.QueueLeafRequest
- (*QueueLeafResponse)(nil), // 2: trillian.QueueLeafResponse
- (*GetInclusionProofRequest)(nil), // 3: trillian.GetInclusionProofRequest
- (*GetInclusionProofResponse)(nil), // 4: trillian.GetInclusionProofResponse
- (*GetInclusionProofByHashRequest)(nil), // 5: trillian.GetInclusionProofByHashRequest
- (*GetInclusionProofByHashResponse)(nil), // 6: trillian.GetInclusionProofByHashResponse
- (*GetConsistencyProofRequest)(nil), // 7: trillian.GetConsistencyProofRequest
- (*GetConsistencyProofResponse)(nil), // 8: trillian.GetConsistencyProofResponse
- (*GetLatestSignedLogRootRequest)(nil), // 9: trillian.GetLatestSignedLogRootRequest
- (*GetLatestSignedLogRootResponse)(nil), // 10: trillian.GetLatestSignedLogRootResponse
- (*GetEntryAndProofRequest)(nil), // 11: trillian.GetEntryAndProofRequest
- (*GetEntryAndProofResponse)(nil), // 12: trillian.GetEntryAndProofResponse
- (*InitLogRequest)(nil), // 13: trillian.InitLogRequest
- (*InitLogResponse)(nil), // 14: trillian.InitLogResponse
- (*AddSequencedLeavesRequest)(nil), // 15: trillian.AddSequencedLeavesRequest
- (*AddSequencedLeavesResponse)(nil), // 16: trillian.AddSequencedLeavesResponse
- (*GetLeavesByRangeRequest)(nil), // 17: trillian.GetLeavesByRangeRequest
- (*GetLeavesByRangeResponse)(nil), // 18: trillian.GetLeavesByRangeResponse
- (*QueuedLogLeaf)(nil), // 19: trillian.QueuedLogLeaf
- (*LogLeaf)(nil), // 20: trillian.LogLeaf
- (*Proof)(nil), // 21: trillian.Proof
- (*SignedLogRoot)(nil), // 22: trillian.SignedLogRoot
- (*status.Status)(nil), // 23: google.rpc.Status
- (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp
-}
-var file_trillian_log_api_proto_depIdxs = []int32{
- 20, // 0: trillian.QueueLeafRequest.leaf:type_name -> trillian.LogLeaf
- 0, // 1: trillian.QueueLeafRequest.charge_to:type_name -> trillian.ChargeTo
- 19, // 2: trillian.QueueLeafResponse.queued_leaf:type_name -> trillian.QueuedLogLeaf
- 0, // 3: trillian.GetInclusionProofRequest.charge_to:type_name -> trillian.ChargeTo
- 21, // 4: trillian.GetInclusionProofResponse.proof:type_name -> trillian.Proof
- 22, // 5: trillian.GetInclusionProofResponse.signed_log_root:type_name -> trillian.SignedLogRoot
- 0, // 6: trillian.GetInclusionProofByHashRequest.charge_to:type_name -> trillian.ChargeTo
- 21, // 7: trillian.GetInclusionProofByHashResponse.proof:type_name -> trillian.Proof
- 22, // 8: trillian.GetInclusionProofByHashResponse.signed_log_root:type_name -> trillian.SignedLogRoot
- 0, // 9: trillian.GetConsistencyProofRequest.charge_to:type_name -> trillian.ChargeTo
- 21, // 10: trillian.GetConsistencyProofResponse.proof:type_name -> trillian.Proof
- 22, // 11: trillian.GetConsistencyProofResponse.signed_log_root:type_name -> trillian.SignedLogRoot
- 0, // 12: trillian.GetLatestSignedLogRootRequest.charge_to:type_name -> trillian.ChargeTo
- 22, // 13: trillian.GetLatestSignedLogRootResponse.signed_log_root:type_name -> trillian.SignedLogRoot
- 21, // 14: trillian.GetLatestSignedLogRootResponse.proof:type_name -> trillian.Proof
- 0, // 15: trillian.GetEntryAndProofRequest.charge_to:type_name -> trillian.ChargeTo
- 21, // 16: trillian.GetEntryAndProofResponse.proof:type_name -> trillian.Proof
- 20, // 17: trillian.GetEntryAndProofResponse.leaf:type_name -> trillian.LogLeaf
- 22, // 18: trillian.GetEntryAndProofResponse.signed_log_root:type_name -> trillian.SignedLogRoot
- 0, // 19: trillian.InitLogRequest.charge_to:type_name -> trillian.ChargeTo
- 22, // 20: trillian.InitLogResponse.created:type_name -> trillian.SignedLogRoot
- 20, // 21: trillian.AddSequencedLeavesRequest.leaves:type_name -> trillian.LogLeaf
- 0, // 22: trillian.AddSequencedLeavesRequest.charge_to:type_name -> trillian.ChargeTo
- 19, // 23: trillian.AddSequencedLeavesResponse.results:type_name -> trillian.QueuedLogLeaf
- 0, // 24: trillian.GetLeavesByRangeRequest.charge_to:type_name -> trillian.ChargeTo
- 20, // 25: trillian.GetLeavesByRangeResponse.leaves:type_name -> trillian.LogLeaf
- 22, // 26: trillian.GetLeavesByRangeResponse.signed_log_root:type_name -> trillian.SignedLogRoot
- 20, // 27: trillian.QueuedLogLeaf.leaf:type_name -> trillian.LogLeaf
- 23, // 28: trillian.QueuedLogLeaf.status:type_name -> google.rpc.Status
- 24, // 29: trillian.LogLeaf.queue_timestamp:type_name -> google.protobuf.Timestamp
- 24, // 30: trillian.LogLeaf.integrate_timestamp:type_name -> google.protobuf.Timestamp
- 1, // 31: trillian.TrillianLog.QueueLeaf:input_type -> trillian.QueueLeafRequest
- 3, // 32: trillian.TrillianLog.GetInclusionProof:input_type -> trillian.GetInclusionProofRequest
- 5, // 33: trillian.TrillianLog.GetInclusionProofByHash:input_type -> trillian.GetInclusionProofByHashRequest
- 7, // 34: trillian.TrillianLog.GetConsistencyProof:input_type -> trillian.GetConsistencyProofRequest
- 9, // 35: trillian.TrillianLog.GetLatestSignedLogRoot:input_type -> trillian.GetLatestSignedLogRootRequest
- 11, // 36: trillian.TrillianLog.GetEntryAndProof:input_type -> trillian.GetEntryAndProofRequest
- 13, // 37: trillian.TrillianLog.InitLog:input_type -> trillian.InitLogRequest
- 15, // 38: trillian.TrillianLog.AddSequencedLeaves:input_type -> trillian.AddSequencedLeavesRequest
- 17, // 39: trillian.TrillianLog.GetLeavesByRange:input_type -> trillian.GetLeavesByRangeRequest
- 2, // 40: trillian.TrillianLog.QueueLeaf:output_type -> trillian.QueueLeafResponse
- 4, // 41: trillian.TrillianLog.GetInclusionProof:output_type -> trillian.GetInclusionProofResponse
- 6, // 42: trillian.TrillianLog.GetInclusionProofByHash:output_type -> trillian.GetInclusionProofByHashResponse
- 8, // 43: trillian.TrillianLog.GetConsistencyProof:output_type -> trillian.GetConsistencyProofResponse
- 10, // 44: trillian.TrillianLog.GetLatestSignedLogRoot:output_type -> trillian.GetLatestSignedLogRootResponse
- 12, // 45: trillian.TrillianLog.GetEntryAndProof:output_type -> trillian.GetEntryAndProofResponse
- 14, // 46: trillian.TrillianLog.InitLog:output_type -> trillian.InitLogResponse
- 16, // 47: trillian.TrillianLog.AddSequencedLeaves:output_type -> trillian.AddSequencedLeavesResponse
- 18, // 48: trillian.TrillianLog.GetLeavesByRange:output_type -> trillian.GetLeavesByRangeResponse
- 40, // [40:49] is the sub-list for method output_type
- 31, // [31:40] is the sub-list for method input_type
- 31, // [31:31] is the sub-list for extension type_name
- 31, // [31:31] is the sub-list for extension extendee
- 0, // [0:31] is the sub-list for field type_name
-}
-
-func init() { file_trillian_log_api_proto_init() }
-func file_trillian_log_api_proto_init() {
- if File_trillian_log_api_proto != nil {
- return
- }
- file_trillian_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_trillian_log_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ChargeTo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueueLeafRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueueLeafResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetInclusionProofRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetInclusionProofResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetInclusionProofByHashRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetInclusionProofByHashResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetConsistencyProofRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetConsistencyProofResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetLatestSignedLogRootRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetLatestSignedLogRootResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetEntryAndProofRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetEntryAndProofResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InitLogRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InitLogResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddSequencedLeavesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddSequencedLeavesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetLeavesByRangeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetLeavesByRangeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueuedLogLeaf); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_trillian_log_api_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LogLeaf); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_trillian_log_api_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 21,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_trillian_log_api_proto_goTypes,
- DependencyIndexes: file_trillian_log_api_proto_depIdxs,
- MessageInfos: file_trillian_log_api_proto_msgTypes,
- }.Build()
- File_trillian_log_api_proto = out.File
- file_trillian_log_api_proto_rawDesc = nil
- file_trillian_log_api_proto_goTypes = nil
- file_trillian_log_api_proto_depIdxs = nil
-}
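
One property of the generated accessors above is worth keeping in mind: protoc-gen-go emits nil-receiver-safe getters (each checks x != nil before dereferencing), so response-handling code can chain field access without guarding every pointer. A minimal sketch of the pattern, assuming the package is imported as github.com/google/trillian:

package main

import (
	"fmt"

	"github.com/google/trillian"
)

func main() {
	// Getters on a nil *LogLeaf return the field's zero value instead of
	// panicking, mirroring the x != nil checks in the generated code above.
	var leaf *trillian.LogLeaf
	fmt.Println(leaf.GetLeafValue()) // prints [] (nil byte slice), no panic
	fmt.Println(leaf.GetLeafIndex()) // prints 0
}
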
diff --git a/vendor/github.com/google/trillian/trillian_log_api.proto b/vendor/github.com/google/trillian/trillian_log_api.proto
deleted file mode 100644
index 74cbb96c3..000000000
--- a/vendor/github.com/google/trillian/trillian_log_api.proto
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package trillian;
-
-option go_package = "github.com/google/trillian";
-option java_multiple_files = true;
-option java_outer_classname = "TrillianLogApiProto";
-option java_package = "com.google.trillian.proto";
-
-import "google/protobuf/timestamp.proto";
-import "google/rpc/status.proto";
-import "trillian.proto";
-
-// The TrillianLog service provides access to an append-only Log data structure
-// as described in the [Verifiable Data
-// Structures](docs/papers/VerifiableDataStructures.pdf) paper.
-//
-// The API supports adding new entries to the Merkle tree for a specific Log
-// instance (identified by its log_id) in two modes:
-// - For a normal log, new leaf entries are queued up for subsequent
-// inclusion in the log, and the leaves are assigned consecutive leaf_index
-// values as part of that integration process.
-// - For a 'pre-ordered log', new entries have an already-defined leaf
-// ordering, and leaves are only integrated into the Merkle tree when a
-// contiguous range of leaves is available.
-//
-// The API also supports read operations to retrieve leaf contents, and to
-// provide cryptographic proofs of leaf inclusion and of the append-only nature
-// of the Log.
-//
-// Each API request also includes a charge_to field, which allows API users
-// to provide quota identifiers that should be "charged" for each API request
-// (the request may be rejected with codes.ResourceExhausted if quota is exhausted).
-//
-// Various operations on the API also allow for 'server skew', which can occur
-// when different API requests happen to be handled by different server instances
-// that may not all be up to date. An API request that is relative to a specific
-// tree size may reach a server instance that is not yet aware of this tree size;
-// in this case the server will typically return an OK response that contains:
-// - a signed log root that indicates the tree size that it is aware of
-//  - an otherwise empty response.
-service TrillianLog {
- // QueueLeaf adds a single leaf to the queue of pending leaves for a normal
- // log.
- rpc QueueLeaf(QueueLeafRequest) returns (QueueLeafResponse) {}
-
- // GetInclusionProof returns an inclusion proof for a leaf with a given index
- // in a particular tree.
- //
- // If the requested tree_size is larger than the server is aware of, the
- // response will include the latest known log root and an empty proof.
- rpc GetInclusionProof(GetInclusionProofRequest)
- returns (GetInclusionProofResponse) {}
-
- // GetInclusionProofByHash returns an inclusion proof for any leaves that have
- // the given Merkle hash in a particular tree.
- //
-  // If any of the leaves that match the given Merkle hash have a leaf index that
-  // is beyond the requested tree size, the corresponding proof entry will be empty.
- rpc GetInclusionProofByHash(GetInclusionProofByHashRequest)
- returns (GetInclusionProofByHashResponse) {}
-
- // GetConsistencyProof returns a consistency proof between different sizes of
- // a particular tree.
- //
- // If the requested tree size is larger than the server is aware of,
- // the response will include the latest known log root and an empty proof.
- rpc GetConsistencyProof(GetConsistencyProofRequest)
- returns (GetConsistencyProofResponse) {}
-
- // GetLatestSignedLogRoot returns the latest log root for a given tree,
- // and optionally also includes a consistency proof from an earlier tree size
- // to the new size of the tree.
- //
- // If the earlier tree size is larger than the server is aware of,
- // an InvalidArgument error is returned.
- rpc GetLatestSignedLogRoot(GetLatestSignedLogRootRequest)
- returns (GetLatestSignedLogRootResponse) {}
-
- // GetEntryAndProof returns a log leaf and the corresponding inclusion proof
- // to a specified tree size, for a given leaf index in a particular tree.
- //
- // If the requested tree size is unavailable but the leaf is
- // in scope for the current tree, the returned proof will be for the
- // current tree size rather than the requested tree size.
- rpc GetEntryAndProof(GetEntryAndProofRequest)
- returns (GetEntryAndProofResponse) {}
-
- // InitLog initializes a particular tree, creating the initial signed log
- // root (which will be of size 0).
- rpc InitLog(InitLogRequest) returns (InitLogResponse) {}
-
- // AddSequencedLeaves adds a batch of leaves with assigned sequence numbers
- // to a pre-ordered log. The indices of the provided leaves must be contiguous.
- rpc AddSequencedLeaves(AddSequencedLeavesRequest)
- returns (AddSequencedLeavesResponse) {}
-
- // GetLeavesByRange returns a batch of leaves whose leaf indices are in a
- // sequential range.
- rpc GetLeavesByRange(GetLeavesByRangeRequest)
- returns (GetLeavesByRangeResponse) {}
-}
-
-// ChargeTo describes the user(s) associated with the request whose quota should
-// be checked and charged.
-message ChargeTo {
- // user is a list of personality-defined strings.
- // Trillian will treat them as /User/%{user}/... keys when checking and
- // charging quota.
- // If one or more of the specified users has insufficient quota, the
- // request will be denied.
- //
- // As an example, a Certificate Transparency frontend might set the following
- // user strings when sending a QueueLeaf request to the Trillian log:
- // - The requesting IP address.
- // This would limit the number of requests per IP.
- // - The "intermediate-<hash>" for each of the intermediate certificates in
- // the submitted chain.
- // This would have the effect of limiting the rate of submissions under
- // a given intermediate/root.
- repeated string user = 1;
-}
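
The quota scheme above maps directly onto the generated Go type. A minimal sketch of the CT-style charging the comment describes; the IP address and intermediate-hash strings are hypothetical placeholders:

package main

import "github.com/google/trillian"

func main() {
	// Charge both the requesting IP and each intermediate in the chain, as
	// the ChargeTo comment suggests; the concrete values are made up here.
	charge := &trillian.ChargeTo{
		User: []string{
			"198.51.100.7",          // requesting IP address
			"intermediate-2fd4e1c6", // per-intermediate quota key
		},
	}
	_ = charge // attach to a request's charge_to field, e.g. QueueLeafRequest
}
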
-
-message QueueLeafRequest {
- int64 log_id = 1;
- LogLeaf leaf = 2;
- ChargeTo charge_to = 3;
-}
-
-message QueueLeafResponse {
- // queued_leaf describes the leaf which is or will be incorporated into the
- // Log. If the submitted leaf was already present in the Log (as indicated by
- // its leaf identity hash), then the returned leaf will be the pre-existing
- // leaf entry rather than the submitted leaf.
- QueuedLogLeaf queued_leaf = 2;
-}
-
-message GetInclusionProofRequest {
- int64 log_id = 1;
- int64 leaf_index = 2;
- int64 tree_size = 3;
- ChargeTo charge_to = 4;
-}
-
-message GetInclusionProofResponse {
- // The proof field may be empty if the requested tree_size was larger
- // than that available at the server (e.g. because there is skew between
- // server instances, and an earlier client request was processed by a
- // more up-to-date instance). In this case, the signed_log_root
- // field will indicate the tree size that the server is aware of, and
- // the proof field will be empty.
- Proof proof = 2;
- SignedLogRoot signed_log_root = 3;
-}
-
-message GetInclusionProofByHashRequest {
- int64 log_id = 1;
- // The leaf hash field provides the Merkle tree hash of the leaf entry
- // to be retrieved.
- bytes leaf_hash = 2;
- int64 tree_size = 3;
- bool order_by_sequence = 4;
- ChargeTo charge_to = 5;
-}
-
-message GetInclusionProofByHashResponse {
- // Logs can potentially contain leaves with duplicate hashes so it's possible
- // for this to return multiple proofs. If the leaf index for a particular
- // instance of the requested Merkle leaf hash is beyond the requested tree
- // size, the corresponding proof entry will be missing.
- repeated Proof proof = 2;
- SignedLogRoot signed_log_root = 3;
-}
-
-message GetConsistencyProofRequest {
- int64 log_id = 1;
- int64 first_tree_size = 2;
- int64 second_tree_size = 3;
- ChargeTo charge_to = 4;
-}
-
-message GetConsistencyProofResponse {
- // The proof field may be empty if the requested tree_size was larger
- // than that available at the server (e.g. because there is skew between
- // server instances, and an earlier client request was processed by a
- // more up-to-date instance). In this case, the signed_log_root
- // field will indicate the tree size that the server is aware of, and
- // the proof field will be empty.
- Proof proof = 2;
- SignedLogRoot signed_log_root = 3;
-}
-
-message GetLatestSignedLogRootRequest {
- int64 log_id = 1;
- ChargeTo charge_to = 2;
- // If first_tree_size is non-zero, the response will include a consistency
-  // proof between first_tree_size and the current tree size (provided the
-  // current size is not smaller).
- int64 first_tree_size = 3;
-}
-
-message GetLatestSignedLogRootResponse {
- SignedLogRoot signed_log_root = 2;
- // proof is filled in with a consistency proof if first_tree_size in
- // GetLatestSignedLogRootRequest is non-zero (and within the tree size
- // available at the server).
- Proof proof = 3;
-}
-
-message GetEntryAndProofRequest {
- int64 log_id = 1;
- int64 leaf_index = 2;
- int64 tree_size = 3;
- ChargeTo charge_to = 4;
-}
-
-message GetEntryAndProofResponse {
- Proof proof = 2;
- LogLeaf leaf = 3;
- SignedLogRoot signed_log_root = 4;
-}
-
-message InitLogRequest {
- int64 log_id = 1;
- ChargeTo charge_to = 2;
-}
-
-message InitLogResponse {
- SignedLogRoot created = 1;
-}
-
-message AddSequencedLeavesRequest {
- int64 log_id = 1;
- repeated LogLeaf leaves = 2;
- ChargeTo charge_to = 4;
-}
-
-message AddSequencedLeavesResponse {
- // Same number and order as in the corresponding request.
- repeated QueuedLogLeaf results = 2;
-}
-
-message GetLeavesByRangeRequest {
- int64 log_id = 1;
- int64 start_index = 2;
- int64 count = 3;
- ChargeTo charge_to = 4;
-}
-
-message GetLeavesByRangeResponse {
- // Returned log leaves starting from the `start_index` of the request, in
- // order. There may be fewer than `request.count` leaves returned, if the
- // requested range extended beyond the size of the tree or if the server opted
- // to return fewer leaves than requested.
- repeated LogLeaf leaves = 1;
- SignedLogRoot signed_log_root = 2;
-}
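
Because the server may legitimately return fewer leaves than requested, a reader has to loop until the range is covered. A sketch of such a loop against the generated client, assuming an empty reply means the range runs past the current tree size:

package main

import (
	"context"

	"github.com/google/trillian"
)

// fetchRange reads [start, start+count) from logID, retrying short reads.
func fetchRange(ctx context.Context, c trillian.TrillianLogClient, logID, start, count int64) ([]*trillian.LogLeaf, error) {
	var out []*trillian.LogLeaf
	for int64(len(out)) < count {
		resp, err := c.GetLeavesByRange(ctx, &trillian.GetLeavesByRangeRequest{
			LogId:      logID,
			StartIndex: start + int64(len(out)),
			Count:      count - int64(len(out)),
		})
		if err != nil {
			return nil, err
		}
		if len(resp.GetLeaves()) == 0 {
			break // range extends beyond the current tree size
		}
		out = append(out, resp.GetLeaves()...)
	}
	return out, nil
}

func main() {
	_ = fetchRange // wiring a real client is shown in the dial sketch further below
}
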
-
-// QueuedLogLeaf provides the result of submitting an entry to the log.
-// TODO(pavelkalinnikov): Consider renaming it to AddLogLeafResult or the like.
-message QueuedLogLeaf {
- // The leaf as it was stored by Trillian. Empty unless `status.code` is:
- // - `google.rpc.OK`: the `leaf` data is the same as in the request.
-  //  - `google.rpc.ALREADY_EXISTS` or `google.rpc.FAILED_PRECONDITION`: the
- // `leaf` is the conflicting one already in the log.
- LogLeaf leaf = 1;
-
- // The status of adding the leaf.
- // - `google.rpc.OK`: successfully added.
-  //  - `google.rpc.ALREADY_EXISTS`: the leaf is a duplicate of an already
-  //    existing one: either its `leaf_identity_hash` matches in `LOG` mode,
-  //    or its `leaf_index` matches in `PREORDERED_LOG` mode.
- // - `google.rpc.FAILED_PRECONDITION`: A conflicting entry is already
- // present in the log, e.g., same `leaf_index` but different `leaf_data`.
- google.rpc.Status status = 2;
-}
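
A submitter would typically switch on the per-leaf status described above. A small sketch, assuming the genproto status type that the generated Go bindings use for google.rpc.Status:

package main

import (
	"fmt"

	"github.com/google/trillian"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
)

// classify interprets a QueuedLogLeaf per the comment above: OK means newly
// queued, ALREADY_EXISTS means the returned leaf is the stored duplicate,
// FAILED_PRECONDITION means a conflicting entry already occupies the slot.
func classify(q *trillian.QueuedLogLeaf) {
	switch codes.Code(q.GetStatus().GetCode()) {
	case codes.OK:
		fmt.Println("leaf queued")
	case codes.AlreadyExists:
		fmt.Println("duplicate of stored leaf at index", q.GetLeaf().GetLeafIndex())
	case codes.FailedPrecondition:
		fmt.Println("conflicting entry already present")
	default:
		fmt.Println("unexpected status:", q.GetStatus().GetMessage())
	}
}

func main() {
	classify(&trillian.QueuedLogLeaf{Status: &spb.Status{Code: int32(codes.AlreadyExists)}})
}
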
-
-// LogLeaf describes a leaf in the Log's Merkle tree, corresponding to a single log entry.
-// Each leaf has a unique leaf index in the scope of this tree. Clients submitting new
-// leaf entries should only set the following fields:
-// - leaf_value
-// - extra_data (optionally)
-// - leaf_identity_hash (optionally)
-// - leaf_index (iff the log is a PREORDERED_LOG)
-message LogLeaf {
- // merkle_leaf_hash holds the Merkle leaf hash over leaf_value. This is
- // calculated by the Trillian server when leaves are added to the tree, using
- // the defined hashing algorithm and strategy for the tree; as such, the client
- // does not need to set it on leaf submissions.
- bytes merkle_leaf_hash = 1;
-
- // leaf_value holds the data that forms the value of the Merkle tree leaf.
- // The client should set this field on all leaf submissions, and is
- // responsible for ensuring its validity (the Trillian server treats it as an
- // opaque blob).
- bytes leaf_value = 2;
-
- // extra_data holds additional data associated with the Merkle tree leaf.
- // The client may set this data on leaf submissions, and the Trillian server
- // will return it on subsequent read operations. However, the contents of
- // this field are not covered by and do not affect the Merkle tree hash
- // calculations.
- bytes extra_data = 3;
-
- // leaf_index indicates the index of this leaf in the Merkle tree.
- // This field is returned on all read operations, but should only be
- // set for leaf submissions in PREORDERED_LOG mode (for a normal log
- // the leaf index is assigned by Trillian when the submitted leaf is
- // integrated into the Merkle tree).
- int64 leaf_index = 4;
-
- // leaf_identity_hash provides a hash value that indicates the client's
- // concept of which leaf entries should be considered identical.
- //
- // This mechanism allows the client personality to indicate that two leaves
- // should be considered "duplicates" even though their `leaf_value`s differ.
- //
- // If this is not set on leaf submissions, the Trillian server will take its
- // value to be the same as merkle_leaf_hash (and thus only leaves with
- // identical leaf_value contents will be considered identical).
- //
- // For example, in Certificate Transparency each certificate submission is
- // associated with a submission timestamp, but subsequent submissions of the
- // same certificate should be considered identical. This is achieved
- // by setting the leaf identity hash to a hash over (just) the certificate,
- // whereas the Merkle leaf hash encompasses both the certificate and its
- // submission time -- allowing duplicate certificates to be detected.
- //
- // Continuing the CT example, for a CT mirror personality (which must allow
- // dupes since the source log could contain them), the part of the
- // personality which fetches and submits the entries might set
- // `leaf_identity_hash` to `H(leaf_index||cert)`.
- //
- // TODO(pavelkalinnikov): Consider instead using `H(cert)` and allowing
- // identity hash dupes in `PREORDERED_LOG` mode, for it can later be
- // upgraded to `LOG` which will need to correctly detect duplicates with
- // older entries when new ones get queued.
- bytes leaf_identity_hash = 5;
-
- // queue_timestamp holds the time at which this leaf was queued for
- // inclusion in the Log, or zero if the entry was submitted without
- // queuing. Clients should not set this field on submissions.
- google.protobuf.Timestamp queue_timestamp = 6;
-
- // integrate_timestamp holds the time at which this leaf was integrated into
- // the tree. Clients should not set this field on submissions.
- google.protobuf.Timestamp integrate_timestamp = 7;
-}
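
The leaf_identity_hash contract is easiest to see in code. Below is a sketch of the H(leaf_index||cert) scheme the CT-mirror comment mentions; the 8-byte big-endian index encoding is an assumption made for illustration, not something the proto prescribes:

package main

import (
	"crypto/sha256"
	"encoding/binary"

	"github.com/google/trillian"
)

// mirrorIdentityHash computes H(leaf_index||cert) so that a mirror can keep
// byte-identical duplicates from the source log distinct, per the comment.
func mirrorIdentityHash(leafIndex int64, cert []byte) []byte {
	h := sha256.New()
	var idx [8]byte
	binary.BigEndian.PutUint64(idx[:], uint64(leafIndex))
	h.Write(idx[:])
	h.Write(cert)
	return h.Sum(nil)
}

func main() {
	cert := []byte("hypothetical DER-encoded certificate")
	leaf := &trillian.LogLeaf{
		LeafValue:        cert,
		LeafIndex:        42, // pre-assigned, since a mirror runs in PREORDERED_LOG mode
		LeafIdentityHash: mirrorIdentityHash(42, cert),
	}
	_ = leaf
}
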
diff --git a/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go b/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go
deleted file mode 100644
index 32e2ff8b3..000000000
--- a/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.1
-// source: trillian_log_api.proto
-
-package trillian
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-// TrillianLogClient is the client API for TrillianLog service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type TrillianLogClient interface {
- // QueueLeaf adds a single leaf to the queue of pending leaves for a normal
- // log.
- QueueLeaf(ctx context.Context, in *QueueLeafRequest, opts ...grpc.CallOption) (*QueueLeafResponse, error)
- // GetInclusionProof returns an inclusion proof for a leaf with a given index
- // in a particular tree.
- //
- // If the requested tree_size is larger than the server is aware of, the
- // response will include the latest known log root and an empty proof.
- GetInclusionProof(ctx context.Context, in *GetInclusionProofRequest, opts ...grpc.CallOption) (*GetInclusionProofResponse, error)
- // GetInclusionProofByHash returns an inclusion proof for any leaves that have
- // the given Merkle hash in a particular tree.
- //
-	// If any of the leaves that match the given Merkle hash have a leaf index that
-	// is beyond the requested tree size, the corresponding proof entry will be empty.
- GetInclusionProofByHash(ctx context.Context, in *GetInclusionProofByHashRequest, opts ...grpc.CallOption) (*GetInclusionProofByHashResponse, error)
- // GetConsistencyProof returns a consistency proof between different sizes of
- // a particular tree.
- //
- // If the requested tree size is larger than the server is aware of,
- // the response will include the latest known log root and an empty proof.
- GetConsistencyProof(ctx context.Context, in *GetConsistencyProofRequest, opts ...grpc.CallOption) (*GetConsistencyProofResponse, error)
- // GetLatestSignedLogRoot returns the latest log root for a given tree,
- // and optionally also includes a consistency proof from an earlier tree size
- // to the new size of the tree.
- //
- // If the earlier tree size is larger than the server is aware of,
- // an InvalidArgument error is returned.
- GetLatestSignedLogRoot(ctx context.Context, in *GetLatestSignedLogRootRequest, opts ...grpc.CallOption) (*GetLatestSignedLogRootResponse, error)
- // GetEntryAndProof returns a log leaf and the corresponding inclusion proof
- // to a specified tree size, for a given leaf index in a particular tree.
- //
- // If the requested tree size is unavailable but the leaf is
- // in scope for the current tree, the returned proof will be for the
- // current tree size rather than the requested tree size.
- GetEntryAndProof(ctx context.Context, in *GetEntryAndProofRequest, opts ...grpc.CallOption) (*GetEntryAndProofResponse, error)
- // InitLog initializes a particular tree, creating the initial signed log
- // root (which will be of size 0).
- InitLog(ctx context.Context, in *InitLogRequest, opts ...grpc.CallOption) (*InitLogResponse, error)
- // AddSequencedLeaves adds a batch of leaves with assigned sequence numbers
- // to a pre-ordered log. The indices of the provided leaves must be contiguous.
- AddSequencedLeaves(ctx context.Context, in *AddSequencedLeavesRequest, opts ...grpc.CallOption) (*AddSequencedLeavesResponse, error)
- // GetLeavesByRange returns a batch of leaves whose leaf indices are in a
- // sequential range.
- GetLeavesByRange(ctx context.Context, in *GetLeavesByRangeRequest, opts ...grpc.CallOption) (*GetLeavesByRangeResponse, error)
-}
-
-type trillianLogClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewTrillianLogClient(cc grpc.ClientConnInterface) TrillianLogClient {
- return &trillianLogClient{cc}
-}
-
-func (c *trillianLogClient) QueueLeaf(ctx context.Context, in *QueueLeafRequest, opts ...grpc.CallOption) (*QueueLeafResponse, error) {
- out := new(QueueLeafResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/QueueLeaf", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) GetInclusionProof(ctx context.Context, in *GetInclusionProofRequest, opts ...grpc.CallOption) (*GetInclusionProofResponse, error) {
- out := new(GetInclusionProofResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetInclusionProof", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) GetInclusionProofByHash(ctx context.Context, in *GetInclusionProofByHashRequest, opts ...grpc.CallOption) (*GetInclusionProofByHashResponse, error) {
- out := new(GetInclusionProofByHashResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetInclusionProofByHash", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) GetConsistencyProof(ctx context.Context, in *GetConsistencyProofRequest, opts ...grpc.CallOption) (*GetConsistencyProofResponse, error) {
- out := new(GetConsistencyProofResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetConsistencyProof", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) GetLatestSignedLogRoot(ctx context.Context, in *GetLatestSignedLogRootRequest, opts ...grpc.CallOption) (*GetLatestSignedLogRootResponse, error) {
- out := new(GetLatestSignedLogRootResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetLatestSignedLogRoot", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) GetEntryAndProof(ctx context.Context, in *GetEntryAndProofRequest, opts ...grpc.CallOption) (*GetEntryAndProofResponse, error) {
- out := new(GetEntryAndProofResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetEntryAndProof", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) InitLog(ctx context.Context, in *InitLogRequest, opts ...grpc.CallOption) (*InitLogResponse, error) {
- out := new(InitLogResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/InitLog", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) AddSequencedLeaves(ctx context.Context, in *AddSequencedLeavesRequest, opts ...grpc.CallOption) (*AddSequencedLeavesResponse, error) {
- out := new(AddSequencedLeavesResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/AddSequencedLeaves", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *trillianLogClient) GetLeavesByRange(ctx context.Context, in *GetLeavesByRangeRequest, opts ...grpc.CallOption) (*GetLeavesByRangeResponse, error) {
- out := new(GetLeavesByRangeResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetLeavesByRange", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// TrillianLogServer is the server API for TrillianLog service.
-// All implementations should embed UnimplementedTrillianLogServer
-// for forward compatibility
-type TrillianLogServer interface {
- // QueueLeaf adds a single leaf to the queue of pending leaves for a normal
- // log.
- QueueLeaf(context.Context, *QueueLeafRequest) (*QueueLeafResponse, error)
- // GetInclusionProof returns an inclusion proof for a leaf with a given index
- // in a particular tree.
- //
- // If the requested tree_size is larger than the server is aware of, the
- // response will include the latest known log root and an empty proof.
- GetInclusionProof(context.Context, *GetInclusionProofRequest) (*GetInclusionProofResponse, error)
- // GetInclusionProofByHash returns an inclusion proof for any leaves that have
- // the given Merkle hash in a particular tree.
- //
- // If any of the leaves that match the given Merkle hash have a leaf index that
- // is beyond the requested tree size, the corresponding proof entry will be empty.
- GetInclusionProofByHash(context.Context, *GetInclusionProofByHashRequest) (*GetInclusionProofByHashResponse, error)
- // GetConsistencyProof returns a consistency proof between different sizes of
- // a particular tree.
- //
- // If the requested tree size is larger than the server is aware of,
- // the response will include the latest known log root and an empty proof.
- GetConsistencyProof(context.Context, *GetConsistencyProofRequest) (*GetConsistencyProofResponse, error)
- // GetLatestSignedLogRoot returns the latest log root for a given tree,
- // and optionally also includes a consistency proof from an earlier tree size
- // to the new size of the tree.
- //
- // If the earlier tree size is larger than the server is aware of,
- // an InvalidArgument error is returned.
- GetLatestSignedLogRoot(context.Context, *GetLatestSignedLogRootRequest) (*GetLatestSignedLogRootResponse, error)
- // GetEntryAndProof returns a log leaf and the corresponding inclusion proof
- // to a specified tree size, for a given leaf index in a particular tree.
- //
- // If the requested tree size is unavailable but the leaf is
- // in scope for the current tree, the returned proof will be for the
- // current tree size rather than the requested tree size.
- GetEntryAndProof(context.Context, *GetEntryAndProofRequest) (*GetEntryAndProofResponse, error)
- // InitLog initializes a particular tree, creating the initial signed log
- // root (which will be of size 0).
- InitLog(context.Context, *InitLogRequest) (*InitLogResponse, error)
- // AddSequencedLeaves adds a batch of leaves with assigned sequence numbers
- // to a pre-ordered log. The indices of the provided leaves must be contiguous.
- AddSequencedLeaves(context.Context, *AddSequencedLeavesRequest) (*AddSequencedLeavesResponse, error)
- // GetLeavesByRange returns a batch of leaves whose leaf indices are in a
- // sequential range.
- GetLeavesByRange(context.Context, *GetLeavesByRangeRequest) (*GetLeavesByRangeResponse, error)
-}
-
-// UnimplementedTrillianLogServer should be embedded to have forward compatible implementations.
-type UnimplementedTrillianLogServer struct {
-}
-
-func (UnimplementedTrillianLogServer) QueueLeaf(context.Context, *QueueLeafRequest) (*QueueLeafResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method QueueLeaf not implemented")
-}
-func (UnimplementedTrillianLogServer) GetInclusionProof(context.Context, *GetInclusionProofRequest) (*GetInclusionProofResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetInclusionProof not implemented")
-}
-func (UnimplementedTrillianLogServer) GetInclusionProofByHash(context.Context, *GetInclusionProofByHashRequest) (*GetInclusionProofByHashResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetInclusionProofByHash not implemented")
-}
-func (UnimplementedTrillianLogServer) GetConsistencyProof(context.Context, *GetConsistencyProofRequest) (*GetConsistencyProofResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetConsistencyProof not implemented")
-}
-func (UnimplementedTrillianLogServer) GetLatestSignedLogRoot(context.Context, *GetLatestSignedLogRootRequest) (*GetLatestSignedLogRootResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetLatestSignedLogRoot not implemented")
-}
-func (UnimplementedTrillianLogServer) GetEntryAndProof(context.Context, *GetEntryAndProofRequest) (*GetEntryAndProofResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetEntryAndProof not implemented")
-}
-func (UnimplementedTrillianLogServer) InitLog(context.Context, *InitLogRequest) (*InitLogResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method InitLog not implemented")
-}
-func (UnimplementedTrillianLogServer) AddSequencedLeaves(context.Context, *AddSequencedLeavesRequest) (*AddSequencedLeavesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AddSequencedLeaves not implemented")
-}
-func (UnimplementedTrillianLogServer) GetLeavesByRange(context.Context, *GetLeavesByRangeRequest) (*GetLeavesByRangeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetLeavesByRange not implemented")
-}
-
-// UnsafeTrillianLogServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to TrillianLogServer will
-// result in compilation errors.
-type UnsafeTrillianLogServer interface {
- mustEmbedUnimplementedTrillianLogServer()
-}
-
-func RegisterTrillianLogServer(s grpc.ServiceRegistrar, srv TrillianLogServer) {
- s.RegisterService(&TrillianLog_ServiceDesc, srv)
-}
-
-func _TrillianLog_QueueLeaf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(QueueLeafRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).QueueLeaf(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/QueueLeaf",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).QueueLeaf(ctx, req.(*QueueLeafRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_GetInclusionProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetInclusionProofRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).GetInclusionProof(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/GetInclusionProof",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).GetInclusionProof(ctx, req.(*GetInclusionProofRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_GetInclusionProofByHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetInclusionProofByHashRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).GetInclusionProofByHash(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/GetInclusionProofByHash",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).GetInclusionProofByHash(ctx, req.(*GetInclusionProofByHashRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_GetConsistencyProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetConsistencyProofRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).GetConsistencyProof(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/GetConsistencyProof",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).GetConsistencyProof(ctx, req.(*GetConsistencyProofRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_GetLatestSignedLogRoot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetLatestSignedLogRootRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).GetLatestSignedLogRoot(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/GetLatestSignedLogRoot",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).GetLatestSignedLogRoot(ctx, req.(*GetLatestSignedLogRootRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_GetEntryAndProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetEntryAndProofRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).GetEntryAndProof(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/GetEntryAndProof",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).GetEntryAndProof(ctx, req.(*GetEntryAndProofRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_InitLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(InitLogRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).InitLog(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/InitLog",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).InitLog(ctx, req.(*InitLogRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_AddSequencedLeaves_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AddSequencedLeavesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).AddSequencedLeaves(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/AddSequencedLeaves",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).AddSequencedLeaves(ctx, req.(*AddSequencedLeavesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _TrillianLog_GetLeavesByRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetLeavesByRangeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TrillianLogServer).GetLeavesByRange(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/trillian.TrillianLog/GetLeavesByRange",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TrillianLogServer).GetLeavesByRange(ctx, req.(*GetLeavesByRangeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// TrillianLog_ServiceDesc is the grpc.ServiceDesc for TrillianLog service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var TrillianLog_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "trillian.TrillianLog",
- HandlerType: (*TrillianLogServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "QueueLeaf",
- Handler: _TrillianLog_QueueLeaf_Handler,
- },
- {
- MethodName: "GetInclusionProof",
- Handler: _TrillianLog_GetInclusionProof_Handler,
- },
- {
- MethodName: "GetInclusionProofByHash",
- Handler: _TrillianLog_GetInclusionProofByHash_Handler,
- },
- {
- MethodName: "GetConsistencyProof",
- Handler: _TrillianLog_GetConsistencyProof_Handler,
- },
- {
- MethodName: "GetLatestSignedLogRoot",
- Handler: _TrillianLog_GetLatestSignedLogRoot_Handler,
- },
- {
- MethodName: "GetEntryAndProof",
- Handler: _TrillianLog_GetEntryAndProof_Handler,
- },
- {
- MethodName: "InitLog",
- Handler: _TrillianLog_InitLog_Handler,
- },
- {
- MethodName: "AddSequencedLeaves",
- Handler: _TrillianLog_AddSequencedLeaves_Handler,
- },
- {
- MethodName: "GetLeavesByRange",
- Handler: _TrillianLog_GetLeavesByRange_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "trillian_log_api.proto",
-}
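For reference, a minimal sketch of how the generated TrillianLog client API deleted above is typically driven. The dial target and tree ID below are illustrative assumptions, not values from this repository; the types come from github.com/google/trillian.

    package main

    import (
        "context"
        "log"

        "github.com/google/trillian"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // Hypothetical server address and tree ID, for illustration only.
        conn, err := grpc.Dial("trillian-log-server:8090",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := trillian.NewTrillianLogClient(conn)
        resp, err := client.GetLatestSignedLogRoot(context.Background(),
            &trillian.GetLatestSignedLogRootRequest{LogId: 42})
        if err != nil {
            log.Fatal(err)
        }
        // resp.SignedLogRoot.LogRoot carries the TLS-serialized LogRootV1 bytes.
        log.Printf("signed log root: %d bytes", len(resp.SignedLogRoot.LogRoot))
    }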
diff --git a/vendor/github.com/google/trillian/types/internal/tls/tls.go b/vendor/github.com/google/trillian/types/internal/tls/tls.go
deleted file mode 100644
index badec7b50..000000000
--- a/vendor/github.com/google/trillian/types/internal/tls/tls.go
+++ /dev/null
@@ -1,713 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tls implements functionality for dealing with TLS-encoded data,
-// as defined in RFC 5246. This includes parsing and generation of TLS-encoded
-// data, together with utility functions for dealing with the DigitallySigned
-// TLS type.
-// N.B. This is copied from https://github.com/google/certificate-transparency-go/tree/master/tls
-// - DO NOT MAKE CHANGES TO THIS FILE except to sync to the latest from ct-go.
-package tls
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-// This file holds utility functions for TLS encoding/decoding data
-// as per RFC 5246 section 4.
-
-// A structuralError suggests that the TLS data is valid, but the Go type
-// which is receiving it doesn't match.
-type structuralError struct {
- field string
- msg string
-}
-
-func (e structuralError) Error() string {
- var prefix string
- if e.field != "" {
- prefix = e.field + ": "
- }
- return "tls: structure error: " + prefix + e.msg
-}
-
-// A syntaxError suggests that the TLS data is invalid.
-type syntaxError struct {
- field string
- msg string
-}
-
-func (e syntaxError) Error() string {
- var prefix string
- if e.field != "" {
- prefix = e.field + ": "
- }
- return "tls: syntax error: " + prefix + e.msg
-}
-
-// Uint24 is an unsigned 3-byte integer.
-type Uint24 uint32
-
-// Enum is an unsigned integer.
-type Enum uint64
-
-var (
- uint8Type = reflect.TypeOf(uint8(0))
- uint16Type = reflect.TypeOf(uint16(0))
- uint24Type = reflect.TypeOf(Uint24(0))
- uint32Type = reflect.TypeOf(uint32(0))
- uint64Type = reflect.TypeOf(uint64(0))
- enumType = reflect.TypeOf(Enum(0))
-)
-
-// Unmarshal parses the TLS-encoded data in b and uses the reflect package to
-// fill in an arbitrary value pointed at by val. Because Unmarshal uses the
-// reflect package, the structs being written to must use exported fields
-// (upper case names).
-//
-// The mappings between TLS types and Go types are as follows; some fields
-// must have tags (to indicate their encoded size).
-//
-// TLS Go Required Tags
-// opaque byte / uint8
-// uint8 byte / uint8
-// uint16 uint16
-// uint24 tls.Uint24
-// uint32 uint32
-// uint64 uint64
-// enum tls.Enum size:S or maxval:N
-// Type<N,M> []Type minlen:N,maxlen:M
-// opaque[N] [N]byte / [N]uint8
-// uint8[N] [N]byte / [N]uint8
-// struct { } struct { }
-// select(T) {
-// case e1: Type *T selector:Field,val:e1
-// }
-//
-// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the
-// associated enumeration type is available earlier in the same enclosing
-// struct, and each possible variant is marked with a selector tag (to
-// indicate which field selects the variants) and a val tag (to indicate
-// what value of the selector picks this particular field).
-//
-// For example, a TLS structure:
-//
-// enum { e1(1), e2(2) } EnumType;
-// struct {
-// EnumType sel;
-// select(sel) {
-// case e1: uint16
-// case e2: uint32
-// } data;
-// } VariantItem;
-//
-// would have a corresponding Go type:
-//
-// type VariantItem struct {
-// Sel tls.Enum `tls:"maxval:2"`
-// Data16 *uint16 `tls:"selector:Sel,val:1"`
-// Data32 *uint32 `tls:"selector:Sel,val:2"`
-// }
-//
-// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
-//
-// For TLS variable-length vectors that are themselves used in other vectors,
-// create a single-field structure to represent the inner type. For example, for:
-//
-// opaque InnerType<1..65535>;
-// struct {
-// InnerType inners<1,65535>;
-// } Something;
-//
-// convert to:
-//
-// type InnerType struct {
-// Val []byte `tls:"minlen:1,maxlen:65535"`
-// }
-// type Something struct {
-// Inners []InnerType `tls:"minlen:1,maxlen:65535"`
-// }
-//
-// If the encoded value does not fit in the Go type, Unmarshal returns a parse error.
-func Unmarshal(b []byte, val interface{}) ([]byte, error) {
- return UnmarshalWithParams(b, val, "")
-}
-
-// UnmarshalWithParams allows field parameters to be specified for the
-// top-level element. The form of the params is the same as the field tags.
-func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) {
- info, err := fieldTagToFieldInfo(params, "")
- if err != nil {
- return nil, err
- }
- // The passed in interface{} is a pointer (to allow the value to be written
- // to); extract the pointed-to object as a reflect.Value, so parseField
- // can do various introspection things.
- v := reflect.ValueOf(val).Elem()
- offset, err := parseField(v, b, 0, info)
- if err != nil {
- return nil, err
- }
- return b[offset:], nil
-}
-
-// Return the number of bytes needed to encode values up to (and including) x.
-func byteCount(x uint64) uint {
- switch {
- case x < 0x100:
- return 1
- case x < 0x10000:
- return 2
- case x < 0x1000000:
- return 3
- case x < 0x100000000:
- return 4
- case x < 0x10000000000:
- return 5
- case x < 0x1000000000000:
- return 6
- case x < 0x100000000000000:
- return 7
- default:
- return 8
- }
-}
-
-type fieldInfo struct {
- count uint // Number of bytes
- countSet bool
- minlen uint64 // Only relevant for slices
- maxlen uint64 // Only relevant for slices
- selector string // Only relevant for select sub-values
- val uint64 // Only relevant for select sub-values
- name string // Used for better error messages
-}
-
-func (i *fieldInfo) fieldName() string {
- if i == nil {
- return ""
- }
- return i.name
-}
-
-// Given a tag string, return a fieldInfo describing the field.
-func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) {
- var info *fieldInfo
- // Iterate over clauses in the tag, ignoring any that don't parse properly.
- for _, part := range strings.Split(str, ",") {
- switch {
- case strings.HasPrefix(part, "maxval:"):
- if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil {
- info = &fieldInfo{count: byteCount(v), countSet: true}
- }
- case strings.HasPrefix(part, "size:"):
- if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil {
- info = &fieldInfo{count: uint(sz), countSet: true}
- }
- case strings.HasPrefix(part, "maxlen:"):
- v, err := strconv.ParseUint(part[7:], 10, 64)
- if err != nil {
- continue
- }
- if info == nil {
- info = &fieldInfo{}
- }
- info.count = byteCount(v)
- info.countSet = true
- info.maxlen = v
- case strings.HasPrefix(part, "minlen:"):
- v, err := strconv.ParseUint(part[7:], 10, 64)
- if err != nil {
- continue
- }
- if info == nil {
- info = &fieldInfo{}
- }
- info.minlen = v
- case strings.HasPrefix(part, "selector:"):
- if info == nil {
- info = &fieldInfo{}
- }
- info.selector = part[9:]
- case strings.HasPrefix(part, "val:"):
- v, err := strconv.ParseUint(part[4:], 10, 64)
- if err != nil {
- continue
- }
- if info == nil {
- info = &fieldInfo{}
- }
- info.val = v
- }
- }
- if info != nil {
- info.name = name
- if info.selector == "" {
- if info.count < 1 {
- return nil, structuralError{name, "field of unknown size in " + str}
- } else if info.count > 8 {
- return nil, structuralError{name, "specified size too large in " + str}
- } else if info.minlen > info.maxlen {
- return nil, structuralError{name, "specified length range inverted in " + str}
- } else if info.val > 0 {
- return nil, structuralError{name, "specified selector value but not field in " + str}
- }
- }
- } else if name != "" {
- info = &fieldInfo{name: name}
- }
- return info, nil
-}
-
-// Check that a value fits into a field described by a fieldInfo structure.
-func (i fieldInfo) check(val uint64, fldName string) error {
- if val >= (1 << (8 * i.count)) {
- return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)}
- }
- if i.maxlen != 0 {
- if val < i.minlen {
- return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)}
- }
- if val > i.maxlen {
- return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)}
- }
- }
- return nil
-}
-
-// readVarUint reads a big-endian unsigned integer of the given size in
-// bytes.
-func readVarUint(data []byte, info *fieldInfo) (uint64, error) {
- if info == nil || !info.countSet {
- return 0, structuralError{info.fieldName(), "no field size information available"}
- }
- if len(data) < int(info.count) {
- return 0, syntaxError{info.fieldName(), "truncated variable-length integer"}
- }
- var result uint64
- for i := uint(0); i < info.count; i++ {
- result = (result << 8) | uint64(data[i])
- }
- if err := info.check(result, info.name); err != nil {
- return 0, err
- }
- return result, nil
-}
-
-// parseField is the main parsing function. Given a byte slice and an offset
-// (in bytes) into the data, it will try to parse a suitable TLS value out
-// and store it in the given Value.
-func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) {
- offset := initOffset
- rest := data[offset:]
-
- fieldType := v.Type()
- // First look for known fixed types.
- switch fieldType {
- case uint8Type:
- if len(rest) < 1 {
- return offset, syntaxError{info.fieldName(), "truncated uint8"}
- }
- v.SetUint(uint64(rest[0]))
- offset++
- return offset, nil
- case uint16Type:
- if len(rest) < 2 {
- return offset, syntaxError{info.fieldName(), "truncated uint16"}
- }
- v.SetUint(uint64(binary.BigEndian.Uint16(rest)))
- offset += 2
- return offset, nil
- case uint24Type:
- if len(rest) < 3 {
- return offset, syntaxError{info.fieldName(), "truncated uint24"}
- }
- v.SetUint(uint64(rest[0])<<16 | uint64(rest[1])<<8 | uint64(rest[2]))
- offset += 3
- return offset, nil
- case uint32Type:
- if len(rest) < 4 {
- return offset, syntaxError{info.fieldName(), "truncated uint32"}
- }
- v.SetUint(uint64(binary.BigEndian.Uint32(rest)))
- offset += 4
- return offset, nil
- case uint64Type:
- if len(rest) < 8 {
- return offset, syntaxError{info.fieldName(), "truncated uint64"}
- }
- v.SetUint(uint64(binary.BigEndian.Uint64(rest)))
- offset += 8
- return offset, nil
- }
-
- // Now deal with user-defined types.
- switch v.Kind() {
- case enumType.Kind():
- // Assume that anything of the same kind as Enum is an Enum, so that
- // users can alias types of their own to Enum.
- val, err := readVarUint(rest, info)
- if err != nil {
- return offset, err
- }
- v.SetUint(val)
- offset += int(info.count)
- return offset, nil
- case reflect.Struct:
- structType := fieldType
- // TLS includes a select(Enum) {..} construct, where the value of an enum
- // indicates which variant field is present (like a C union). We require
- // that the enum value be an earlier field in the same structure (the selector),
- // and that each of the possible variant destination fields be pointers.
- // So the Go mapping looks like:
- // type variantType struct {
- // Which tls.Enum `tls:"size:1"` // this is the selector
- // Val1 *type1 `tls:"selector:Which,val:1"` // this is a destination
- // Val2 *type2 `tls:"selector:Which,val:2"` // this is a destination
- // }
-
- // To deal with this, we track any enum-like fields and their values...
- enums := make(map[string]uint64)
- // .. and we track which selector names we've seen (in the destination field tags),
- // and whether a destination for that selector has been chosen.
- selectorSeen := make(map[string]bool)
- for i := 0; i < structType.NumField(); i++ {
- // Find information about this field.
- tag := structType.Field(i).Tag.Get("tls")
- fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
- if err != nil {
- return offset, err
- }
-
- destination := v.Field(i)
- if fieldInfo.selector != "" {
- // This is a possible select(Enum) destination, so first check that the referenced
- // selector field has already been seen earlier in the struct.
- choice, ok := enums[fieldInfo.selector]
- if !ok {
- return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
- }
- if structType.Field(i).Type.Kind() != reflect.Ptr {
- return offset, structuralError{fieldInfo.name, "choice field not a pointer type"}
- }
- // Is this the first mention of the selector field name? If so, remember it.
- seen, ok := selectorSeen[fieldInfo.selector]
- if !ok {
- selectorSeen[fieldInfo.selector] = false
- }
- if choice != fieldInfo.val {
- // This destination field was not the chosen one, so make it nil (we checked
- // it was a pointer above).
- v.Field(i).Set(reflect.Zero(structType.Field(i).Type))
- continue
- }
- if seen {
- // We already saw a different destination field receive the value for this
- // selector value, which indicates a badly annotated structure.
- return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
- }
- selectorSeen[fieldInfo.selector] = true
- // Make an object of the pointed-to type and parse into that.
- v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem()))
- destination = v.Field(i).Elem()
- }
- offset, err = parseField(destination, data, offset, fieldInfo)
- if err != nil {
- return offset, err
- }
-
- // Remember any possible tls.Enum values encountered in case they are selectors.
- if structType.Field(i).Type.Kind() == enumType.Kind() {
- enums[structType.Field(i).Name] = v.Field(i).Uint()
- }
-
- }
-
- // Now we have seen all fields in the structure, check that all select(Enum) {..} selector
- // fields found a destination to put their data in.
- for selector, seen := range selectorSeen {
- if !seen {
- return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
- }
- }
- return offset, nil
- case reflect.Array:
- datalen := v.Len()
-
- if datalen > len(rest) {
- return offset, syntaxError{info.fieldName(), "truncated array"}
- }
- inner := rest[:datalen]
- offset += datalen
- if fieldType.Elem().Kind() != reflect.Uint8 {
- // Only byte/uint8 arrays are supported
- return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()}
- }
- reflect.Copy(v, reflect.ValueOf(inner))
- return offset, nil
-
- case reflect.Slice:
- sliceType := fieldType
- // Slices represent variable-length vectors, which are prefixed by a length field.
- // The fieldInfo indicates the size of that length field.
- varlen, err := readVarUint(rest, info)
- if err != nil {
- return offset, err
- }
- datalen := int(varlen)
- offset += int(info.count)
- rest = rest[info.count:]
-
- if datalen > len(rest) {
- return offset, syntaxError{info.fieldName(), "truncated slice"}
- }
- inner := rest[:datalen]
- offset += datalen
- if fieldType.Elem().Kind() == reflect.Uint8 {
- // Fast version for []byte
- v.Set(reflect.MakeSlice(sliceType, datalen, datalen))
- reflect.Copy(v, reflect.ValueOf(inner))
- return offset, nil
- }
-
- v.Set(reflect.MakeSlice(sliceType, 0, datalen))
- single := reflect.New(sliceType.Elem())
- for innerOffset := 0; innerOffset < len(inner); {
- var err error
- innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil)
- if err != nil {
- return offset, err
- }
- v.Set(reflect.Append(v, single.Elem()))
- }
- return offset, nil
-
- default:
- return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
- }
-}
-
-// Marshal returns the TLS encoding of val.
-func Marshal(val interface{}) ([]byte, error) {
- return MarshalWithParams(val, "")
-}
-
-// MarshalWithParams returns the TLS encoding of val, and allows field
-// parameters to be specified for the top-level element. The form
-// of the params is the same as the field tags.
-func MarshalWithParams(val interface{}, params string) ([]byte, error) {
- info, err := fieldTagToFieldInfo(params, "")
- if err != nil {
- return nil, err
- }
- var out bytes.Buffer
- v := reflect.ValueOf(val)
- if err := marshalField(&out, v, info); err != nil {
- return nil, err
- }
- return out.Bytes(), nil
-}
-
-func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error {
- var prefix string
- if info != nil && len(info.name) > 0 {
- prefix = info.name + ": "
- }
- fieldType := v.Type()
- // First look for known fixed types.
- switch fieldType {
- case uint8Type:
- out.WriteByte(byte(v.Uint()))
- return nil
- case uint16Type:
- scratch := make([]byte, 2)
- binary.BigEndian.PutUint16(scratch, uint16(v.Uint()))
- out.Write(scratch)
- return nil
- case uint24Type:
- i := v.Uint()
- if i > 0xffffff {
- return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)}
- }
- scratch := make([]byte, 4)
- binary.BigEndian.PutUint32(scratch, uint32(i))
- out.Write(scratch[1:])
- return nil
- case uint32Type:
- scratch := make([]byte, 4)
- binary.BigEndian.PutUint32(scratch, uint32(v.Uint()))
- out.Write(scratch)
- return nil
- case uint64Type:
- scratch := make([]byte, 8)
- binary.BigEndian.PutUint64(scratch, uint64(v.Uint()))
- out.Write(scratch)
- return nil
- }
-
- // Now deal with user-defined types.
- switch v.Kind() {
- case enumType.Kind():
- i := v.Uint()
- if info == nil {
- return structuralError{info.fieldName(), "enum field tag missing"}
- }
- if err := info.check(i, prefix); err != nil {
- return err
- }
- scratch := make([]byte, 8)
- binary.BigEndian.PutUint64(scratch, uint64(i))
- out.Write(scratch[(8 - info.count):])
- return nil
- case reflect.Struct:
- structType := fieldType
- enums := make(map[string]uint64) // Values of any Enum fields
- // The comment parseField() describes the mapping of the TLS select(Enum) {..} construct;
- // here we have selector and source (rather than destination) fields.
-
- // Track which selector names we've seen (in the source field tags), and whether a source
- // value for that selector has been processed.
- selectorSeen := make(map[string]bool)
- for i := 0; i < structType.NumField(); i++ {
- // Find information about this field.
- tag := structType.Field(i).Tag.Get("tls")
- fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
- if err != nil {
- return err
- }
-
- source := v.Field(i)
- if fieldInfo.selector != "" {
- // This field is a possible source for a select(Enum) {..}. First check
- // the selector field name has been seen.
- choice, ok := enums[fieldInfo.selector]
- if !ok {
- return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
- }
- if structType.Field(i).Type.Kind() != reflect.Ptr {
- return structuralError{fieldInfo.name, "choice field not a pointer type"}
- }
- // Is this the first mention of the selector field name? If so, remember it.
- seen, ok := selectorSeen[fieldInfo.selector]
- if !ok {
- selectorSeen[fieldInfo.selector] = false
- }
- if choice != fieldInfo.val {
- // This source was not chosen; police that it should be nil.
- if v.Field(i).Pointer() != uintptr(0) {
- return structuralError{fieldInfo.name, "unchosen field is non-nil"}
- }
- continue
- }
- if seen {
- // We already saw a different source field generate the value for this
- // selector value, which indicates a badly annotated structure.
- return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
- }
- selectorSeen[fieldInfo.selector] = true
- if v.Field(i).Pointer() == uintptr(0) {
- return structuralError{fieldInfo.name, "chosen field is nil"}
- }
- // Marshal from the pointed-to source object.
- source = v.Field(i).Elem()
- }
-
- var fieldData bytes.Buffer
- if err := marshalField(&fieldData, source, fieldInfo); err != nil {
- return err
- }
- out.Write(fieldData.Bytes())
-
- // Remember any tls.Enum values encountered in case they are selectors.
- if structType.Field(i).Type.Kind() == enumType.Kind() {
- enums[structType.Field(i).Name] = v.Field(i).Uint()
- }
- }
- // Now we have seen all fields in the structure, check that all select(Enum) {..} selector
- // fields found a source field to get their data from.
- for selector, seen := range selectorSeen {
- if !seen {
- return syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
- }
- }
- return nil
-
- case reflect.Array:
- datalen := v.Len()
- arrayType := fieldType
- if arrayType.Elem().Kind() != reflect.Uint8 {
- // Only byte/uint8 arrays are supported
- return structuralError{info.fieldName(), "unsupported array type"}
- }
- bytes := make([]byte, datalen)
- for i := 0; i < datalen; i++ {
- bytes[i] = uint8(v.Index(i).Uint())
- }
- _, err := out.Write(bytes)
- return err
-
- case reflect.Slice:
- if info == nil {
- return structuralError{info.fieldName(), "slice field tag missing"}
- }
-
- sliceType := fieldType
- if sliceType.Elem().Kind() == reflect.Uint8 {
- // Fast version for []byte: first write the length as info.count bytes.
- datalen := v.Len()
- scratch := make([]byte, 8)
- binary.BigEndian.PutUint64(scratch, uint64(datalen))
- out.Write(scratch[(8 - info.count):])
-
- if err := info.check(uint64(datalen), prefix); err != nil {
- return err
- }
- // Then just write the data.
- bytes := make([]byte, datalen)
- for i := 0; i < datalen; i++ {
- bytes[i] = uint8(v.Index(i).Uint())
- }
- _, err := out.Write(bytes)
- return err
- }
- // General version: use a separate Buffer to write the slice entries into.
- var innerBuf bytes.Buffer
- for i := 0; i < v.Len(); i++ {
- if err := marshalField(&innerBuf, v.Index(i), nil); err != nil {
- return err
- }
- }
-
- // Now insert (and check) the size.
- size := uint64(innerBuf.Len())
- if err := info.check(size, prefix); err != nil {
- return err
- }
- scratch := make([]byte, 8)
- binary.BigEndian.PutUint64(scratch, size)
- out.Write(scratch[(8 - info.count):])
-
- // Then copy the data.
- _, err := out.Write(innerBuf.Bytes())
- return err
-
- default:
- return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
- }
-}
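A minimal round-trip sketch of the tag-driven encoding the deleted package implemented, using only Marshal, Unmarshal, and the struct-tag conventions documented above. Note the package lives under internal/ and is not importable outside trillian, so the import is for illustration only; the Item struct is a hypothetical example.

    package main

    import (
        "fmt"

        "github.com/google/trillian/types/internal/tls" // internal; illustration only
    )

    // Mirrors the RFC 5246 notation:
    //   struct {
    //       uint16 id;
    //       opaque name<0..255>;
    //   } Item;
    type Item struct {
        ID   uint16
        Name []byte `tls:"minlen:0,maxlen:255"`
    }

    func main() {
        in := Item{ID: 7, Name: []byte("leaf")}
        // Encodes as: 2-byte ID, 1-byte length prefix (maxlen 255), name bytes.
        b, err := tls.Marshal(in)
        if err != nil {
            panic(err)
        }
        var out Item
        if _, err := tls.Unmarshal(b, &out); err != nil {
            panic(err)
        }
        fmt.Printf("% x -> %+v\n", b, out)
    }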
diff --git a/vendor/github.com/google/trillian/types/logroot.go b/vendor/github.com/google/trillian/types/logroot.go
deleted file mode 100644
index 1c3a38045..000000000
--- a/vendor/github.com/google/trillian/types/logroot.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package types defines serialization and parsing functions for SignedLogRoot
-// fields.
-package types
-
-import (
- "encoding/binary"
- "fmt"
-
- "github.com/google/trillian/types/internal/tls"
-
- "github.com/google/trillian"
-)
-
-// LogRootV1 holds the TLS-deserialization of the following structure
-// (described in RFC5246 section 4 notation):
-//
-// struct {
-// uint64 tree_size;
-// opaque root_hash<0..128>;
-// uint64 timestamp_nanos;
-// uint64 revision;
-// opaque metadata<0..65535>;
-// } LogRootV1;
-type LogRootV1 struct {
- // TreeSize is the number of leaves in the log Merkle tree.
- TreeSize uint64
- // RootHash is the hash of the root node of the tree.
- RootHash []byte `tls:"minlen:0,maxlen:128"`
- // TimestampNanos is the time in nanoseconds for when this root was created,
- // counting from the UNIX epoch.
- TimestampNanos uint64
-
- // Revision is the Merkle tree revision associated with this root.
- //
- // Deprecated: Revision is a concept internal to the storage layer.
- Revision uint64
-
- // Metadata holds additional data associated with this root.
- Metadata []byte `tls:"minlen:0,maxlen:65535"`
-}
-
-// LogRoot holds the TLS-deserialization of the following structure
-// (described in RFC5246 section 4 notation):
-// enum { v1(1), (65535)} Version;
-//
-// struct {
-// Version version;
-// select(version) {
-// case v1: LogRootV1;
-// }
-// } LogRoot;
-type LogRoot struct {
- Version tls.Enum `tls:"size:2"`
- V1 *LogRootV1 `tls:"selector:Version,val:1"`
-}
-
-// UnmarshalBinary verifies that logRootBytes is a TLS serialized LogRoot, has
-// the LOG_ROOT_FORMAT_V1 tag, and populates the caller with the deserialized
-// *LogRootV1.
-func (l *LogRootV1) UnmarshalBinary(logRootBytes []byte) error {
- if len(logRootBytes) < 3 {
- return fmt.Errorf("logRootBytes too short")
- }
- if l == nil {
- return fmt.Errorf("nil log root")
- }
- version := binary.BigEndian.Uint16(logRootBytes)
- if version != uint16(trillian.LogRootFormat_LOG_ROOT_FORMAT_V1) {
- return fmt.Errorf("invalid LogRoot.Version: %v, want %v",
- version, trillian.LogRootFormat_LOG_ROOT_FORMAT_V1)
- }
-
- var logRoot LogRoot
- if _, err := tls.Unmarshal(logRootBytes, &logRoot); err != nil {
- return err
- }
-
- *l = *logRoot.V1
- return nil
-}
-
-// MarshalBinary returns a canonical TLS serialization of LogRoot.
-func (l *LogRootV1) MarshalBinary() ([]byte, error) {
- return tls.Marshal(LogRoot{
- Version: tls.Enum(trillian.LogRootFormat_LOG_ROOT_FORMAT_V1),
- V1: l,
- })
-}
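A hedged sketch of the MarshalBinary/UnmarshalBinary round trip the deleted file provided; the field values are placeholders.

    package main

    import (
        "fmt"

        "github.com/google/trillian/types"
    )

    func main() {
        root := types.LogRootV1{
            TreeSize:       100,
            RootHash:       make([]byte, 32), // placeholder hash, illustration only
            TimestampNanos: 1686300000000000000,
        }
        // Serializes as a 2-byte version tag (LOG_ROOT_FORMAT_V1) followed by
        // the TLS-encoded LogRootV1 body.
        b, err := root.MarshalBinary()
        if err != nil {
            panic(err)
        }
        var parsed types.LogRootV1
        if err := parsed.UnmarshalBinary(b); err != nil {
            panic(err)
        }
        fmt.Printf("tree size %d, %d root-hash bytes\n", parsed.TreeSize, len(parsed.RootHash))
    }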
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
new file mode 100644
index 000000000..33686e4da
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
@@ -0,0 +1,9 @@
+## 0.7.4 (Jun 6, 2023)
+
+BUG FIXES
+
+- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 [GH-194]
+
+## 0.7.3 (May 15, 2023)
+
+Initial release
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
new file mode 100644
index 000000000..f8389c995
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
@@ -0,0 +1 @@
+* @hashicorp/release-engineering
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
index e87a115e4..f4f97ee58 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
+++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
@@ -1,3 +1,5 @@
+Copyright (c) 2015 HashiCorp, Inc.
+
Mozilla Public License, version 2.0
1. Definitions
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
index f40d2411c..cad96bd97 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/client.go
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
// Package retryablehttp provides a familiar HTTP client interface with
// automatic retries and exponential backoff. It is a thin wrapper over the
// standard net/http client library and exposes nearly the same public API.
@@ -257,10 +260,17 @@ func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, erro
if err != nil {
return nil, 0, err
}
- bodyReader = func() (io.Reader, error) {
- return bytes.NewReader(buf), nil
+ if len(buf) == 0 {
+ bodyReader = func() (io.Reader, error) {
+ return http.NoBody, nil
+ }
+ contentLength = 0
+ } else {
+ bodyReader = func() (io.Reader, error) {
+ return bytes.NewReader(buf), nil
+ }
+ contentLength = int64(len(buf))
}
- contentLength = int64(len(buf))
// No body provided, nothing to do
case nil:
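A sketch of the behavior this hunk fixes, against the public retryablehttp API (the URL is a placeholder): an empty []byte body now resolves to http.NoBody with Content-Length 0, so transports, HTTP/2 in particular, still send the Content-Type header instead of treating the request as bodiless.

    package main

    import (
        "log"

        "github.com/hashicorp/go-retryablehttp"
    )

    func main() {
        client := retryablehttp.NewClient()

        // Before the fix, an empty []byte produced a zero-length bytes.Reader
        // that HTTP/2 treated as "no body", dropping Content-Type on retries.
        req, err := retryablehttp.NewRequest("POST", "https://example.com/v1/ping", []byte{})
        if err != nil {
            log.Fatal(err)
        }
        req.Header.Set("Content-Type", "application/json")

        resp, err := client.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
    }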
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go
index 8f3ee3584..8c407adb3 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go
+++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
package retryablehttp
import (
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index 4f0287498..ffbbb62c7 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -1,17 +1,20 @@
# Mergo
-[![GoDoc][3]][4]
[![GitHub release][5]][6]
[![GoCard][7]][8]
-[![Build Status][1]][2]
-[![Coverage Status][9]][10]
+[![Test status][1]][2]
+[![OpenSSF Scorecard][21]][22]
+[![OpenSSF Best Practices][19]][20]
+[![Coverage status][9]][10]
[![Sourcegraph][11]][12]
-[![FOSSA Status][13]][14]
+[![FOSSA status][13]][14]
+
+[![GoDoc][3]][4]
[![Become my sponsor][15]][16]
[![Tidelift][17]][18]
-[1]: https://travis-ci.org/imdario/mergo.png
-[2]: https://travis-ci.org/imdario/mergo
+[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
+[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg
@@ -28,6 +31,10 @@
[16]: https://github.com/sponsors/imdario
[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
+[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
+[20]: https://bestpractices.coreinfrastructure.org/projects/7177
+[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
+[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@@ -232,5 +239,4 @@ Written by [Dario Castañé](http://dario.im).
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
-
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils.go b/vendor/github.com/mistifyio/go-zfs/v3/utils.go
index 0c2cce7d9..b69942b53 100644
--- a/vendor/github.com/mistifyio/go-zfs/v3/utils.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils.go
@@ -37,13 +37,16 @@ func (c *command) Run(arg ...string) ([][]string, error) {
cmd.Stderr = &stderr
id := uuid.New().String()
- joinedArgs := strings.Join(cmd.Args, " ")
+ joinedArgs := cmd.Path
+ if len(cmd.Args) > 1 {
+ joinedArgs = strings.Join(append([]string{cmd.Path}, cmd.Args[1:]...), " ")
+ }
logger.Log([]string{"ID:" + id, "START", joinedArgs})
if err := cmd.Run(); err != nil {
return nil, &Error{
Err: err,
- Debug: strings.Join([]string{cmd.Path, joinedArgs[1:]}, " "),
+ Debug: joinedArgs,
Stderr: stderr.String(),
}
}
@@ -61,7 +64,7 @@ func (c *command) Run(arg ...string) ([][]string, error) {
output := make([][]string, len(lines))
for i, l := range lines {
- output[i] = strings.Fields(l)
+ output[i] = strings.Split(l, "\t")
}
return output, nil
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
index ef1beac90..b1ce59656 100644
--- a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
@@ -15,5 +15,5 @@ var (
zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
zpoolPropListOptions = strings.Join(zpoolPropList, ",")
- zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+ zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions}
)
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
index c6bf6d87a..f19aebabb 100644
--- a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
@@ -15,5 +15,5 @@ var (
zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
zpoolPropListOptions = strings.Join(zpoolPropList, ",")
- zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+ zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions}
)
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go
index 2f7071305..a0bd6471a 100644
--- a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go
@@ -49,9 +49,6 @@ func GetZpool(name string) (*Zpool, error) {
return nil, err
}
- // there is no -H
- out = out[1:]
-
z := &Zpool{Name: name}
for _, line := range out {
if err := z.parseLine(line); err != nil {
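Taken together, these go-zfs changes follow from one flag: `-H` suppresses the header row, which is why GetZpool no longer strips out[0], and it makes the output tab-separated, which is why Run now splits each line on "\t" rather than arbitrary whitespace. A standalone sketch of parsing such output, with made-up sample data:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Shape of `zpool get -Hp` output: no header, tab-separated fields.
        // The values below are illustrative only.
        sample := "tank\thealth\tONLINE\t-\ntank\tallocated\t2147483648\t-"

        for _, line := range strings.Split(sample, "\n") {
            fields := strings.Split(line, "\t") // mirrors the new Split(l, "\t")
            fmt.Printf("%s %s = %s\n", fields[0], fields[1], fields[2])
        }
        // Splitting on tabs keeps empty fields and values containing spaces
        // intact, which the old strings.Fields call would have mangled.
    }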
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
index f4671ec1c..f06d37740 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,15 @@
+## 2.10.0
+
+### Features
+- feat(ginkgo/generators): add --tags flag (#1216) [a782a77]
+ adds a new --tags flag to ginkgo generate
+
+### Fixes
+- Fix broken link of MIGRATING_TO_V2.md (#1217) [548d78e]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.9.1 to 0.9.3 (#1215) [2b76a5e]
+
## 2.9.7
### Fixes
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
index 48d23f919..be01dec97 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
@@ -32,6 +32,9 @@ func BuildGenerateCommand() command.Command {
{Name: "template-data", KeyPath: "CustomTemplateData",
UsageArgument: "template-data-file",
Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
+ {Name: "tags", KeyPath: "Tags",
+ UsageArgument: "build-tags",
+ Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"},
},
&conf,
types.GinkgoFlagSections{},
@@ -59,6 +62,7 @@ You can also pass a <filename> of the form "file.go" and generate will emit "fil
}
type specData struct {
+ BuildTags string
Package string
Subject string
PackageImportPath string
@@ -93,6 +97,7 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
}
data := specData{
+ BuildTags: getBuildTags(conf.Tags),
Package: determinePackageName(packageName, conf.Internal),
Subject: formattedName,
PackageImportPath: getPackageImportPath(),
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
index c3470adbf..4dab07d03 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
@@ -1,6 +1,7 @@
package generators
-var specText = `package {{.Package}}
+var specText = `{{.BuildTags}}
+package {{.Package}}
import (
{{.GinkgoImport}}
@@ -14,7 +15,8 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
})
`
-var agoutiSpecText = `package {{.Package}}
+var agoutiSpecText = `{{.BuildTags}}
+package {{.Package}}
import (
{{.GinkgoImport}}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
index 3046a4487..28c7aa6f4 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
@@ -1,6 +1,7 @@
package generators
import (
+ "fmt"
"go/build"
"os"
"path/filepath"
@@ -14,6 +15,7 @@ type GeneratorsConfig struct {
Agouti, NoDot, Internal bool
CustomTemplate string
CustomTemplateData string
+ Tags string
}
func getPackageAndFormattedName() (string, string, string) {
@@ -62,3 +64,13 @@ func determinePackageName(name string, internal bool) string {
return name + "_test"
}
+
+// getBuildTags returns the build-tags line to be added to the generated file.
+// If the input string is not empty, it returns a `//go:build <tags>` string;
+// otherwise it returns an empty string.
+func getBuildTags(tags string) string {
+ if tags != "" {
+ return fmt.Sprintf("//go:build %s\n", tags)
+ }
+ return ""
+}
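A tiny illustration of what the helper produces; the tag expression is a hypothetical example.

    package main

    import "fmt"

    // Same logic as the getBuildTags helper added above.
    func getBuildTags(tags string) string {
        if tags != "" {
            return fmt.Sprintf("//go:build %s\n", tags)
        }
        return ""
    }

    func main() {
        fmt.Print(getBuildTags("e2e,!unit")) // prints: //go:build e2e,!unit
        fmt.Print(getBuildTags(""))          // prints nothing
    }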
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
index 6bc46150e..b7ed8ff79 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/version.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
package types
-const VERSION = "2.9.7"
+const VERSION = "2.10.0"
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
index e0ae2cdd3..b2e329427 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
@@ -30,6 +30,7 @@ import (
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
)
// NewGetLogInfoParams creates a new GetLogInfoParams object,
@@ -76,6 +77,13 @@ GetLogInfoParams contains all the parameters to send to the API endpoint
Typically these are written to a http.Request.
*/
type GetLogInfoParams struct {
+
+ /* Stable.
+
+ Whether to return a stable checkpoint for the active shard
+ */
+ Stable *bool
+
timeout time.Duration
Context context.Context
HTTPClient *http.Client
@@ -93,7 +101,18 @@ func (o *GetLogInfoParams) WithDefaults() *GetLogInfoParams {
//
// All values with no default are reset to their zero value.
func (o *GetLogInfoParams) SetDefaults() {
- // no default values defined for this parameter
+ var (
+ stableDefault = bool(false)
+ )
+
+ val := GetLogInfoParams{
+ Stable: &stableDefault,
+ }
+
+ val.timeout = o.timeout
+ val.Context = o.Context
+ val.HTTPClient = o.HTTPClient
+ *o = val
}
// WithTimeout adds the timeout to the get log info params
@@ -129,6 +148,17 @@ func (o *GetLogInfoParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithStable adds the stable to the get log info params
+func (o *GetLogInfoParams) WithStable(stable *bool) *GetLogInfoParams {
+ o.SetStable(stable)
+ return o
+}
+
+// SetStable adds the stable to the get log info params
+func (o *GetLogInfoParams) SetStable(stable *bool) {
+ o.Stable = stable
+}
+
// WriteToRequest writes these params to a swagger request
func (o *GetLogInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
@@ -137,6 +167,23 @@ func (o *GetLogInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Re
}
var res []error
+ if o.Stable != nil {
+
+ // query param stable
+ var qrStable bool
+
+ if o.Stable != nil {
+ qrStable = *o.Stable
+ }
+ qStable := swag.FormatBool(qrStable)
+ if qStable != "" {
+
+ if err := r.SetQueryParam("stable", qStable); err != nil {
+ return err
+ }
+ }
+ }
+
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
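A hedged sketch of requesting a stable checkpoint through the new parameter, using the generated rekor client facade; the host and the response field access are assumptions based on the generated API.

    package main

    import (
        "log"

        "github.com/go-openapi/swag"
        "github.com/sigstore/rekor/pkg/generated/client"
        "github.com/sigstore/rekor/pkg/generated/client/tlog"
    )

    func main() {
        // Hypothetical host; the public Rekor instance is used as an example.
        rekorClient := client.NewHTTPClientWithConfig(nil,
            client.DefaultTransportConfig().WithHost("rekor.sigstore.dev"))

        // stable=true asks the server for a stable checkpoint of the active shard.
        params := tlog.NewGetLogInfoParams().WithStable(swag.Bool(true))
        resp, err := rekorClient.Tlog.GetLogInfo(params)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("tree size: %d", swag.Int64Value(resp.Payload.TreeSize))
    }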
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go
new file mode 100644
index 000000000..dde562054
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// DSSE DSSE envelope
+//
+// swagger:model dsse
+type DSSE struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *DSSE) Kind() string {
+ return "dsse"
+}
+
+// SetKind sets the kind of this subtype
+func (m *DSSE) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *DSSE) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types. */
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result DSSE
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m DSSE) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this dsse
+func (m *DSSE) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSE) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validate this dsse based on the context it is used
+func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSE) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSE) UnmarshalBinary(b []byte) error {
+ var res DSSE
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
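To illustrate the polymorphic (de)serialization above, a minimal sketch; the JSON literal is hypothetical:

    raw := []byte(`{"kind":"dsse","apiVersion":"0.0.1","spec":{}}`)
    var entry models.DSSE
    if err := entry.UnmarshalJSON(raw); err != nil {
        // a "kind" other than "dsse" yields a 422 validation error here
    }
    out, _ := entry.MarshalJSON() // Kind() re-injects "kind":"dsse"
    _ = out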
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go
new file mode 100644
index 000000000..779562643
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// DSSESchema DSSE Schema
+//
+// log entry schema for dsse envelopes
+//
+// swagger:model dsseSchema
+type DSSESchema interface{}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
new file mode 100644
index 000000000..a28dd5244
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
@@ -0,0 +1,665 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// DSSEV001Schema DSSE v0.0.1 Schema
+//
+// # Schema for DSSE envelopes
+//
+// swagger:model dsseV001Schema
+type DSSEV001Schema struct {
+
+ // envelope hash
+ EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"`
+
+ // payload hash
+ PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"`
+
+ // proposed content
+ ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"`
+
+ // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings
+ // Read Only: true
+ // Min Items: 1
+ Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"`
+}
+
+// Validate validates this dsse v001 schema
+func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelopeHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePayloadHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateProposedContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignatures(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.EnvelopeHash) { // not required
+ return nil
+ }
+
+ if m.EnvelopeHash != nil {
+ if err := m.EnvelopeHash.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("envelopeHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("envelopeHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("payloadHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("payloadHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error {
+ if swag.IsZero(m.ProposedContent) { // not required
+ return nil
+ }
+
+ if m.ProposedContent != nil {
+ if err := m.ProposedContent.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("proposedContent")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("proposedContent")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error {
+ if swag.IsZero(m.Signatures) { // not required
+ return nil
+ }
+
+ iSignaturesSize := int64(len(m.Signatures))
+
+ if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Signatures); i++ {
+ if swag.IsZero(m.Signatures[i]) { // not required
+ continue
+ }
+
+ if m.Signatures[i] != nil {
+ if err := m.Signatures[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("signatures" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this dsse v001 schema based on the context it is used
+func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateProposedContent(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSignatures(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.EnvelopeHash != nil {
+ if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("envelopeHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("envelopeHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("payloadHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("payloadHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.ProposedContent != nil {
+ if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("proposedContent")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("proposedContent")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error {
+
+ if err := validate.ReadOnly(ctx, "signatures", "body", []*DSSEV001SchemaSignaturesItems0(m.Signatures)); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Signatures); i++ {
+
+ if m.Signatures[i] != nil {
+ if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("signatures" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error {
+ var res DSSEV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor
+//
+// swagger:model DSSEV001SchemaEnvelopeHash
+type DSSEV001SchemaEnvelopeHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: [sha256]
+ Algorithm *string `json:"algorithm"`
+
+ // The value of the computed digest over the entire envelope
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this DSSE v001 schema envelope hash
+func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []interface{}
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256"
+ DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this DSSE v001 schema envelope hash based on the context it is used
+func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaEnvelopeHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope
+//
+// swagger:model DSSEV001SchemaPayloadHash
+type DSSEV001SchemaPayloadHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: [sha256]
+ Algorithm *string `json:"algorithm"`
+
+ // The value of the computed digest over the payload within the envelope
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this DSSE v001 schema payload hash
+func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []interface{}
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256"
+ DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this DSSE v001 schema payload hash based on the context it is used
+func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaPayloadHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaProposedContent DSSE v001 schema proposed content
+//
+// swagger:model DSSEV001SchemaProposedContent
+type DSSEV001SchemaProposedContent struct {
+
+ // DSSE envelope specified as a stringified JSON object
+ // Required: true
+ Envelope *string `json:"envelope"`
+
+ // collection of all verification material (e.g. public keys or certificates) used to verify signatures over the envelope's payload, specified as base64-encoded strings
+ // Required: true
+ // Min Items: 1
+ Verifiers []strfmt.Base64 `json:"verifiers"`
+}
+
+// Validate validates this DSSE v001 schema proposed content
+func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelope(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateVerifiers(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error {
+
+ if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error {
+
+ if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil {
+ return err
+ }
+
+ iVerifiersSize := int64(len(m.Verifiers))
+
+ if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema proposed content based on context it is used
+func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaProposedContent
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature
+//
+// swagger:model DSSEV001SchemaSignaturesItems0
+type DSSEV001SchemaSignaturesItems0 struct {
+
+ // base64 encoded signature of the payload
+ // Required: true
+ // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$
+ Signature *string `json:"signature"`
+
+ // verification material that was used to verify the corresponding signature, specified as a base64 encoded string
+ // Required: true
+ // Format: byte
+ Verifier *strfmt.Base64 `json:"verifier"`
+}
+
+// Validate validates this DSSE v001 schema signatures items0
+func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateSignature(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateVerifier(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature", "body", m.Signature); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error {
+
+ if err := validate.Required("verifier", "body", m.Verifier); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema signatures items0 based on context it is used
+func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaSignaturesItems0
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
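A hedged sketch of constructing and validating the new model; the envelope string and verifier bytes below are placeholders, not real material:

    env := `{"payloadType":"application/vnd.in-toto+json","payload":"...","signatures":[]}`
    verifier := strfmt.Base64("-----BEGIN PUBLIC KEY-----...")
    proposed := models.DSSEV001Schema{
        ProposedContent: &models.DSSEV001SchemaProposedContent{
            Envelope:  &env,
            Verifiers: []strfmt.Base64{verifier},
        },
    }
    if err := proposed.Validate(strfmt.Default); err != nil {
        // fails when envelope is nil or verifiers has fewer than 1 item (MinItems: 1)
    }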
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
index 3297e5a91..816435cb2 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
@@ -450,25 +450,25 @@ type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct {
Keyid string `json:"keyid,omitempty"`
// public key that corresponds to this signature
- // Read Only: true
+ // Required: true
// Format: byte
- PublicKey strfmt.Base64 `json:"publicKey,omitempty"`
+ PublicKey *strfmt.Base64 `json:"publicKey"`
// signature of the payload
+ // Required: true
// Format: byte
- Sig strfmt.Base64 `json:"sig,omitempty"`
+ Sig *strfmt.Base64 `json:"sig"`
}
// Validate validates this intoto v002 schema content envelope signatures items0
func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error {
- return nil
-}
-
-// ContextValidate validate this intoto v002 schema content envelope signatures items0 based on the context it is used
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
- if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSig(formats); err != nil {
res = append(res, err)
}
@@ -478,15 +478,29 @@ func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx co
return nil
}
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error {
- if err := validate.ReadOnly(ctx, "publicKey", "body", strfmt.Base64(m.PublicKey)); err != nil {
+ if err := validate.Required("sig", "body", m.Sig); err != nil {
return err
}
return nil
}
+// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
// MarshalBinary interface implementation
func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) {
if m == nil {
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
index 76b28019c..5b734a5ff 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
@@ -126,6 +126,12 @@ func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEnt
return nil, err
}
return &result, nil
+ case "dsse":
+ var result DSSE
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
case "hashedrekord":
var result Hashedrekord
if err := consumer.Consume(buf2, &result); err != nil {
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
index f8bf4b020..db5d8a3a9 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
@@ -195,11 +195,30 @@ func (m *TUFV001Schema) UnmarshalBinary(b []byte) error {
type TUFV001SchemaMetadata struct {
// Specifies the metadata inline within the document
- Content interface{} `json:"content,omitempty"`
+ // Required: true
+ Content interface{} `json:"content"`
}
// Validate validates this TUF v001 schema metadata
func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error {
+
+ if m.Content == nil {
+ return errors.Required("metadata"+"."+"content", "body", nil)
+ }
+
return nil
}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go
index ee6059e2b..94dc68c66 100644
--- a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go
+++ b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go
@@ -25,7 +25,6 @@ import (
"strings"
"time"
- "github.com/google/trillian/types"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/sigstore/sigstore/pkg/signature/options"
)
@@ -168,11 +167,11 @@ func (r *SignedCheckpoint) GetTimestamp() uint64 {
}
// CreateAndSignCheckpoint creates a signed checkpoint as a commitment to the current root hash
-func CreateAndSignCheckpoint(ctx context.Context, hostname string, treeID int64, root *types.LogRootV1, signer signature.Signer) ([]byte, error) {
+func CreateAndSignCheckpoint(ctx context.Context, hostname string, treeID int64, treeSize uint64, rootHash []byte, signer signature.Signer) ([]byte, error) {
sth, err := CreateSignedCheckpoint(Checkpoint{
Origin: fmt.Sprintf("%s - %d", hostname, treeID),
- Size: root.TreeSize,
- Hash: root.RootHash,
+ Size: treeSize,
+ Hash: rootHash,
})
if err != nil {
return nil, fmt.Errorf("error creating checkpoint: %v", err)
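Callers migrate by unpacking the tree size and root hash themselves instead of passing a trillian *types.LogRootV1; a sketch (variable names are assumptions):

    // before: util.CreateAndSignCheckpoint(ctx, hostname, treeID, logRoot, signer)
    sth, err := util.CreateAndSignCheckpoint(ctx, hostname, treeID, logRoot.TreeSize, logRoot.RootHash, signer)

This also lets the file drop its github.com/google/trillian/types dependency, per the removed import above.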
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
index c0ab196cf..14989beaf 100644
--- a/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -14,7 +14,7 @@ import (
// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the the private key
+// PEM container or not. If so, it extracts the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index b4723fcac..58901bda5 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -55,12 +55,18 @@ type Token struct {
}
// tokenJSON is the struct representing the HTTP response from OAuth2
-// providers returning a token in JSON form.
+// providers returning a token or error in JSON form.
+// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1
type tokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+ // error fields
+ // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
+ ErrorCode string `json:"error"`
+ ErrorDescription string `json:"error_description"`
+ ErrorURI string `json:"error_uri"`
}
func (e *tokenJSON) expiry() (t time.Time) {
@@ -236,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
- if code := r.StatusCode; code < 200 || code > 299 {
- return nil, &RetrieveError{
- Response: r,
- Body: body,
- }
+
+ failureStatus := r.StatusCode < 200 || r.StatusCode > 299
+ retrieveError := &RetrieveError{
+ Response: r,
+ Body: body,
+ // attempt to populate error detail below
}
var token *Token
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
switch content {
case "application/x-www-form-urlencoded", "text/plain":
+ // some endpoints return a query string
vals, err := url.ParseQuery(string(body))
if err != nil {
- return nil, err
+ if failureStatus {
+ return nil, retrieveError
+ }
+ return nil, fmt.Errorf("oauth2: cannot parse response: %v", err)
}
+ retrieveError.ErrorCode = vals.Get("error")
+ retrieveError.ErrorDescription = vals.Get("error_description")
+ retrieveError.ErrorURI = vals.Get("error_uri")
token = &Token{
AccessToken: vals.Get("access_token"),
TokenType: vals.Get("token_type"),
@@ -265,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
default:
var tj tokenJSON
if err = json.Unmarshal(body, &tj); err != nil {
- return nil, err
+ if failureStatus {
+ return nil, retrieveError
+ }
+ return nil, fmt.Errorf("oauth2: cannot parse json: %v", err)
}
+ retrieveError.ErrorCode = tj.ErrorCode
+ retrieveError.ErrorDescription = tj.ErrorDescription
+ retrieveError.ErrorURI = tj.ErrorURI
token = &Token{
AccessToken: tj.AccessToken,
TokenType: tj.TokenType,
@@ -276,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
+ // According to the spec (https://www.rfc-editor.org/rfc/rfc6749#section-5.2),
+ // servers should respond with status 400 in the error case, but some
+ // unorthodox servers respond with 200 in the error case.
+ if failureStatus || retrieveError.ErrorCode != "" {
+ return nil, retrieveError
+ }
if token.AccessToken == "" {
return nil, errors.New("oauth2: server response missing access_token")
}
return token, nil
}
+// mirrors oauth2.RetrieveError
type RetrieveError struct {
- Response *http.Response
- Body []byte
+ Response *http.Response
+ Body []byte
+ ErrorCode string
+ ErrorDescription string
+ ErrorURI string
}
func (r *RetrieveError) Error() string {
+ if r.ErrorCode != "" {
+ s := fmt.Sprintf("oauth2: %q", r.ErrorCode)
+ if r.ErrorDescription != "" {
+ s += fmt.Sprintf(" %q", r.ErrorDescription)
+ }
+ if r.ErrorURI != "" {
+ s += fmt.Sprintf(" %q", r.ErrorURI)
+ }
+ return s
+ }
return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 7c64006de..5ffce9764 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -175,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error)
}
// RetrieveError is the error returned when the token endpoint returns a
-// non-2XX HTTP status code.
+// non-2XX HTTP status code or populates RFC 6749's 'error' parameter.
+// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
type RetrieveError struct {
Response *http.Response
// Body is the body that was consumed by reading Response.Body.
// It may be truncated.
Body []byte
+ // ErrorCode is RFC 6749's 'error' parameter.
+ ErrorCode string
+ // ErrorDescription is RFC 6749's 'error_description' parameter.
+ ErrorDescription string
+ // ErrorURI is RFC 6749's 'error_uri' parameter.
+ ErrorURI string
}
func (r *RetrieveError) Error() string {
+ if r.ErrorCode != "" {
+ s := fmt.Sprintf("oauth2: %q", r.ErrorCode)
+ if r.ErrorDescription != "" {
+ s += fmt.Sprintf(" %q", r.ErrorDescription)
+ }
+ if r.ErrorURI != "" {
+ s += fmt.Sprintf(" %q", r.ErrorURI)
+ }
+ return s
+ }
return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
}
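Downstream code can now surface the structured RFC 6749 fields; a hedged sketch using the public type, where conf is an assumed *oauth2.Config and authCode is hypothetical:

    tok, err := conf.Exchange(ctx, authCode)
    if err != nil {
        var rErr *oauth2.RetrieveError
        if errors.As(err, &rErr) {
            log.Printf("token endpoint: %s: %s (%s)", rErr.ErrorCode, rErr.ErrorDescription, rErr.ErrorURI)
        }
    }
    _ = tok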
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index 165ede0f8..03543bd4b 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
// (from "version"). Select appropriate importer.
if len(data) > 0 {
switch data[0] {
- case 'i':
- _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
- return pkg, err
+ case 'v', 'c', 'd': // binary, till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
- case 'v', 'c', 'd':
- _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
+ case 'i': // indexed, till go1.19
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
- case 'u':
+ case 'u': // unified, from go1.20
_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
return pkg, err
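A brief sketch of the caller-visible effect: pre-go1.11 binary export data is now rejected instead of decoded (reader and fset are assumed to be set up by the caller):

    imports := make(map[string]*types.Package)
    pkg, err := gcexportdata.Read(reader, fset, imports, "example.com/p")
    // for legacy data starting with 'v', 'c', or 'd', err now reports that the
    // binary import format is no longer supported
    _ = pkg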
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 6bb7168d2..e84f19dfa 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -625,7 +625,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
}
if pkg.PkgPath == "unsafe" {
- pkg.GoFiles = nil // ignore fake unsafe.go file
+ pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+ } else if len(pkg.CompiledGoFiles) == 0 {
+ // Work around for pre-go.1.11 versions of go list.
+ // TODO(matloob): they should be handled by the fallback.
+ // Can we delete this?
+ pkg.CompiledGoFiles = pkg.GoFiles
}
// Assume go list emits only absolute paths for Dir.
@@ -663,13 +668,6 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
response.Roots = append(response.Roots, pkg.ID)
}
- // Work around for pre-go.1.11 versions of go list.
- // TODO(matloob): they should be handled by the fallback.
- // Can we delete this?
- if len(pkg.CompiledGoFiles) == 0 {
- pkg.CompiledGoFiles = pkg.GoFiles
- }
-
// Temporary work-around for golang/go#39986. Parse filenames out of
// error messages. This happens if there are unrecoverable syntax
// errors in the source, so we can't match on a specific error message.
@@ -891,6 +889,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string {
// probably because you'd just get the TestMain.
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),
}
+
+ // golang/go#60456: with go1.21 and later, go list serves pgo variants, which
+ // can be costly to compute and may result in redundant processing for the
+ // caller. Disable these variants. If someone wants to add e.g. a NeedPGO
+ // mode flag, that should be a separate proposal.
+ if goVersion >= 21 {
+ fullargs = append(fullargs, "-pgo=off")
+ }
+
fullargs = append(fullargs, cfg.BuildFlags...)
fullargs = append(fullargs, "--")
fullargs = append(fullargs, words...)
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 0f1505b80..632be722a 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -308,6 +308,9 @@ type Package struct {
TypeErrors []types.Error
// GoFiles lists the absolute file paths of the package's Go source files.
+ // It may include files that should not be compiled, for example because
+ // they contain non-matching build tags, are documentary pseudo-files such as
+ // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.
GoFiles []string
// CompiledGoFiles lists the absolute file paths of the package's source
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
deleted file mode 100644
index aa7dfaccf..000000000
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ /dev/null
@@ -1,764 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package objectpath defines a naming scheme for types.Objects
-// (that is, named entities in Go programs) relative to their enclosing
-// package.
-//
-// Type-checker objects are canonical, so they are usually identified by
-// their address in memory (a pointer), but a pointer has meaning only
-// within one address space. By contrast, objectpath names allow the
-// identity of an object to be sent from one program to another,
-// establishing a correspondence between types.Object variables that are
-// distinct but logically equivalent.
-//
-// A single object may have multiple paths. In this example,
-//
-// type A struct{ X int }
-// type B A
-//
-// the field X has two paths due to its membership of both A and B.
-// The For(obj) function always returns one of these paths, arbitrarily
-// but consistently.
-package objectpath
-
-import (
- "fmt"
- "go/types"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/tools/internal/typeparams"
-
- _ "unsafe" // for go:linkname
-)
-
-// A Path is an opaque name that identifies a types.Object
-// relative to its package. Conceptually, the name consists of a
-// sequence of destructuring operations applied to the package scope
-// to obtain the original object.
-// The name does not include the package itself.
-type Path string
-
-// Encoding
-//
-// An object path is a textual and (with training) human-readable encoding
-// of a sequence of destructuring operators, starting from a types.Package.
-// The sequences represent a path through the package/object/type graph.
-// We classify these operators by their type:
-//
-// PO package->object Package.Scope.Lookup
-// OT object->type Object.Type
-// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
-// TO type->object Type.{At,Field,Method,Obj} [AFMO]
-//
-// All valid paths start with a package and end at an object
-// and thus may be defined by the regular language:
-//
-// objectpath = PO (OT TT* TO)*
-//
-// The concrete encoding follows directly:
-// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
-// - The only OT operator is Object.Type,
-// which we encode as '.' because dot cannot appear in an identifier.
-// - The TT operators are encoded as [EKPRUTC];
-// one of these (TypeParam) requires an integer operand,
-// which is encoded as a string of decimal digits.
-// - The TO operators are encoded as [AFMO];
-// three of these (At,Field,Method) require an integer operand,
-// which is encoded as a string of decimal digits.
-// These indices are stable across different representations
-// of the same package, even source and export data.
-// The indices used are implementation specific and may not correspond to
-// the argument to the go/types function.
-//
-// In the example below,
-//
-// package p
-//
-// type T interface {
-// f() (a string, b struct{ X int })
-// }
-//
-// field X has the path "T.UM0.RA1.F0",
-// representing the following sequence of operations:
-//
-// p.Lookup("T") T
-// .Type().Underlying().Method(0). f
-// .Type().Results().At(1) b
-// .Type().Field(0) X
-//
-// The encoding is not maximally compact---every R or P is
-// followed by an A, for example---but this simplifies the
-// encoder and decoder.
-const (
- // object->type operators
- opType = '.' // .Type() (Object)
-
- // type->type operators
- opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
- opKey = 'K' // .Key() (Map)
- opParams = 'P' // .Params() (Signature)
- opResults = 'R' // .Results() (Signature)
- opUnderlying = 'U' // .Underlying() (Named)
- opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
- opConstraint = 'C' // .Constraint() (TypeParam)
-
- // type->object operators
- opAt = 'A' // .At(i) (Tuple)
- opField = 'F' // .Field(i) (Struct)
- opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
- opObj = 'O' // .Obj() (Named, TypeParam)
-)
-
-// For is equivalent to new(Encoder).For(obj).
-//
-// It may be more efficient to reuse a single Encoder across several calls.
-func For(obj types.Object) (Path, error) {
- return new(Encoder).For(obj)
-}
-
-// An Encoder amortizes the cost of encoding the paths of multiple objects.
-// The zero value of an Encoder is ready to use.
-type Encoder struct {
- scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names()
- namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods()
-}
-
-// For returns the path to an object relative to its package,
-// or an error if the object is not accessible from the package's Scope.
-//
-// The For function guarantees to return a path only for the following objects:
-// - package-level types
-// - exported package-level non-types
-// - methods
-// - parameter and result variables
-// - struct fields
-// These objects are sufficient to define the API of their package.
-// The objects described by a package's export data are drawn from this set.
-//
-// For does not return a path for predeclared names, imported package
-// names, local names, and unexported package-level names (except
-// types).
-//
-// Example: given this definition,
-//
-// package p
-//
-// type T interface {
-// f() (a string, b struct{ X int })
-// }
-//
-// For(X) would return a path that denotes the following sequence of operations:
-//
-// p.Scope().Lookup("T") (TypeName T)
-// .Type().Underlying().Method(0). (method Func f)
-// .Type().Results().At(1) (field Var b)
-// .Type().Field(0) (field Var X)
-//
-// where p is the package (*types.Package) to which X belongs.
-func (enc *Encoder) For(obj types.Object) (Path, error) {
- pkg := obj.Pkg()
-
- // This table lists the cases of interest.
- //
- // Object Action
- // ------ ------
- // nil reject
- // builtin reject
- // pkgname reject
- // label reject
- // var
- // package-level accept
- // func param/result accept
- // local reject
- // struct field accept
- // const
- // package-level accept
- // local reject
- // func
- // package-level accept
- // init functions reject
- // concrete method accept
- // interface method accept
- // type
- // package-level accept
- // local reject
- //
- // The only accessible package-level objects are members of pkg itself.
- //
- // The cases are handled in four steps:
- //
- // 1. reject nil and builtin
- // 2. accept package-level objects
- // 3. reject obviously invalid objects
- // 4. search the API for the path to the param/result/field/method.
-
- // 1. reference to nil or builtin?
- if pkg == nil {
- return "", fmt.Errorf("predeclared %s has no path", obj)
- }
- scope := pkg.Scope()
-
- // 2. package-level object?
- if scope.Lookup(obj.Name()) == obj {
- // Only exported objects (and non-exported types) have a path.
- // Non-exported types may be referenced by other objects.
- if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
- return "", fmt.Errorf("no path for non-exported %v", obj)
- }
- return Path(obj.Name()), nil
- }
-
- // 3. Not a package-level object.
- // Reject obviously non-viable cases.
- switch obj := obj.(type) {
- case *types.TypeName:
- if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
- // With the exception of type parameters, only package-level type names
- // have a path.
- return "", fmt.Errorf("no path for %v", obj)
- }
- case *types.Const, // Only package-level constants have a path.
- *types.Label, // Labels are function-local.
- *types.PkgName: // PkgNames are file-local.
- return "", fmt.Errorf("no path for %v", obj)
-
- case *types.Var:
- // Could be:
- // - a field (obj.IsField())
- // - a func parameter or result
- // - a local var.
- // Sadly there is no way to distinguish
- // a param/result from a local
- // so we must proceed to the find.
-
- case *types.Func:
- // A func, if not package-level, must be a method.
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
- return "", fmt.Errorf("func is not a method: %v", obj)
- }
-
- if path, ok := enc.concreteMethod(obj); ok {
- // Fast path for concrete methods that avoids looping over scope.
- return path, nil
- }
-
- default:
- panic(obj)
- }
-
- // 4. Search the API for the path to the var (field/param/result) or method.
-
- // First inspect package-level named types.
- // In the presence of path aliases, these give
- // the best paths because non-types may
- // refer to types, but not the reverse.
- empty := make([]byte, 0, 48) // initial space
- names := enc.scopeNames(scope)
- for _, name := range names {
- o := scope.Lookup(name)
- tname, ok := o.(*types.TypeName)
- if !ok {
- continue // handle non-types in second pass
- }
-
- path := append(empty, name...)
- path = append(path, opType)
-
- T := o.Type()
-
- if tname.IsAlias() {
- // type alias
- if r := find(obj, T, path, nil); r != nil {
- return Path(r), nil
- }
- } else {
- if named, _ := T.(*types.Named); named != nil {
- if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil {
- // generic named type
- return Path(r), nil
- }
- }
- // defined (named) type
- if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
- return Path(r), nil
- }
- }
- }
-
- // Then inspect everything else:
- // non-types, and declared methods of defined types.
- for _, name := range names {
- o := scope.Lookup(name)
- path := append(empty, name...)
- if _, ok := o.(*types.TypeName); !ok {
- if o.Exported() {
- // exported non-type (const, var, func)
- if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
- return Path(r), nil
- }
- }
- continue
- }
-
- // Inspect declared methods of defined types.
- if T, ok := o.Type().(*types.Named); ok {
- path = append(path, opType)
- // Note that method index here is always with respect
- // to canonical ordering of methods, regardless of how
- // they appear in the underlying type.
- for i, m := range enc.namedMethods(T) {
- path2 := appendOpArg(path, opMethod, i)
- if m == obj {
- return Path(path2), nil // found declared method
- }
- if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
- return Path(r), nil
- }
- }
- }
- }
-
- return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
-}
-
-func appendOpArg(path []byte, op byte, arg int) []byte {
- path = append(path, op)
- path = strconv.AppendInt(path, int64(arg), 10)
- return path
-}
-
-// concreteMethod returns the path for meth, which must have a non-nil receiver.
-// The second return value indicates success and may be false if the method is
-// an interface method or if it is an instantiated method.
-//
-// This function is just an optimization that avoids the general scope walking
-// approach. You are expected to fall back to the general approach if this
-// function fails.
-func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
- // Concrete methods can only be declared on package-scoped named types. For
- // that reason we can skip the expensive walk over the package scope: the
- // path will always be package -> named type -> method. We can trivially get
- // the type name from the receiver, and only have to look over the type's
- // methods to find the method index.
- //
- // Methods on generic types require special consideration, however. Consider
- // the following package:
- //
- // L1: type S[T any] struct{}
- // L2: func (recv S[A]) Foo() { recv.Bar() }
- // L3: func (recv S[B]) Bar() { }
- // L4: type Alias = S[int]
- // L5: func _[T any]() { var s S[int]; s.Foo() }
- //
- // The receivers of methods on generic types are instantiations. L2 and L3
- // instantiate S with the type-parameters A and B, which are scoped to the
- // respective methods. L4 and L5 each instantiate S with int. Each of these
- // instantiations has its own method set, full of methods (and thus objects)
- // with receivers whose types are the respective instantiations. In other
- // words, we have
- //
- // S[A].Foo, S[A].Bar
- // S[B].Foo, S[B].Bar
- // S[int].Foo, S[int].Bar
- //
- // We may thus be trying to produce object paths for any of these objects.
- //
- // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
- // and S.Bar, which are the paths that this function naturally produces.
- //
- // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
- // don't correspond to the origin methods. For S[int], this is significant.
- // The most precise object path for S[int].Foo, for example, is Alias.Foo,
- // not S.Foo. Our function, however, would produce S.Foo, which would
- // resolve to a different object.
- //
- // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
- // still the correct paths, since only the origin methods have meaningful
- // paths. But this is likely only true for trivial cases and has edge cases.
- // Since this function is only an optimization, we err on the side of giving
- // up, deferring to the slower but definitely correct algorithm. Most users
- // of objectpath will only be giving us origin methods, anyway, as referring
- // to instantiated methods is usually not useful.
-
- if typeparams.OriginMethod(meth) != meth {
- return "", false
- }
-
- recvT := meth.Type().(*types.Signature).Recv().Type()
- if ptr, ok := recvT.(*types.Pointer); ok {
- recvT = ptr.Elem()
- }
-
- named, ok := recvT.(*types.Named)
- if !ok {
- return "", false
- }
-
- if types.IsInterface(named) {
- // Named interfaces don't have to be package-scoped
- //
- // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
- // methods, too, I think.
- return "", false
- }
-
- // Preallocate space for the name, opType, opMethod, and some digits.
- name := named.Obj().Name()
- path := make([]byte, 0, len(name)+8)
- path = append(path, name...)
- path = append(path, opType)
- for i, m := range enc.namedMethods(named) {
- if m == meth {
- path = appendOpArg(path, opMethod, i)
- return Path(path), true
- }
- }
-
- // Due to golang/go#59944, go/types fails to associate the receiver with
- // certain methods on cgo types.
- //
- // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go
- // versions gopls supports.
- return "", false
- // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named)))
-}
-
-// find finds obj within type T, returning the path to it, or nil if not found.
-//
-// The seen map is used to short circuit cycles through type parameters. If
-// nil, it will be allocated as necessary.
-func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
- switch T := T.(type) {
- case *types.Basic, *types.Named:
- // Named types belonging to pkg were handled already,
- // so T must belong to another package. No path.
- return nil
- case *types.Pointer:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Slice:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Array:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Chan:
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Map:
- if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
- return r
- }
- return find(obj, T.Elem(), append(path, opElem), seen)
- case *types.Signature:
- if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil {
- return r
- }
- if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
- return r
- }
- return find(obj, T.Results(), append(path, opResults), seen)
- case *types.Struct:
- for i := 0; i < T.NumFields(); i++ {
- fld := T.Field(i)
- path2 := appendOpArg(path, opField, i)
- if fld == obj {
- return path2 // found field var
- }
- if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
- return r
- }
- }
- return nil
- case *types.Tuple:
- for i := 0; i < T.Len(); i++ {
- v := T.At(i)
- path2 := appendOpArg(path, opAt, i)
- if v == obj {
- return path2 // found param/result var
- }
- if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
- return r
- }
- }
- return nil
- case *types.Interface:
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
- path2 := appendOpArg(path, opMethod, i)
- if m == obj {
- return path2 // found interface method
- }
- if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
- return r
- }
- }
- return nil
- case *typeparams.TypeParam:
- name := T.Obj()
- if name == obj {
- return append(path, opObj)
- }
- if seen[name] {
- return nil
- }
- if seen == nil {
- seen = make(map[*types.TypeName]bool)
- }
- seen[name] = true
- if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
- return r
- }
- return nil
- }
- panic(T)
-}
-
-func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
- for i := 0; i < list.Len(); i++ {
- tparam := list.At(i)
- path2 := appendOpArg(path, opTypeParam, i)
- if r := find(obj, tparam, path2, seen); r != nil {
- return r
- }
- }
- return nil
-}
-
-// Object returns the object denoted by path p within the package pkg.
-func Object(pkg *types.Package, p Path) (types.Object, error) {
- if p == "" {
- return nil, fmt.Errorf("empty path")
- }
-
- pathstr := string(p)
- var pkgobj, suffix string
- if dot := strings.IndexByte(pathstr, opType); dot < 0 {
- pkgobj = pathstr
- } else {
- pkgobj = pathstr[:dot]
- suffix = pathstr[dot:] // suffix starts with "."
- }
-
- obj := pkg.Scope().Lookup(pkgobj)
- if obj == nil {
- return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
- }
-
- // abstraction of *types.{Pointer,Slice,Array,Chan,Map}
- type hasElem interface {
- Elem() types.Type
- }
- // abstraction of *types.{Named,Signature}
- type hasTypeParams interface {
- TypeParams() *typeparams.TypeParamList
- }
- // abstraction of *types.{Named,TypeParam}
- type hasObj interface {
- Obj() *types.TypeName
- }
-
- // The loop state is the pair (t, obj),
- // exactly one of which is non-nil, initially obj.
- // All suffixes start with '.' (the only object->type operation),
- // followed by optional type->type operations,
- // then a type->object operation.
- // The cycle then repeats.
- var t types.Type
- for suffix != "" {
- code := suffix[0]
- suffix = suffix[1:]
-
- // Codes [AFMT] have an integer operand.
- var index int
- switch code {
- case opAt, opField, opMethod, opTypeParam:
- rest := strings.TrimLeft(suffix, "0123456789")
- numerals := suffix[:len(suffix)-len(rest)]
- suffix = rest
- i, err := strconv.Atoi(numerals)
- if err != nil {
- return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
- }
- index = int(i)
- case opObj:
- // no operand
- default:
- // The suffix must end with a type->object operation.
- if suffix == "" {
- return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
- }
- }
-
- if code == opType {
- if t != nil {
- return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
- }
- t = obj.Type()
- obj = nil
- continue
- }
-
- if t == nil {
- return nil, fmt.Errorf("invalid path: code %q in object context", code)
- }
-
- // Inv: t != nil, obj == nil
-
- switch code {
- case opElem:
- hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
- }
- t = hasElem.Elem()
-
- case opKey:
- mapType, ok := t.(*types.Map)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
- }
- t = mapType.Key()
-
- case opParams:
- sig, ok := t.(*types.Signature)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
- }
- t = sig.Params()
-
- case opResults:
- sig, ok := t.(*types.Signature)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
- }
- t = sig.Results()
-
- case opUnderlying:
- named, ok := t.(*types.Named)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
- }
- t = named.Underlying()
-
- case opTypeParam:
- hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
- }
- tparams := hasTypeParams.TypeParams()
- if n := tparams.Len(); index >= n {
- return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
- }
- t = tparams.At(index)
-
- case opConstraint:
- tparam, ok := t.(*typeparams.TypeParam)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
- }
- t = tparam.Constraint()
-
- case opAt:
- tuple, ok := t.(*types.Tuple)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
- }
- if n := tuple.Len(); index >= n {
- return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
- }
- obj = tuple.At(index)
- t = nil
-
- case opField:
- structType, ok := t.(*types.Struct)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
- }
- if n := structType.NumFields(); index >= n {
- return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
- }
- obj = structType.Field(index)
- t = nil
-
- case opMethod:
- switch t := t.(type) {
- case *types.Interface:
- if index >= t.NumMethods() {
- return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
- }
- obj = t.Method(index) // Id-ordered
-
- case *types.Named:
- methods := namedMethods(t) // (unmemoized)
- if index >= len(methods) {
- return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods))
- }
- obj = methods[index] // Id-ordered
-
- default:
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
- }
- t = nil
-
- case opObj:
- hasObj, ok := t.(hasObj)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
- }
- obj = hasObj.Obj()
- t = nil
-
- default:
- return nil, fmt.Errorf("invalid path: unknown code %q", code)
- }
- }
-
- if obj.Pkg() != pkg {
- return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
- }
-
- return obj, nil // success
-}
-
-// namedMethods returns the methods of a Named type in ascending Id order.
-func namedMethods(named *types.Named) []*types.Func {
- methods := make([]*types.Func, named.NumMethods())
- for i := range methods {
- methods[i] = named.Method(i)
- }
- sort.Slice(methods, func(i, j int) bool {
- return methods[i].Id() < methods[j].Id()
- })
- return methods
-}
-
-// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
-func (enc *Encoder) namedMethods(named *types.Named) []*types.Func {
- m := enc.namedMethodsMemo
- if m == nil {
- m = make(map[*types.Named][]*types.Func)
- enc.namedMethodsMemo = m
- }
- methods, ok := m[named]
- if !ok {
- methods = namedMethods(named) // allocates and sorts
- m[named] = methods
- }
- return methods
-}
-
-// scopeNames is a memoization of scope.Names. Callers must not modify the result.
-func (enc *Encoder) scopeNames(scope *types.Scope) []string {
- m := enc.scopeNamesMemo
- if m == nil {
- m = make(map[*types.Scope][]string)
- enc.scopeNamesMemo = m
- }
- names, ok := m[scope]
- if !ok {
- names = scope.Names() // allocates and sorts
- m[scope] = names
- }
- return names
-}
diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go
new file mode 100644
index 000000000..ff2f2ecd3
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go
@@ -0,0 +1,59 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag provides the labels used for telemetry throughout gopls.
+package tag
+
+import (
+ "golang.org/x/tools/internal/event/keys"
+)
+
+var (
+ // create the label keys we use
+ Method = keys.NewString("method", "")
+ StatusCode = keys.NewString("status.code", "")
+ StatusMessage = keys.NewString("status.message", "")
+ RPCID = keys.NewString("id", "")
+ RPCDirection = keys.NewString("direction", "")
+ File = keys.NewString("file", "")
+ Directory = keys.New("directory", "")
+ URI = keys.New("URI", "")
+ Package = keys.NewString("package", "") // Package ID
+ PackagePath = keys.NewString("package_path", "")
+ Query = keys.New("query", "")
+ Snapshot = keys.NewUInt64("snapshot", "")
+ Operation = keys.NewString("operation", "")
+
+ Position = keys.New("position", "")
+ Category = keys.NewString("category", "")
+ PackageCount = keys.NewInt("packages", "")
+ Files = keys.New("files", "")
+ Port = keys.NewInt("port", "")
+ Type = keys.New("type", "")
+ HoverKind = keys.NewString("hoverkind", "")
+
+ NewServer = keys.NewString("new_server", "A new server was added")
+ EndServer = keys.NewString("end_server", "A server was shut down")
+
+ ServerID = keys.NewString("server", "The server ID an event is related to")
+ Logfile = keys.NewString("logfile", "")
+ DebugAddress = keys.NewString("debug_address", "")
+ GoplsPath = keys.NewString("gopls_path", "")
+ ClientID = keys.NewString("client_id", "")
+
+ Level = keys.NewInt("level", "The logging level")
+)
+
+var (
+ // create the stats we measure
+ Started = keys.NewInt64("started", "Count of started RPCs.")
+ ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes)
+ SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes)
+ Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
+)
+
+const (
+ Inbound = "in"
+ Outbound = "out"
+)
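These keys live in an internal package, so only code inside x/tools itself (gopls, the gocommand wrappers below) can use them. A sketch of the intended pattern, stamping a telemetry span with a label; the consumer package and handler name are hypothetical:

package lspdemo // hypothetical consumer inside x/tools

import (
	"context"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/event/tag"
)

// handleRequest stamps a span with the RPC method name so telemetry
// backends can group events by method.
func handleRequest(ctx context.Context, method string) {
	ctx, done := event.Start(ctx, "lsp.handleRequest", tag.Method.Of(method))
	defer done()
	_ = ctx // ... request handling elided ...
}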
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go
deleted file mode 100644
index 30582ed6d..000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go
+++ /dev/null
@@ -1,852 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
-// see that file for specification of the format.
-
-package gcimporter
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "go/constant"
- "go/token"
- "go/types"
- "math"
- "math/big"
- "sort"
- "strings"
-)
-
-// If debugFormat is set, each integer and string value is preceded by a marker
-// and position information in the encoding. This mechanism permits an importer
-// to recognize immediately when it is out of sync. The importer recognizes this
-// mode automatically (i.e., it can import export data produced with debugging
-// support even if debugFormat is not set at the time of import). This mode will
-// lead to massively larger export data (by a factor of 2 to 3) and should only
-// be enabled during development and debugging.
-//
-// NOTE: This flag is the first flag to enable if importing dies because of
-// (suspected) format errors, and whenever a change is made to the format.
-const debugFormat = false // default: false
-
-// Current export format version. Increase with each format change.
-//
-// Note: The latest binary (non-indexed) export format is at version 6.
-// This exporter is still at level 4, but it doesn't matter since
-// the binary importer can handle older versions just fine.
-//
-// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
-// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
-// 4: type name objects support type aliases, uses aliasTag
-// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
-// 2: removed unused bool in ODCL export (compiler only)
-// 1: header format change (more regular), export package for _ struct fields
-// 0: Go1.7 encoding
-const exportVersion = 4
-
-// trackAllTypes enables cycle tracking for all types, not just named
-// types. The existing compiler invariants assume that unnamed types
-// that are not completely set up are not used, or else there are spurious
-// errors.
-// If disabled, only named types are tracked, possibly leading to slightly
-// less efficient encoding in rare cases. It also prevents the export of
-// some corner-case type declarations (but those are not handled correctly
- // with the textual export format either).
-// TODO(gri) enable and remove once issues caused by it are fixed
-const trackAllTypes = false
-
-type exporter struct {
- fset *token.FileSet
- out bytes.Buffer
-
- // object -> index maps, indexed in order of serialization
- strIndex map[string]int
- pkgIndex map[*types.Package]int
- typIndex map[types.Type]int
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
-
- // debugging support
- written int // bytes written
- indent int // for trace
-}
-
-// internalError represents an error generated inside this package.
-type internalError string
-
-func (e internalError) Error() string { return "gcimporter: " + string(e) }
-
-func internalErrorf(format string, args ...interface{}) error {
- return internalError(fmt.Sprintf(format, args...))
-}
-
-// BExportData returns binary export data for pkg.
-// If no file set is provided, position info will be missing.
-func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
- if !debug {
- defer func() {
- if e := recover(); e != nil {
- if ierr, ok := e.(internalError); ok {
- err = ierr
- return
- }
- // Not an internal error; panic again.
- panic(e)
- }
- }()
- }
-
- p := exporter{
- fset: fset,
- strIndex: map[string]int{"": 0}, // empty string is mapped to 0
- pkgIndex: make(map[*types.Package]int),
- typIndex: make(map[types.Type]int),
- posInfoFormat: true, // TODO(gri) might become a flag, eventually
- }
-
- // write version info
- // The version string must start with "version %d" where %d is the version
- // number. Additional debugging information may follow after a blank; that
- // text is ignored by the importer.
- p.rawStringln(fmt.Sprintf("version %d", exportVersion))
- var debug string
- if debugFormat {
- debug = "debug"
- }
- p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
- p.bool(trackAllTypes)
- p.bool(p.posInfoFormat)
-
- // --- generic export data ---
-
- // populate type map with predeclared "known" types
- for index, typ := range predeclared() {
- p.typIndex[typ] = index
- }
- if len(p.typIndex) != len(predeclared()) {
- return nil, internalError("duplicate entries in type map?")
- }
-
- // write package data
- p.pkg(pkg, true)
- if trace {
- p.tracef("\n")
- }
-
- // write objects
- objcount := 0
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- if !token.IsExported(name) {
- continue
- }
- if trace {
- p.tracef("\n")
- }
- p.obj(scope.Lookup(name))
- objcount++
- }
-
- // indicate end of list
- if trace {
- p.tracef("\n")
- }
- p.tag(endTag)
-
- // for self-verification only (redundant)
- p.int(objcount)
-
- if trace {
- p.tracef("\n")
- }
-
- // --- end of export data ---
-
- return p.out.Bytes(), nil
-}
-
-func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
- if pkg == nil {
- panic(internalError("unexpected nil pkg"))
- }
-
- // if we saw the package before, write its index (>= 0)
- if i, ok := p.pkgIndex[pkg]; ok {
- p.index('P', i)
- return
- }
-
- // otherwise, remember the package, write the package tag (< 0) and package data
- if trace {
- p.tracef("P%d = { ", len(p.pkgIndex))
- defer p.tracef("} ")
- }
- p.pkgIndex[pkg] = len(p.pkgIndex)
-
- p.tag(packageTag)
- p.string(pkg.Name())
- if emptypath {
- p.string("")
- } else {
- p.string(pkg.Path())
- }
-}
-
-func (p *exporter) obj(obj types.Object) {
- switch obj := obj.(type) {
- case *types.Const:
- p.tag(constTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
- p.value(obj.Val())
-
- case *types.TypeName:
- if obj.IsAlias() {
- p.tag(aliasTag)
- p.pos(obj)
- p.qualifiedName(obj)
- } else {
- p.tag(typeTag)
- }
- p.typ(obj.Type())
-
- case *types.Var:
- p.tag(varTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
-
- case *types.Func:
- p.tag(funcTag)
- p.pos(obj)
- p.qualifiedName(obj)
- sig := obj.Type().(*types.Signature)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-
- default:
- panic(internalErrorf("unexpected object %v (%T)", obj, obj))
- }
-}
-
-func (p *exporter) pos(obj types.Object) {
- if !p.posInfoFormat {
- return
- }
-
- file, line := p.fileLine(obj)
- if file == p.prevFile {
- // common case: write line delta
- // delta == 0 means different file or no line change
- delta := line - p.prevLine
- p.int(delta)
- if delta == 0 {
- p.int(-1) // -1 means no file change
- }
- } else {
- // different file
- p.int(0)
- // Encode filename as length of common prefix with previous
- // filename, followed by (possibly empty) suffix. Filenames
- // frequently share path prefixes, so this can save a lot
- // of space and make export data size less dependent on file
- // path length. The suffix is unlikely to be empty because
- // file names tend to end in ".go".
- n := commonPrefixLen(p.prevFile, file)
- p.int(n) // n >= 0
- p.string(file[n:]) // write suffix only
- p.prevFile = file
- p.int(line)
- }
- p.prevLine = line
-}
-
-func (p *exporter) fileLine(obj types.Object) (file string, line int) {
- if p.fset != nil {
- pos := p.fset.Position(obj.Pos())
- file = pos.Filename
- line = pos.Line
- }
- return
-}
-
-func commonPrefixLen(a, b string) int {
- if len(a) > len(b) {
- a, b = b, a
- }
- // len(a) <= len(b)
- i := 0
- for i < len(a) && a[i] == b[i] {
- i++
- }
- return i
-}
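// Worked example (hypothetical file names, not from the diff): with
// prevFile = "pkg/a/x.go" and file = "pkg/a/y.go", commonPrefixLen
// returns 6, so pos above emits p.int(0) ("file changed"), p.int(6)
// (length of the shared prefix "pkg/a/"), p.string("y.go") (the
// suffix only), and finally p.int(line).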
-
-func (p *exporter) qualifiedName(obj types.Object) {
- p.string(obj.Name())
- p.pkg(obj.Pkg(), false)
-}
-
-func (p *exporter) typ(t types.Type) {
- if t == nil {
- panic(internalError("nil type"))
- }
-
- // Possible optimization: Anonymous pointer types *T where
- // T is a named type are common. We could canonicalize all
- // such types *T to a single type PT = *T. This would lead
- // to at most one *T entry in typIndex, and all future *T's
- // would be encoded as the respective index directly. Would
- // save 1 byte (pointerTag) per *T and reduce the typIndex
- // size (at the cost of a canonicalization map). We can do
- // this later, without encoding format change.
-
- // if we saw the type before, write its index (>= 0)
- if i, ok := p.typIndex[t]; ok {
- p.index('T', i)
- return
- }
-
- // otherwise, remember the type, write the type tag (< 0) and type data
- if trackAllTypes {
- if trace {
- p.tracef("T%d = {>\n", len(p.typIndex))
- defer p.tracef("<\n} ")
- }
- p.typIndex[t] = len(p.typIndex)
- }
-
- switch t := t.(type) {
- case *types.Named:
- if !trackAllTypes {
- // if we don't track all types, track named types now
- p.typIndex[t] = len(p.typIndex)
- }
-
- p.tag(namedTag)
- p.pos(t.Obj())
- p.qualifiedName(t.Obj())
- p.typ(t.Underlying())
- if !types.IsInterface(t) {
- p.assocMethods(t)
- }
-
- case *types.Array:
- p.tag(arrayTag)
- p.int64(t.Len())
- p.typ(t.Elem())
-
- case *types.Slice:
- p.tag(sliceTag)
- p.typ(t.Elem())
-
- case *dddSlice:
- p.tag(dddTag)
- p.typ(t.elem)
-
- case *types.Struct:
- p.tag(structTag)
- p.fieldList(t)
-
- case *types.Pointer:
- p.tag(pointerTag)
- p.typ(t.Elem())
-
- case *types.Signature:
- p.tag(signatureTag)
- p.paramList(t.Params(), t.Variadic())
- p.paramList(t.Results(), false)
-
- case *types.Interface:
- p.tag(interfaceTag)
- p.iface(t)
-
- case *types.Map:
- p.tag(mapTag)
- p.typ(t.Key())
- p.typ(t.Elem())
-
- case *types.Chan:
- p.tag(chanTag)
- p.int(int(3 - t.Dir())) // hack
- p.typ(t.Elem())
-
- default:
- panic(internalErrorf("unexpected type %T: %s", t, t))
- }
-}
-
-func (p *exporter) assocMethods(named *types.Named) {
- // Sort methods (for determinism).
- var methods []*types.Func
- for i := 0; i < named.NumMethods(); i++ {
- methods = append(methods, named.Method(i))
- }
- sort.Sort(methodsByName(methods))
-
- p.int(len(methods))
-
- if trace && methods != nil {
- p.tracef("associated methods {>\n")
- }
-
- for i, m := range methods {
- if trace && i > 0 {
- p.tracef("\n")
- }
-
- p.pos(m)
- name := m.Name()
- p.string(name)
- if !exported(name) {
- p.pkg(m.Pkg(), false)
- }
-
- sig := m.Type().(*types.Signature)
- p.paramList(types.NewTuple(sig.Recv()), false)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
- p.int(0) // dummy value for go:nointerface pragma - ignored by importer
- }
-
- if trace && methods != nil {
- p.tracef("<\n} ")
- }
-}
-
-type methodsByName []*types.Func
-
-func (x methodsByName) Len() int { return len(x) }
-func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
-
-func (p *exporter) fieldList(t *types.Struct) {
- if trace && t.NumFields() > 0 {
- p.tracef("fields {>\n")
- defer p.tracef("<\n} ")
- }
-
- p.int(t.NumFields())
- for i := 0; i < t.NumFields(); i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.field(t.Field(i))
- p.string(t.Tag(i))
- }
-}
-
-func (p *exporter) field(f *types.Var) {
- if !f.IsField() {
- panic(internalError("field expected"))
- }
-
- p.pos(f)
- p.fieldName(f)
- p.typ(f.Type())
-}
-
-func (p *exporter) iface(t *types.Interface) {
- // TODO(gri): enable importer to load embedded interfaces,
- // then emit Embeddeds and ExplicitMethods separately here.
- p.int(0)
-
- n := t.NumMethods()
- if trace && n > 0 {
- p.tracef("methods {>\n")
- defer p.tracef("<\n} ")
- }
- p.int(n)
- for i := 0; i < n; i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.method(t.Method(i))
- }
-}
-
-func (p *exporter) method(m *types.Func) {
- sig := m.Type().(*types.Signature)
- if sig.Recv() == nil {
- panic(internalError("method expected"))
- }
-
- p.pos(m)
- p.string(m.Name())
- if m.Name() != "_" && !token.IsExported(m.Name()) {
- p.pkg(m.Pkg(), false)
- }
-
- // interface method; no need to encode receiver.
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-}
-
-func (p *exporter) fieldName(f *types.Var) {
- name := f.Name()
-
- if f.Anonymous() {
- // anonymous field - we distinguish between 3 cases:
- // 1) field name matches base type name and is exported
- // 2) field name matches base type name and is not exported
- // 3) field name doesn't match base type name (alias name)
- bname := basetypeName(f.Type())
- if name == bname {
- if token.IsExported(name) {
- name = "" // 1) we don't need to know the field name or package
- } else {
- name = "?" // 2) use unexported name "?" to force package export
- }
- } else {
- // 3) indicate alias and export name as is
- // (this requires an extra "@" but this is a rare case)
- p.string("@")
- }
- }
-
- p.string(name)
- if name != "" && !token.IsExported(name) {
- p.pkg(f.Pkg(), false)
- }
-}
-
-func basetypeName(typ types.Type) string {
- switch typ := deref(typ).(type) {
- case *types.Basic:
- return typ.Name()
- case *types.Named:
- return typ.Obj().Name()
- default:
- return "" // unnamed type
- }
-}
-
-func (p *exporter) paramList(params *types.Tuple, variadic bool) {
- // use negative length to indicate unnamed parameters
- // (look at the first parameter only since either all
- // names are present or all are absent)
- n := params.Len()
- if n > 0 && params.At(0).Name() == "" {
- n = -n
- }
- p.int(n)
- for i := 0; i < params.Len(); i++ {
- q := params.At(i)
- t := q.Type()
- if variadic && i == params.Len()-1 {
- t = &dddSlice{t.(*types.Slice).Elem()}
- }
- p.typ(t)
- if n > 0 {
- name := q.Name()
- p.string(name)
- if name != "_" {
- p.pkg(q.Pkg(), false)
- }
- }
- p.string("") // no compiler-specific info
- }
-}
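// Worked example of the sign convention (hypothetical signatures):
//   func(x, y int) -> p.int(+2); each parameter: type, name, package
//   func(int, int) -> p.int(-2); each parameter: type only
// The importer's paramList negates n and thereby recovers both the
// parameter count and whether names follow.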
-
-func (p *exporter) value(x constant.Value) {
- if trace {
- p.tracef("= ")
- }
-
- switch x.Kind() {
- case constant.Bool:
- tag := falseTag
- if constant.BoolVal(x) {
- tag = trueTag
- }
- p.tag(tag)
-
- case constant.Int:
- if v, exact := constant.Int64Val(x); exact {
- // common case: x fits into an int64 - use compact encoding
- p.tag(int64Tag)
- p.int64(v)
- return
- }
- // uncommon case: large x - use float encoding
- // (powers of 2 will be encoded efficiently with exponent)
- p.tag(floatTag)
- p.float(constant.ToFloat(x))
-
- case constant.Float:
- p.tag(floatTag)
- p.float(x)
-
- case constant.Complex:
- p.tag(complexTag)
- p.float(constant.Real(x))
- p.float(constant.Imag(x))
-
- case constant.String:
- p.tag(stringTag)
- p.string(constant.StringVal(x))
-
- case constant.Unknown:
- // package contains type errors
- p.tag(unknownTag)
-
- default:
- panic(internalErrorf("unexpected value %v (%T)", x, x))
- }
-}
-
-func (p *exporter) float(x constant.Value) {
- if x.Kind() != constant.Float {
- panic(internalErrorf("unexpected constant %v, want float", x))
- }
- // extract sign (there is no -0)
- sign := constant.Sign(x)
- if sign == 0 {
- // x == 0
- p.int(0)
- return
- }
- // x != 0
-
- var f big.Float
- if v, exact := constant.Float64Val(x); exact {
- // float64
- f.SetFloat64(v)
- } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
- // TODO(gri): add big.Rat accessor to constant.Value.
- r := valueToRat(num)
- f.SetRat(r.Quo(r, valueToRat(denom)))
- } else {
- // Value too large to represent as a fraction => inaccessible.
- // TODO(gri): add big.Float accessor to constant.Value.
- f.SetFloat64(math.MaxFloat64) // FIXME
- }
-
- // extract exponent such that 0.5 <= m < 1.0
- var m big.Float
- exp := f.MantExp(&m)
-
- // extract mantissa as *big.Int
- // - set exponent large enough so mant satisfies mant.IsInt()
- // - get *big.Int from mant
- m.SetMantExp(&m, int(m.MinPrec()))
- mant, acc := m.Int(nil)
- if acc != big.Exact {
- panic(internalError("internal error"))
- }
-
- p.int(sign)
- p.int(exp)
- p.string(string(mant.Bytes()))
-}
-
-func valueToRat(x constant.Value) *big.Rat {
- // Convert little-endian to big-endian.
- // I can't believe this is necessary.
- bytes := constant.Bytes(x)
- for i := 0; i < len(bytes)/2; i++ {
- bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
- }
- return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
-}
-
-func (p *exporter) bool(b bool) bool {
- if trace {
- p.tracef("[")
- defer p.tracef("= %v] ", b)
- }
-
- x := 0
- if b {
- x = 1
- }
- p.int(x)
- return b
-}
-
-// ----------------------------------------------------------------------------
-// Low-level encoders
-
-func (p *exporter) index(marker byte, index int) {
- if index < 0 {
- panic(internalError("invalid index < 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%c%d ", marker, index)
- }
- p.rawInt64(int64(index))
-}
-
-func (p *exporter) tag(tag int) {
- if tag >= 0 {
- panic(internalError("invalid tag >= 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%s ", tagString[-tag])
- }
- p.rawInt64(int64(tag))
-}
-
-func (p *exporter) int(x int) {
- p.int64(int64(x))
-}
-
-func (p *exporter) int64(x int64) {
- if debugFormat {
- p.marker('i')
- }
- if trace {
- p.tracef("%d ", x)
- }
- p.rawInt64(x)
-}
-
-func (p *exporter) string(s string) {
- if debugFormat {
- p.marker('s')
- }
- if trace {
- p.tracef("%q ", s)
- }
- // if we saw the string before, write its index (>= 0)
- // (the empty string is mapped to 0)
- if i, ok := p.strIndex[s]; ok {
- p.rawInt64(int64(i))
- return
- }
- // otherwise, remember string and write its negative length and bytes
- p.strIndex[s] = len(p.strIndex)
- p.rawInt64(-int64(len(s)))
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
-}
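// Worked example: the first p.string("foo") emits -3 'f' 'o' 'o' and
// records index 1 (index 0 is reserved for ""); every later
// p.string("foo") emits only the varint 1.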
-
-// marker emits a marker byte and position information which makes
-// it easy for a reader to detect if it is "out of sync". Used for
-// debugFormat format only.
-func (p *exporter) marker(m byte) {
- p.rawByte(m)
- // Enable this to help track down the location
- // of an incorrect marker when running in debugFormat.
- if false && trace {
- p.tracef("#%d ", p.written)
- }
- p.rawInt64(int64(p.written))
-}
-
-// rawInt64 should only be used by low-level encoders.
-func (p *exporter) rawInt64(x int64) {
- var tmp [binary.MaxVarintLen64]byte
- n := binary.PutVarint(tmp[:], x)
- for i := 0; i < n; i++ {
- p.rawByte(tmp[i])
- }
-}
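// Sketch: this is encoding/binary's zig-zag varint encoding; for
// example, binary.PutVarint(tmp[:], -3) emits the single byte 0x05,
// which binary.ReadVarint on the importing side maps back to -3.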
-
-// rawStringln should only be used to emit the initial version string.
-func (p *exporter) rawStringln(s string) {
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
- p.rawByte('\n')
-}
-
-// rawByte is the bottleneck interface to write to p.out.
-// rawByte escapes b as follows (any encoding that
-// hides '$' would do):
-//
-// '$' => '|' 'S'
-// '|' => '|' '|'
-//
-// Necessary so other tools can find the end of the
-// export data by searching for "$$".
-// rawByte should only be used by low-level encoders.
-func (p *exporter) rawByte(b byte) {
- switch b {
- case '$':
- // write '$' as '|' 'S'
- b = 'S'
- fallthrough
- case '|':
- // write '|' as '|' '|'
- p.out.WriteByte('|')
- p.written++
- }
- p.out.WriteByte(b)
- p.written++
-}
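// Worked example: the logical byte stream "a$b|c" is written as
// "a|Sb||c". The importer's rawByte inverts the mapping, which is why
// a literal "$$" can safely mark the end of export data.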
-
-// tracef is like fmt.Printf but it rewrites the format string
-// to take care of indentation.
-func (p *exporter) tracef(format string, args ...interface{}) {
- if strings.ContainsAny(format, "<>\n") {
- var buf bytes.Buffer
- for i := 0; i < len(format); i++ {
- // no need to deal with runes
- ch := format[i]
- switch ch {
- case '>':
- p.indent++
- continue
- case '<':
- p.indent--
- continue
- }
- buf.WriteByte(ch)
- if ch == '\n' {
- for j := p.indent; j > 0; j-- {
- buf.WriteString(". ")
- }
- }
- }
- format = buf.String()
- }
- fmt.Printf(format, args...)
-}
-
-// Debugging support.
-// (tagString is only used when tracing is enabled)
-var tagString = [...]string{
- // Packages
- -packageTag: "package",
-
- // Types
- -namedTag: "named type",
- -arrayTag: "array",
- -sliceTag: "slice",
- -dddTag: "ddd",
- -structTag: "struct",
- -pointerTag: "pointer",
- -signatureTag: "signature",
- -interfaceTag: "interface",
- -mapTag: "map",
- -chanTag: "chan",
-
- // Values
- -falseTag: "false",
- -trueTag: "true",
- -int64Tag: "int64",
- -floatTag: "float",
- -fractionTag: "fraction",
- -complexTag: "complex",
- -stringTag: "string",
- -unknownTag: "unknown",
-
- // Type aliases
- -aliasTag: "alias",
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index b85de0147..d98b0db2a 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -2,340 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
+// This file contains the remaining vestiges of
+// $GOROOT/src/go/internal/gcimporter/bimport.go.
package gcimporter
import (
- "encoding/binary"
"fmt"
- "go/constant"
"go/token"
"go/types"
- "sort"
- "strconv"
- "strings"
"sync"
- "unicode"
- "unicode/utf8"
)
-type importer struct {
- imports map[string]*types.Package
- data []byte
- importpath string
- buf []byte // for reading strings
- version int // export format version
-
- // object lists
- strList []string // in order of appearance
- pathList []string // in order of appearance
- pkgList []*types.Package // in order of appearance
- typList []types.Type // in order of appearance
- interfaceList []*types.Interface // for delayed completion only
- trackAllTypes bool
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
- fake fakeFileSet
-
- // debugging support
- debugFormat bool
- read int // bytes read
-}
-
-// BImportData imports a package from the serialized package data
-// and returns the number of bytes consumed and a reference to the package.
-// If the export data version is not recognized or the format is otherwise
-// compromised, an error is returned.
-func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
- // catch panics and return them as errors
- const currentVersion = 6
- version := -1 // unknown version
- defer func() {
- if e := recover(); e != nil {
- // Return a (possibly nil or incomplete) package unchanged (see #16088).
- if version > currentVersion {
- err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
- } else {
- err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
- }
- }
- }()
-
- p := importer{
- imports: imports,
- data: data,
- importpath: path,
- version: version,
- strList: []string{""}, // empty string is mapped to 0
- pathList: []string{""}, // empty string is mapped to 0
- fake: fakeFileSet{
- fset: fset,
- files: make(map[string]*fileInfo),
- },
- }
- defer p.fake.setLines() // set lines for files in fset
-
- // read version info
- var versionstr string
- if b := p.rawByte(); b == 'c' || b == 'd' {
- // Go1.7 encoding; first byte encodes low-level
- // encoding format (compact vs debug).
- // For backward-compatibility only (avoid problems with
- // old installed packages). Newly compiled packages use
- // the extensible format string.
- // TODO(gri) Remove this support eventually; after Go1.8.
- if b == 'd' {
- p.debugFormat = true
- }
- p.trackAllTypes = p.rawByte() == 'a'
- p.posInfoFormat = p.int() != 0
- versionstr = p.string()
- if versionstr == "v1" {
- version = 0
- }
- } else {
- // Go1.8 extensible encoding
- // read version string and extract version number (ignore anything after the version number)
- versionstr = p.rawStringln(b)
- if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
- if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
- version = v
- }
- }
- }
- p.version = version
-
- // read version specific flags - extend as necessary
- switch p.version {
- // case currentVersion:
- // ...
- // fallthrough
- case currentVersion, 5, 4, 3, 2, 1:
- p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
- p.trackAllTypes = p.int() != 0
- p.posInfoFormat = p.int() != 0
- case 0:
- // Go1.7 encoding format - nothing to do here
- default:
- errorf("unknown bexport format version %d (%q)", p.version, versionstr)
- }
-
- // --- generic export data ---
-
- // populate typList with predeclared "known" types
- p.typList = append(p.typList, predeclared()...)
-
- // read package data
- pkg = p.pkg()
-
- // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
- objcount := 0
- for {
- tag := p.tagOrIndex()
- if tag == endTag {
- break
- }
- p.obj(tag)
- objcount++
- }
-
- // self-verification
- if count := p.int(); count != objcount {
- errorf("got %d objects; want %d", objcount, count)
- }
-
- // ignore compiler-specific import data
-
- // complete interfaces
- // TODO(gri) re-investigate if we still need to do this in a delayed fashion
- for _, typ := range p.interfaceList {
- typ.Complete()
- }
-
- // record all referenced packages as imports
- list := append(([]*types.Package)(nil), p.pkgList[1:]...)
- sort.Sort(byPath(list))
- pkg.SetImports(list)
-
- // package was imported completely and without errors
- pkg.MarkComplete()
-
- return p.read, pkg, nil
-}
-
func errorf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
-func (p *importer) pkg() *types.Package {
- // if the package was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.pkgList[i]
- }
-
- // otherwise, i is the package tag (< 0)
- if i != packageTag {
- errorf("unexpected package tag %d version %d", i, p.version)
- }
-
- // read package data
- name := p.string()
- var path string
- if p.version >= 5 {
- path = p.path()
- } else {
- path = p.string()
- }
- if p.version >= 6 {
- p.int() // package height; unused by go/types
- }
-
- // we should never see an empty package name
- if name == "" {
- errorf("empty package name in import")
- }
-
- // an empty path denotes the package we are currently importing;
- // it must be the first package we see
- if (path == "") != (len(p.pkgList) == 0) {
- errorf("package path %q for pkg index %d", path, len(p.pkgList))
- }
-
- // if the package was imported before, use that one; otherwise create a new one
- if path == "" {
- path = p.importpath
- }
- pkg := p.imports[path]
- if pkg == nil {
- pkg = types.NewPackage(path, name)
- p.imports[path] = pkg
- } else if pkg.Name() != name {
- errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
- }
- p.pkgList = append(p.pkgList, pkg)
-
- return pkg
-}
-
-// objTag returns the tag value for each object kind.
-func objTag(obj types.Object) int {
- switch obj.(type) {
- case *types.Const:
- return constTag
- case *types.TypeName:
- return typeTag
- case *types.Var:
- return varTag
- case *types.Func:
- return funcTag
- default:
- errorf("unexpected object: %v (%T)", obj, obj) // panics
- panic("unreachable")
- }
-}
-
-func sameObj(a, b types.Object) bool {
- // Because unnamed types are not canonicalized, we cannot simply compare types for
- // (pointer) identity.
- // Ideally we'd check equality of constant values as well, but this is good enough.
- return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
-}
-
-func (p *importer) declare(obj types.Object) {
- pkg := obj.Pkg()
- if alt := pkg.Scope().Insert(obj); alt != nil {
- // This can only trigger if we import a (non-type) object a second time.
- // Excluding type aliases, this cannot happen because 1) we only import a package
- // once; and 2) we ignore compiler-specific export data which may contain
- // functions whose inlined function bodies refer to other functions that
- // were already imported.
- // However, type aliases require reexporting the original type, so we need
- // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
- // method importer.obj, switch case importing functions).
- // TODO(gri) review/update this comment once the gc compiler handles type aliases.
- if !sameObj(obj, alt) {
- errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
- }
- }
-}
-
-func (p *importer) obj(tag int) {
- switch tag {
- case constTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- val := p.value()
- p.declare(types.NewConst(pos, pkg, name, typ, val))
-
- case aliasTag:
- // TODO(gri) verify type alias hookup is correct
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- p.declare(types.NewTypeName(pos, pkg, name, typ))
-
- case typeTag:
- p.typ(nil, nil)
-
- case varTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- p.declare(types.NewVar(pos, pkg, name, typ))
-
- case funcTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- params, isddd := p.paramList()
- result, _ := p.paramList()
- sig := types.NewSignature(nil, params, result, isddd)
- p.declare(types.NewFunc(pos, pkg, name, sig))
-
- default:
- errorf("unexpected object tag %d", tag)
- }
-}
-
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
-func (p *importer) pos() token.Pos {
- if !p.posInfoFormat {
- return token.NoPos
- }
-
- file := p.prevFile
- line := p.prevLine
- delta := p.int()
- line += delta
- if p.version >= 5 {
- if delta == deltaNewFile {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.path()
- line = n
- }
- }
- } else {
- if delta == 0 {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.prevFile[:n] + p.string()
- line = p.int()
- }
- }
- }
- p.prevFile = file
- p.prevLine = line
-
- return p.fake.pos(file, line, 0)
-}
-
// Synthesize a token.Pos
type fakeFileSet struct {
fset *token.FileSet
@@ -389,205 +73,6 @@ var (
fakeLinesOnce sync.Once
)
-func (p *importer) qualifiedName() (pkg *types.Package, name string) {
- name = p.string()
- pkg = p.pkg()
- return
-}
-
-func (p *importer) record(t types.Type) {
- p.typList = append(p.typList, t)
-}
-
-// A dddSlice is a types.Type representing ...T parameters.
-// It only appears for parameter types and does not escape
-// the importer.
-type dddSlice struct {
- elem types.Type
-}
-
-func (t *dddSlice) Underlying() types.Type { return t }
-func (t *dddSlice) String() string { return "..." + t.elem.String() }
-
-// parent is the package which declared the type; parent == nil means
-// the package currently imported. The parent package is needed for
-// exported struct fields and interface methods which don't contain
-// explicit package information in the export data.
-//
-// A non-nil tname is used as the "owner" of the result type; i.e.,
-// the result type is the underlying type of tname. tname is used
-// to give interface methods a named receiver type where possible.
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
- // if the type was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.typList[i]
- }
-
- // otherwise, i is the type tag (< 0)
- switch i {
- case namedTag:
- // read type object
- pos := p.pos()
- parent, name := p.qualifiedName()
- scope := parent.Scope()
- obj := scope.Lookup(name)
-
- // if the object doesn't exist yet, create and insert it
- if obj == nil {
- obj = types.NewTypeName(pos, parent, name, nil)
- scope.Insert(obj)
- }
-
- if _, ok := obj.(*types.TypeName); !ok {
- errorf("pkg = %s, name = %s => %s", parent, name, obj)
- }
-
- // associate new named type with obj if it doesn't exist yet
- t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
-
- // but record the existing type, if any
- tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
- p.record(tname)
-
- // read underlying type
- t0.SetUnderlying(p.typ(parent, t0))
-
- // interfaces don't have associated methods
- if types.IsInterface(t0) {
- return tname
- }
-
- // read associated methods
- for i := p.int(); i > 0; i-- {
- // TODO(gri) replace this with something closer to fieldName
- pos := p.pos()
- name := p.string()
- if !exported(name) {
- p.pkg()
- }
-
- recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
- params, isddd := p.paramList()
- result, _ := p.paramList()
- p.int() // go:nointerface pragma - discarded
-
- sig := types.NewSignature(recv.At(0), params, result, isddd)
- t0.AddMethod(types.NewFunc(pos, parent, name, sig))
- }
-
- return tname
-
- case arrayTag:
- t := new(types.Array)
- if p.trackAllTypes {
- p.record(t)
- }
-
- n := p.int64()
- *t = *types.NewArray(p.typ(parent, nil), n)
- return t
-
- case sliceTag:
- t := new(types.Slice)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewSlice(p.typ(parent, nil))
- return t
-
- case dddTag:
- t := new(dddSlice)
- if p.trackAllTypes {
- p.record(t)
- }
-
- t.elem = p.typ(parent, nil)
- return t
-
- case structTag:
- t := new(types.Struct)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewStruct(p.fieldList(parent))
- return t
-
- case pointerTag:
- t := new(types.Pointer)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewPointer(p.typ(parent, nil))
- return t
-
- case signatureTag:
- t := new(types.Signature)
- if p.trackAllTypes {
- p.record(t)
- }
-
- params, isddd := p.paramList()
- result, _ := p.paramList()
- *t = *types.NewSignature(nil, params, result, isddd)
- return t
-
- case interfaceTag:
- // Create a dummy entry in the type list. This is safe because we
- // cannot expect the interface type to appear in a cycle, as any
- // such cycle must contain a named type which would have been
- // first defined earlier.
- // TODO(gri) Is this still true now that we have type aliases?
- // See issue #23225.
- n := len(p.typList)
- if p.trackAllTypes {
- p.record(nil)
- }
-
- var embeddeds []types.Type
- for n := p.int(); n > 0; n-- {
- p.pos()
- embeddeds = append(embeddeds, p.typ(parent, nil))
- }
-
- t := newInterface(p.methodList(parent, tname), embeddeds)
- p.interfaceList = append(p.interfaceList, t)
- if p.trackAllTypes {
- p.typList[n] = t
- }
- return t
-
- case mapTag:
- t := new(types.Map)
- if p.trackAllTypes {
- p.record(t)
- }
-
- key := p.typ(parent, nil)
- val := p.typ(parent, nil)
- *t = *types.NewMap(key, val)
- return t
-
- case chanTag:
- t := new(types.Chan)
- if p.trackAllTypes {
- p.record(t)
- }
-
- dir := chanDir(p.int())
- val := p.typ(parent, nil)
- *t = *types.NewChan(dir, val)
- return t
-
- default:
- errorf("unexpected type tag %d", i) // panics
- panic("unreachable")
- }
-}
-
func chanDir(d int) types.ChanDir {
// tag values must match the constants in cmd/compile/internal/gc/go.go
switch d {
@@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir {
}
}
-func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
- if n := p.int(); n > 0 {
- fields = make([]*types.Var, n)
- tags = make([]string, n)
- for i := range fields {
- fields[i], tags[i] = p.field(parent)
- }
- }
- return
-}
-
-func (p *importer) field(parent *types.Package) (*types.Var, string) {
- pos := p.pos()
- pkg, name, alias := p.fieldName(parent)
- typ := p.typ(parent, nil)
- tag := p.string()
-
- anonymous := false
- if name == "" {
- // anonymous field - typ must be T or *T and T must be a type name
- switch typ := deref(typ).(type) {
- case *types.Basic: // basic types are named types
- pkg = nil // objects defined in Universe scope have no package
- name = typ.Name()
- case *types.Named:
- name = typ.Obj().Name()
- default:
- errorf("named base type expected")
- }
- anonymous = true
- } else if alias {
- // anonymous field: we have an explicit name because it's an alias
- anonymous = true
- }
-
- return types.NewField(pos, pkg, name, typ, anonymous), tag
-}
-
-func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
- if n := p.int(); n > 0 {
- methods = make([]*types.Func, n)
- for i := range methods {
- methods[i] = p.method(parent, baseType)
- }
- }
- return
-}
-
-func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
- pos := p.pos()
- pkg, name, _ := p.fieldName(parent)
- // If we don't have a baseType, use a nil receiver.
- // A receiver using the actual interface type (which
- // we don't know yet) will be filled in when we call
- // types.Interface.Complete.
- var recv *types.Var
- if baseType != nil {
- recv = types.NewVar(token.NoPos, parent, "", baseType)
- }
- params, isddd := p.paramList()
- result, _ := p.paramList()
- sig := types.NewSignature(recv, params, result, isddd)
- return types.NewFunc(pos, pkg, name, sig)
-}
-
-func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
- name = p.string()
- pkg = parent
- if pkg == nil {
- // use the imported package instead
- pkg = p.pkgList[0]
- }
- if p.version == 0 && name == "_" {
- // version 0 didn't export a package for _ fields
- return
- }
- switch name {
- case "":
- // 1) field name matches base type name and is exported: nothing to do
- case "?":
- // 2) field name matches base type name and is not exported: need package
- name = ""
- pkg = p.pkg()
- case "@":
- // 3) field name doesn't match type name (alias)
- name = p.string()
- alias = true
- fallthrough
- default:
- if !exported(name) {
- pkg = p.pkg()
- }
- }
- return
-}
-
-func (p *importer) paramList() (*types.Tuple, bool) {
- n := p.int()
- if n == 0 {
- return nil, false
- }
- // negative length indicates unnamed parameters
- named := true
- if n < 0 {
- n = -n
- named = false
- }
- // n > 0
- params := make([]*types.Var, n)
- isddd := false
- for i := range params {
- params[i], isddd = p.param(named)
- }
- return types.NewTuple(params...), isddd
-}
-
-func (p *importer) param(named bool) (*types.Var, bool) {
- t := p.typ(nil, nil)
- td, isddd := t.(*dddSlice)
- if isddd {
- t = types.NewSlice(td.elem)
- }
-
- var pkg *types.Package
- var name string
- if named {
- name = p.string()
- if name == "" {
- errorf("expected named parameter")
- }
- if name != "_" {
- pkg = p.pkg()
- }
- if i := strings.Index(name, "·"); i > 0 {
- name = name[:i] // cut off gc-specific parameter numbering
- }
- }
-
- // read and discard compiler-specific info
- p.string()
-
- return types.NewVar(token.NoPos, pkg, name, t), isddd
-}
-
-func exported(name string) bool {
- ch, _ := utf8.DecodeRuneInString(name)
- return unicode.IsUpper(ch)
-}
-
-func (p *importer) value() constant.Value {
- switch tag := p.tagOrIndex(); tag {
- case falseTag:
- return constant.MakeBool(false)
- case trueTag:
- return constant.MakeBool(true)
- case int64Tag:
- return constant.MakeInt64(p.int64())
- case floatTag:
- return p.float()
- case complexTag:
- re := p.float()
- im := p.float()
- return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
- case stringTag:
- return constant.MakeString(p.string())
- case unknownTag:
- return constant.MakeUnknown()
- default:
- errorf("unexpected value tag %d", tag) // panics
- panic("unreachable")
- }
-}
-
-func (p *importer) float() constant.Value {
- sign := p.int()
- if sign == 0 {
- return constant.MakeInt64(0)
- }
-
- exp := p.int()
- mant := []byte(p.string()) // big endian
-
- // remove leading 0's if any
- for len(mant) > 0 && mant[0] == 0 {
- mant = mant[1:]
- }
-
- // convert to little endian
- // TODO(gri) go/constant should have a more direct conversion function
- // (e.g., once it supports a big.Float based implementation)
- for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
- mant[i], mant[j] = mant[j], mant[i]
- }
-
- // adjust exponent (constant.MakeFromBytes creates an integer value,
- // but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
- exp -= len(mant) << 3
- if len(mant) > 0 {
- for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
- exp++
- }
- }
-
- x := constant.MakeFromBytes(mant)
- switch {
- case exp < 0:
- d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
- x = constant.BinaryOp(x, token.QUO, d)
- case exp > 0:
- x = constant.Shift(x, token.SHL, uint(exp))
- }
-
- if sign < 0 {
- x = constant.UnaryOp(token.SUB, x, 0)
- }
- return x
-}
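// Worked example (derived from the exporter's float above): exporting
// 1.0 yields sign=1, exp=1, mant="\x01". Decoding: exp -= len(mant)<<3
// gives -7; the normalization loop increments exp seven times (0x01
// needs seven shifts to set its top bit), leaving exp=0, and
// MakeFromBytes(mant) is 1; so the result is the integer constant 1.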
-
-// ----------------------------------------------------------------------------
-// Low-level decoders
-
-func (p *importer) tagOrIndex() int {
- if p.debugFormat {
- p.marker('t')
- }
-
- return int(p.rawInt64())
-}
-
-func (p *importer) int() int {
- x := p.int64()
- if int64(int(x)) != x {
- errorf("exported integer too large")
- }
- return int(x)
-}
-
-func (p *importer) int64() int64 {
- if p.debugFormat {
- p.marker('i')
- }
-
- return p.rawInt64()
-}
-
-func (p *importer) path() string {
- if p.debugFormat {
- p.marker('p')
- }
- // if the path was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.pathList[i]
- }
- // otherwise, i is the negative path length (< 0)
- a := make([]string, -i)
- for n := range a {
- a[n] = p.string()
- }
- s := strings.Join(a, "/")
- p.pathList = append(p.pathList, s)
- return s
-}
-
-func (p *importer) string() string {
- if p.debugFormat {
- p.marker('s')
- }
- // if the string was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.strList[i]
- }
- // otherwise, i is the negative string length (< 0)
- if n := int(-i); n <= cap(p.buf) {
- p.buf = p.buf[:n]
- } else {
- p.buf = make([]byte, n)
- }
- for i := range p.buf {
- p.buf[i] = p.rawByte()
- }
- s := string(p.buf)
- p.strList = append(p.strList, s)
- return s
-}
-
-func (p *importer) marker(want byte) {
- if got := p.rawByte(); got != want {
- errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
- }
-
- pos := p.read
- if n := int(p.rawInt64()); n != pos {
- errorf("incorrect position: got %d; want %d", n, pos)
- }
-}
-
-// rawInt64 should only be used by low-level decoders.
-func (p *importer) rawInt64() int64 {
- i, err := binary.ReadVarint(p)
- if err != nil {
- errorf("read error: %v", err)
- }
- return i
-}
-
-// rawStringln should only be used to read the initial version string.
-func (p *importer) rawStringln(b byte) string {
- p.buf = p.buf[:0]
- for b != '\n' {
- p.buf = append(p.buf, b)
- b = p.rawByte()
- }
- return string(p.buf)
-}
-
-// needed for binary.ReadVarint in rawInt64
-func (p *importer) ReadByte() (byte, error) {
- return p.rawByte(), nil
-}
-
-// rawByte is the bottleneck interface for reading p.data.
-// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
-// rawByte should only be used by low-level decoders.
-func (p *importer) rawByte() byte {
- b := p.data[0]
- r := 1
- if b == '|' {
- b = p.data[1]
- r = 2
- switch b {
- case 'S':
- b = '$'
- case '|':
- // nothing to do
- default:
- errorf("unexpected escape sequence in export data")
- }
- }
- p.data = p.data[r:]
- p.read += r
- return b
-
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
- // Objects
- packageTag = -(iota + 1)
- constTag
- typeTag
- varTag
- funcTag
- endTag
-
- // Types
- namedTag
- arrayTag
- sliceTag
- dddTag
- structTag
- pointerTag
- signatureTag
- interfaceTag
- mapTag
- chanTag
-
- // Values
- falseTag
- trueTag
- int64Tag
- floatTag
- fractionTag // not used by gc
- complexTag
- stringTag
- nilTag // only used by gc (appears in exported inlined function bodies)
- unknownTag // not used by gc (only appears in packages with errors)
-
- // Type aliases
- aliasTag
-)
-
var predeclOnce sync.Once
var predecl []types.Type // initialized lazily
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index a973dece9..b1223713b 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -230,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
- // The indexed export format starts with an 'i'; the older
- // binary export format starts with a 'c', 'd', or 'v'
- // (from "version"). Select appropriate importer.
+ // Select appropriate importer.
if len(data) > 0 {
switch data[0] {
- case 'i':
- _, pkg, err := IImportData(fset, packages, data[1:], id)
- return pkg, err
+ case 'v', 'c', 'd': // binary, till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
- case 'v', 'c', 'd':
- _, pkg, err := BImportData(fset, packages, data, id)
+ case 'i': // indexed, till go1.19
+ _, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
- case 'u':
+ case 'u': // unified, from go1.20
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
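The rewritten switch keys the importer off the first byte of the export data. A hypothetical helper summarizing the mapping, with version ranges taken from the comments in the hunk above:

package sniffdemo // hypothetical

// exportFormat mirrors the switch above: the first byte of the
// export data selects the importer.
func exportFormat(data []byte) string {
	if len(data) == 0 {
		return "empty"
	}
	switch data[0] {
	case 'v', 'c', 'd':
		return "binary (till go1.10; rejected after this change)"
	case 'i':
		return "indexed (till go1.19)"
	case 'u':
		return "unified (from go1.20)"
	}
	return "unknown"
}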
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index a0dc0b5e2..9930d8c36 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -969,6 +969,16 @@ func constantToFloat(x constant.Value) *big.Float {
return &f
}
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
@@ -1178,3 +1188,12 @@ func (q *objQueue) popHead() types.Object {
q.head++
return obj
}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index be6dace15..94a5eba33 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -131,7 +131,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
} else if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
- err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
}
}
}()
@@ -140,11 +140,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
r := &intReader{bytes.NewReader(data), path}
if bundle {
- bundleVersion := r.uint64()
- switch bundleVersion {
- case bundleVersion:
- default:
- errorf("unknown bundle format version %d", bundleVersion)
+ if v := r.uint64(); v != bundleVersion {
+ errorf("unknown bundle format version %d", v)
}
}
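
The switch removed here was a latent no-op: the local bundleVersion shadowed the package-level constant of the same name, so the lone case compared the just-read value with itself and the errorf branch could never run. The replacement compares against the constant directly. The shape of the old bug, as a standalone sketch (illustrative only):

    package main

    import "fmt"

    func main() {
        bundleVersion := uint64(42) // stands in for r.uint64()
        switch bundleVersion {
        case bundleVersion: // compares the operand with itself: always matches
            fmt.Println("accepted, whatever the version")
        default:
            fmt.Println("never reached")
        }
    }
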
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 34fc783f8..b977435f6 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -10,6 +10,7 @@
package gcimporter
import (
+ "fmt"
"go/token"
"go/types"
"sort"
@@ -63,6 +64,14 @@ type typeInfo struct {
}
func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ if !debug {
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+ }
+ }()
+ }
+
s := string(data)
s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
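
UImportData now converts any panic raised during decoding into an ordinary error by recovering into the named return value, so malformed export data reports an error instead of crashing the tool. The same pattern in a self-contained sketch (safeDecode is an illustrative name, not from x/tools):

    package main

    import "fmt"

    // safeDecode runs f and converts a panic into an error on the named
    // return value, mirroring the guard added to UImportData above.
    func safeDecode(f func()) (err error) {
        defer func() {
            if x := recover(); x != nil {
                err = fmt.Errorf("internal error while importing: %v", x)
            }
        }()
        f()
        return nil
    }

    func main() {
        fmt.Println(safeDecode(func() { panic("truncated export data") }))
    }
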
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index 3c0afe723..8d9fc98d8 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -24,6 +24,9 @@ import (
exec "golang.org/x/sys/execabs"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
)
// A Runner will run go command invocations and serialize
@@ -53,9 +56,19 @@ func (runner *Runner) initialize() {
// 1.14: go: updating go.mod: existing contents have changed since last read
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+// verb is an event label for the go command verb.
+var verb = keys.NewString("verb", "go command verb")
+
+func invLabels(inv Invocation) []label.Label {
+ return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
+}
+
// Run is a convenience wrapper around RunRaw.
// It returns only stdout and a "friendly" error.
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+ defer done()
+
stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
return stdout, friendly
}
@@ -63,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e
// RunPiped runs the invocation serially, always waiting for any concurrent
// invocations to complete first.
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+ defer done()
+
_, err := runner.runPiped(ctx, inv, stdout, stderr)
return err
}
@@ -70,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde
// RunRaw runs the invocation, serializing requests only if they fight over
// go.mod changes.
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
+ defer done()
// Make sure the runner is always initialized.
runner.initialize()
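
Each Runner entry point now opens an event span labeled with the go verb and working directory, closing it on return via the deferred done. Condensed, the pattern looks like the fragment below; event, keys, label, and tag are x/tools-internal packages (imported in this hunk), so this fragment only compiles inside that module, and startSpan is an illustrative name:

    // startSpan pairs event.Start with the labels used above; callers
    // defer the returned func so every span is closed exactly once.
    func startSpan(ctx context.Context, name string, inv Invocation) (context.Context, func()) {
        return event.Start(ctx, name,
            verb.Of(inv.Verb),                // the go command verb, e.g. "list"
            tag.Directory.Of(inv.WorkingDir)) // where the command runs
    }
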
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 3c53fbc63..ce7d4351b 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,8 +11,6 @@ import (
"go/types"
"reflect"
"unsafe"
-
- "golang.org/x/tools/go/types/objectpath"
)
func SetUsesCgo(conf *types.Config) bool {
@@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}
var SetGoVersion = func(conf *types.Config, version string) bool { return false }
-
-// NewObjectpathEncoder returns a function closure equivalent to
-// objectpath.For but amortized for multiple (sequential) calls.
-// It is a temporary workaround, pending the approval of proposal 58668.
-//
-//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor
-func NewObjectpathFunc() func(types.Object) (objectpath.Path, error)
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
deleted file mode 100644
index e8789cb33..000000000
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ /dev/null
@@ -1,588 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/field_mask.proto
-
-// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
-//
-// The FieldMask message represents a set of symbolic field paths.
-// The paths are specific to some target message type,
-// which is not stored within the FieldMask message itself.
-//
-// # Constructing a FieldMask
-//
-// The New function is used to construct a FieldMask:
-//
-// var messageType *descriptorpb.DescriptorProto
-// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
-// if err != nil {
-// ... // handle error
-// }
-// ... // make use of fm
-//
-// The "field.name" and "field.number" paths are valid paths according to the
-// google.protobuf.DescriptorProto message. Use of a path that does not correlate
-// to valid fields reachable from DescriptorProto would result in an error.
-//
-// Once a FieldMask message has been constructed,
-// the Append method can be used to insert additional paths to the path set:
-//
-// var messageType *descriptorpb.DescriptorProto
-// if err := fm.Append(messageType, "options"); err != nil {
-// ... // handle error
-// }
-//
-// # Type checking a FieldMask
-//
-// In order to verify that a FieldMask represents a set of fields that are
-// reachable from some target message type, use the IsValid method:
-//
-// var messageType *descriptorpb.DescriptorProto
-// if fm.IsValid(messageType) {
-// ... // make use of fm
-// }
-//
-// IsValid needs to be passed the target message type as an input since the
-// FieldMask message itself does not store the message type that the set of paths
-// is for.
-package fieldmaskpb
-
-import (
- proto "google.golang.org/protobuf/proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sort "sort"
- strings "strings"
- sync "sync"
-)
-
-// `FieldMask` represents a set of symbolic field paths, for example:
-//
-// paths: "f.a"
-// paths: "f.b.d"
-//
-// Here `f` represents a field in some root message, `a` and `b`
-// fields in the message found in `f`, and `d` a field found in the
-// message in `f.b`.
-//
-// Field masks are used to specify a subset of fields that should be
-// returned by a get operation or modified by an update operation.
-// Field masks also have a custom JSON encoding (see below).
-//
-// # Field Masks in Projections
-//
-// When used in the context of a projection, a response message or
-// sub-message is filtered by the API to only contain those fields as
-// specified in the mask. For example, if the mask in the previous
-// example is applied to a response message as follows:
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// x : 2
-// }
-// y : 13
-// }
-// z: 8
-//
-// The result will not contain specific values for fields x, y, and z
-// (their value will be set to the default, and omitted in proto text
-// output):
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// }
-// }
-//
-// A repeated field is not allowed except at the last position of a
-// paths string.
-//
-// If a FieldMask object is not present in a get operation, the
-// operation applies to all fields (as if a FieldMask of all fields
-// had been specified).
-//
-// Note that a field mask does not necessarily apply to the
-// top-level response message. In case of a REST get operation, the
-// field mask applies directly to the response, but in case of a REST
-// list operation, the mask instead applies to each individual message
-// in the returned resource list. In case of a REST custom method,
-// other definitions may be used. Where the mask applies will be
-// clearly documented together with its declaration in the API. In
-// any case, the effect on the returned resource/resources is required
-// behavior for APIs.
-//
-// # Field Masks in Update Operations
-//
-// A field mask in update operations specifies which fields of the
-// targeted resource are going to be updated. The API is required
-// to only change the values of the fields as specified in the mask
-// and leave the others untouched. If a resource is passed in to
-// describe the updated values, the API ignores the values of all
-// fields not covered by the mask.
-//
-// If a repeated field is specified for an update operation, new values will
-// be appended to the existing repeated field in the target resource. Note that
-// a repeated field is only allowed in the last position of a `paths` string.
-//
-// If a sub-message is specified in the last position of the field mask for an
-// update operation, then the new value will be merged into the existing sub-message
-// in the target resource.
-//
-// For example, given the target message:
-//
-// f {
-// b {
-// d: 1
-// x: 2
-// }
-// c: [1]
-// }
-//
-// And an update message:
-//
-// f {
-// b {
-// d: 10
-// }
-// c: [2]
-// }
-//
-// then if the field mask is:
-//
-// paths: ["f.b", "f.c"]
-//
-// then the result will be:
-//
-// f {
-// b {
-// d: 10
-// x: 2
-// }
-// c: [1, 2]
-// }
-//
-// An implementation may provide options to override this default behavior for
-// repeated and message fields.
-//
-// In order to reset a field's value to the default, the field must
-// be in the mask and set to the default value in the provided resource.
-// Hence, in order to reset all fields of a resource, provide a default
-// instance of the resource and set all fields in the mask, or do
-// not provide a mask as described below.
-//
-// If a field mask is not present on update, the operation applies to
-// all fields (as if a field mask of all fields has been specified).
-// Note that in the presence of schema evolution, this may mean that
-// fields the client does not know and has therefore not filled into
-// the request will be reset to their default. If this is unwanted
-// behavior, a specific service may require a client to always specify
-// a field mask, producing an error if not.
-//
-// As with get operations, the location of the resource which
-// describes the updated values in the request message depends on the
-// operation kind. In any case, the effect of the field mask is
-// required to be honored by the API.
-//
-// ## Considerations for HTTP REST
-//
-// The HTTP kind of an update operation which uses a field mask must
-// be set to PATCH instead of PUT in order to satisfy HTTP semantics
-// (PUT must only be used for full updates).
-//
-// # JSON Encoding of Field Masks
-//
-// In JSON, a field mask is encoded as a single string where paths are
-// separated by a comma. Field names in each path are converted
-// to/from lower-camel naming conventions.
-//
-// As an example, consider the following message declarations:
-//
-// message Profile {
-// User user = 1;
-// Photo photo = 2;
-// }
-// message User {
-// string display_name = 1;
-// string address = 2;
-// }
-//
-// In proto a field mask for `Profile` may look like this:
-//
-// mask {
-// paths: "user.display_name"
-// paths: "photo"
-// }
-//
-// In JSON, the same mask is represented as below:
-//
-// {
-// mask: "user.displayName,photo"
-// }
-//
-// # Field Masks and Oneof Fields
-//
-// Field masks treat fields in oneofs just like regular fields. Consider the
-// following message:
-//
-// message SampleMessage {
-// oneof test_oneof {
-// string name = 4;
-// SubMessage sub_message = 9;
-// }
-// }
-//
-// The field mask can be:
-//
-// mask {
-// paths: "name"
-// }
-//
-// Or:
-//
-// mask {
-// paths: "sub_message"
-// }
-//
-// Note that oneof type names ("test_oneof" in this case) cannot be used in
-// paths.
-//
-// ## Field Mask Verification
-//
-// The implementation of any API method which has a FieldMask type field in the
-// request should verify the included field paths, and return an
-// `INVALID_ARGUMENT` error if any path is unmappable.
-type FieldMask struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The set of field mask paths.
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
-}
-
-// New constructs a field mask from a list of paths and verifies that
-// each one is valid according to the specified message type.
-func New(m proto.Message, paths ...string) (*FieldMask, error) {
- x := new(FieldMask)
- return x, x.Append(m, paths...)
-}
-
-// Union returns the union of all the paths in the input field masks.
-func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
- var out []string
- out = append(out, mx.GetPaths()...)
- out = append(out, my.GetPaths()...)
- for _, m := range ms {
- out = append(out, m.GetPaths()...)
- }
- return &FieldMask{Paths: normalizePaths(out)}
-}
-
-// Intersect returns the intersection of all the paths in the input field masks.
-func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
- var ss1, ss2 []string // reused buffers for performance
- intersect := func(out, in []string) []string {
- ss1 = normalizePaths(append(ss1[:0], in...))
- ss2 = normalizePaths(append(ss2[:0], out...))
- out = out[:0]
- for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
- switch s1, s2 := ss1[i1], ss2[i2]; {
- case hasPathPrefix(s1, s2):
- out = append(out, s1)
- i1++
- case hasPathPrefix(s2, s1):
- out = append(out, s2)
- i2++
- case lessPath(s1, s2):
- i1++
- case lessPath(s2, s1):
- i2++
- }
- }
- return out
- }
-
- out := Union(mx, my, ms...).GetPaths()
- out = intersect(out, mx.GetPaths())
- out = intersect(out, my.GetPaths())
- for _, m := range ms {
- out = intersect(out, m.GetPaths())
- }
- return &FieldMask{Paths: normalizePaths(out)}
-}
-
-// IsValid reports whether all the paths are syntactically valid and
-// refer to known fields in the specified message type.
-// It reports false for a nil FieldMask.
-func (x *FieldMask) IsValid(m proto.Message) bool {
- paths := x.GetPaths()
- return x != nil && numValidPaths(m, paths) == len(paths)
-}
-
-// Append appends a list of paths to the mask and verifies that each one
-// is valid according to the specified message type.
-// An invalid path is not appended and breaks insertion of subsequent paths.
-func (x *FieldMask) Append(m proto.Message, paths ...string) error {
- numValid := numValidPaths(m, paths)
- x.Paths = append(x.Paths, paths[:numValid]...)
- paths = paths[numValid:]
- if len(paths) > 0 {
- name := m.ProtoReflect().Descriptor().FullName()
- return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
- }
- return nil
-}
-
-func numValidPaths(m proto.Message, paths []string) int {
- md0 := m.ProtoReflect().Descriptor()
- for i, path := range paths {
- md := md0
- if !rangeFields(path, func(field string) bool {
- // Search the field within the message.
- if md == nil {
- return false // not within a message
- }
- fd := md.Fields().ByName(protoreflect.Name(field))
- // The real field name of a group is the message name.
- if fd == nil {
- gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
- if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
- fd = gd
- }
- } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
- fd = nil
- }
- if fd == nil {
- return false // message does not have this field
- }
-
- // Identify the next message to search within.
- md = fd.Message() // may be nil
-
- // Repeated fields are only allowed at the last position.
- if fd.IsList() || fd.IsMap() {
- md = nil
- }
-
- return true
- }) {
- return i
- }
- }
- return len(paths)
-}
-
-// Normalize converts the mask to its canonical form where all paths are sorted
-// and redundant paths are removed.
-func (x *FieldMask) Normalize() {
- x.Paths = normalizePaths(x.Paths)
-}
-
-func normalizePaths(paths []string) []string {
- sort.Slice(paths, func(i, j int) bool {
- return lessPath(paths[i], paths[j])
- })
-
- // Elide any path that is a prefix match on the previous.
- out := paths[:0]
- for _, path := range paths {
- if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
- continue
- }
- out = append(out, path)
- }
- return out
-}
-
-// hasPathPrefix is like strings.HasPrefix, but further checks for either
-// an exact match or that the prefix is delimited by a dot.
-func hasPathPrefix(path, prefix string) bool {
- return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
-}
-
-// lessPath is a lexicographical comparison where dot is specially treated
-// as the smallest symbol.
-func lessPath(x, y string) bool {
- for i := 0; i < len(x) && i < len(y); i++ {
- if x[i] != y[i] {
- return (x[i] - '.') < (y[i] - '.')
- }
- }
- return len(x) < len(y)
-}
-
-// rangeFields is like strings.Split(path, "."), but avoids allocations by
-// iterating over each field in place and calling an iterator function.
-func rangeFields(path string, f func(field string) bool) bool {
- for {
- var field string
- if i := strings.IndexByte(path, '.'); i >= 0 {
- field, path = path[:i], path[i:]
- } else {
- field, path = path, ""
- }
-
- if !f(field) {
- return false
- }
-
- if len(path) == 0 {
- return true
- }
- path = strings.TrimPrefix(path, ".")
- }
-}
-
-func (x *FieldMask) Reset() {
- *x = FieldMask{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FieldMask) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FieldMask) ProtoMessage() {}
-
-func (x *FieldMask) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
-func (*FieldMask) Descriptor() ([]byte, []int) {
- return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *FieldMask) GetPaths() []string {
- if x != nil {
- return x.Paths
- }
- return nil
-}
-
-var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
-
-var file_google_protobuf_field_mask_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
- 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
- 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
- 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
- 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
- file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
-)
-
-func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
- file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
- file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
- })
- return file_google_protobuf_field_mask_proto_rawDescData
-}
-
-var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
- (*FieldMask)(nil), // 0: google.protobuf.FieldMask
-}
-var file_google_protobuf_field_mask_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_google_protobuf_field_mask_proto_init() }
-func file_google_protobuf_field_mask_proto_init() {
- if File_google_protobuf_field_mask_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FieldMask); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_protobuf_field_mask_proto_goTypes,
- DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs,
- MessageInfos: file_google_protobuf_field_mask_proto_msgTypes,
- }.Build()
- File_google_protobuf_field_mask_proto = out.File
- file_google_protobuf_field_mask_proto_rawDesc = nil
- file_google_protobuf_field_mask_proto_goTypes = nil
- file_google_protobuf_field_mask_proto_depIdxs = nil
-}
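
This deletes the vendored copy of fieldmaskpb, which is no longer referenced by anything in the tree (the package itself lives on upstream in google.golang.org/protobuf). Its core trick is worth noting: normalizePaths sorts with '.' treated as the smallest symbol, which places every path directly after any path that is its prefix, so one linear pass suffices to drop redundant entries. A self-contained restatement of that logic, lifted from the deleted file:

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // hasPathPrefix reports whether prefix covers path, i.e. they are
    // equal or prefix is followed in path by a '.' delimiter.
    func hasPathPrefix(path, prefix string) bool {
        return strings.HasPrefix(path, prefix) &&
            (len(path) == len(prefix) || path[len(prefix)] == '.')
    }

    // lessPath compares lexicographically with '.' as the smallest symbol.
    func lessPath(x, y string) bool {
        for i := 0; i < len(x) && i < len(y); i++ {
            if x[i] != y[i] {
                return (x[i] - '.') < (y[i] - '.')
            }
        }
        return len(x) < len(y)
    }

    func main() {
        paths := []string{"f.b.d", "f.b", "a"}
        sort.Slice(paths, func(i, j int) bool { return lessPath(paths[i], paths[j]) })
        out := paths[:0]
        for _, p := range paths {
            if len(out) > 0 && hasPathPrefix(p, out[len(out)-1]) {
                continue // covered by the previous, shorter path
            }
            out = append(out, p)
        }
        fmt.Println(out) // [a f.b]
    }
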
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1c4976e05..663fa1e09 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
+# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
## explicit; go 1.16
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
@@ -16,7 +16,7 @@ github.com/Microsoft/go-winio/internal/stringbuffer
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/tools/mkwinsyscall
github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.10.0-rc.7
+# github.com/Microsoft/hcsshim v0.10.0-rc.8
## explicit; go 1.18
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage
@@ -77,7 +77,7 @@ github.com/container-orchestrated-devices/container-device-interface/specs-go
# github.com/containerd/cgroups v1.1.0
## explicit; go 1.17
github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.7.0
+# github.com/containerd/containerd v1.7.2
## explicit; go 1.19
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
@@ -125,7 +125,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.53.1-0.20230506101404-3e93a76d461c
+# github.com/containers/common v0.53.1-0.20230608150349-2c1849f43e14
## explicit; go 1.18
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@@ -179,7 +179,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e
+# github.com/containers/image/v5 v5.25.1-0.20230608153337-8ad019310ff2
## explicit; go 1.18
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -286,8 +286,8 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.46.1
-## explicit; go 1.18
+# github.com/containers/storage v1.46.2-0.20230530174214-1dc289a244ce
+## explicit; go 1.19
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -334,8 +334,8 @@ github.com/containers/storage/pkg/tarlog
github.com/containers/storage/pkg/truncindex
github.com/containers/storage/pkg/unshare
github.com/containers/storage/types
-# github.com/coreos/go-oidc/v3 v3.5.0
-## explicit; go 1.14
+# github.com/coreos/go-oidc/v3 v3.6.0
+## explicit; go 1.19
github.com/coreos/go-oidc/v3/oidc
# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
## explicit
@@ -359,7 +359,7 @@ github.com/coreos/stream-metadata-go/stream/rhcos
# github.com/crc-org/vfkit v0.0.5-0.20230602131541-3d57f09010c9
## explicit; go 1.17
github.com/crc-org/vfkit/pkg/rest/define
-# github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7
+# github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1
## explicit
github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
# github.com/cyphar/filepath-securejoin v0.2.3
@@ -389,7 +389,7 @@ github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v24.0.0+incompatible
+# github.com/docker/docker v24.0.2+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -536,7 +536,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.14.0
+# github.com/google/go-containerregistry v0.15.2
## explicit; go 1.18
github.com/google/go-containerregistry/pkg/name
# github.com/google/go-intervals v0.0.2
@@ -552,11 +552,6 @@ github.com/google/pprof/profile
# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
## explicit; go 1.13
github.com/google/shlex
-# github.com/google/trillian v1.5.1
-## explicit; go 1.17
-github.com/google/trillian
-github.com/google/trillian/types
-github.com/google/trillian/types/internal/tls
# github.com/google/uuid v1.3.0
## explicit
github.com/google/uuid
@@ -578,10 +573,10 @@ github.com/hashicorp/go-cleanhttp
# github.com/hashicorp/go-multierror v1.1.1
## explicit; go 1.13
github.com/hashicorp/go-multierror
-# github.com/hashicorp/go-retryablehttp v0.7.2
+# github.com/hashicorp/go-retryablehttp v0.7.4
## explicit; go 1.13
github.com/hashicorp/go-retryablehttp
-# github.com/imdario/mergo v0.3.15
+# github.com/imdario/mergo v0.3.16
## explicit; go 1.13
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.1.0
@@ -642,7 +637,7 @@ github.com/mattn/go-sqlite3
# github.com/miekg/pkcs11 v1.1.1
## explicit; go 1.12
github.com/miekg/pkcs11
-# github.com/mistifyio/go-zfs/v3 v3.0.0
+# github.com/mistifyio/go-zfs/v3 v3.0.1
## explicit; go 1.14
github.com/mistifyio/go-zfs/v3
# github.com/mitchellh/mapstructure v1.5.0
@@ -680,7 +675,7 @@ github.com/nxadm/tail/winfile
# github.com/oklog/ulid v1.3.1
## explicit
github.com/oklog/ulid
-# github.com/onsi/ginkgo/v2 v2.9.7
+# github.com/onsi/ginkgo/v2 v2.10.0
## explicit; go 1.18
github.com/onsi/ginkgo/v2
github.com/onsi/ginkgo/v2/config
@@ -807,7 +802,7 @@ github.com/segmentio/ksuid
## explicit; go 1.20
github.com/sigstore/fulcio/pkg/api
github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/rekor v1.1.2-0.20230508234306-ad288b385a44
+# github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12
## explicit; go 1.19
github.com/sigstore/rekor/pkg/client
github.com/sigstore/rekor/pkg/generated/client
@@ -817,7 +812,7 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
github.com/sigstore/rekor/pkg/util
-# github.com/sigstore/sigstore v1.6.4
+# github.com/sigstore/sigstore v1.6.5
## explicit; go 1.18
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/oauth
@@ -845,7 +840,7 @@ github.com/stefanberger/go-pkcs11uri
## explicit; go 1.20
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.11.3
+# github.com/sylabs/sif/v2 v2.11.4
## explicit; go 1.19
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@@ -922,7 +917,7 @@ go.opentelemetry.io/otel/semconv/v1.12.0
# go.opentelemetry.io/otel/trace v1.15.0
## explicit; go 1.19
go.opentelemetry.io/otel/trace
-# golang.org/x/crypto v0.8.0
+# golang.org/x/crypto v0.9.0
## explicit; go 1.17
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@@ -948,7 +943,7 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
+# golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
@@ -971,7 +966,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.7.0
+# golang.org/x/oauth2 v0.8.0
## explicit; go 1.17
golang.org/x/oauth2
golang.org/x/oauth2/internal
@@ -1014,18 +1009,18 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.9.1
+# golang.org/x/tools v0.9.3
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/ast/inspector
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/packages
-golang.org/x/tools/go/types/objectpath
golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core
golang.org/x/tools/internal/event/keys
golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/event/tag
golang.org/x/tools/internal/gcimporter
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/packagesinternal
@@ -1128,7 +1123,6 @@ google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
-google.golang.org/protobuf/types/known/fieldmaskpb
google.golang.org/protobuf/types/known/timestamppb
# gopkg.in/go-jose/go-jose.v2 v2.6.1
## explicit