-rw-r--r--  .cirrus.yml | 3
-rw-r--r--  .gitignore | 1
-rw-r--r--  cmd/quadlet/main.go | 118
-rw-r--r--  cmd/quadlet/main_test.go | 100
-rwxr-xr-x  contrib/cirrus/runner.sh | 14
-rwxr-xr-x  docker/docker.in | 2
-rw-r--r--  docs/source/markdown/podman-build.1.md.in | 7
-rw-r--r--  docs/source/markdown/podman-systemd.unit.5.md | 3
-rw-r--r--  go.mod | 35
-rw-r--r--  go.sum | 122
-rwxr-xr-x  hack/podman_cleanup_tracer.bt | 151
-rw-r--r--  libpod/networking_linux.go | 29
-rw-r--r--  pkg/rootless/rootless_linux.go | 2
-rw-r--r--  test/e2e/quadlet_test.go | 2
-rw-r--r--  test/system/035-logs.bats | 37
-rw-r--r--  test/system/250-systemd.bats | 122
-rw-r--r--  test/system/252-quadlet.bats | 2
-rw-r--r--  test/system/505-networking-pasta.bats | 61
-rw-r--r--  test/system/550-pause-process.bats | 19
-rw-r--r--  test/system/610-format.bats | 12
-rw-r--r--  test/system/700-play.bats | 205
-rw-r--r--  troubleshooting.md | 3
-rw-r--r--  vendor/dario.cat/mergo/.gitignore | 3
-rw-r--r--  vendor/dario.cat/mergo/README.md | 102
-rw-r--r--  vendor/dario.cat/mergo/map.go | 2
-rw-r--r--  vendor/dario.cat/mergo/merge.go | 2
-rw-r--r--  vendor/github.com/containers/common/libimage/copier.go | 34
-rw-r--r--  vendor/github.com/containers/common/libimage/import.go | 4
-rw-r--r--  vendor/github.com/containers/common/libimage/manifest_list.go | 164
-rw-r--r--  vendor/github.com/containers/common/libimage/manifests/manifests.go | 28
-rw-r--r--  vendor/github.com/containers/common/libimage/pull.go | 12
-rw-r--r--  vendor/github.com/containers/common/libimage/push.go | 4
-rw-r--r--  vendor/github.com/containers/common/libimage/save.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/netns/netns_linux.go | 154
-rw-r--r--  vendor/github.com/containers/image/v5/copy/compression.go | 120
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 34
-rw-r--r--  vendor/github.com/containers/image/v5/copy/progress_bars.go | 13
-rw-r--r--  vendor/github.com/containers/image/v5/copy/sign.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/single.go | 36
-rw-r--r--  vendor/github.com/containers/image/v5/docker/body_reader.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/client.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go | 22
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go | 73
-rw-r--r--  vendor/github.com/containers/image/v5/internal/blobinfocache/types.go | 21
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go | 3
-rw-r--r--  vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/internal/manifest/manifest.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/internal/manifest/oci_index.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/internal/private/private.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema1.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_delete.go | 121
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift-copies.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go | 72
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go | 38
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go | 129
-rw-r--r--  vendor/github.com/containers/image/v5/signature/fulcio_cert.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/signature/internal/rekor_set.go | 36
-rw-r--r--  vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go | 71
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go | 133
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go | 38
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go | 184
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_reference_match.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_types.go | 29
-rw-r--r--  vendor/github.com/containers/image/v5/signature/simple.go | 10
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_dest.go | 8
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_reference.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_src.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/tarball/tarball_src.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/tarball/tarball_transport.go | 4
-rw-r--r--  vendor/github.com/containers/storage/.cirrus.yml | 2
-rw-r--r--  vendor/github.com/containers/storage/Makefile | 2
-rw-r--r--  vendor/github.com/containers/storage/check.go | 16
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 41
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/mount.go | 12
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 109
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 4
-rw-r--r--  vendor/github.com/containers/storage/images.go | 67
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 120
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_linux.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes.go | 8
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_other.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/cache_linux.go | 27
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/directory/directory_unix.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/directory/directory_windows.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go | 10
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/unmount_unix.go | 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/parsers/parsers.go | 14
-rw-r--r--  vendor/github.com/containers/storage/pkg/stringutils/stringutils.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go | 2
-rw-r--r--  vendor/github.com/containers/storage/store.go | 83
-rw-r--r--  vendor/github.com/containers/storage/types/options.go | 4
-rw-r--r--  vendor/github.com/containers/storage/userns.go | 12
-rw-r--r--  vendor/github.com/containers/storage/utils.go | 15
-rw-r--r--  vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go | 16
-rw-r--r--  vendor/github.com/coreos/go-oidc/v3/oidc/verify.go | 4
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md | 24
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/crypter.go | 10
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/jwk.go | 21
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/opaque.go | 3
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/signing.go | 10
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 22
-rw-r--r--  vendor/github.com/klauspost/compress/flate/deflate.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate.go | 74
-rw-r--r--  vendor/github.com/klauspost/compress/fse/decompress.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_better.go | 32
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_dfast.go | 16
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go | 19
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s | 8
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/objects.go | 93
-rw-r--r--  vendor/github.com/letsencrypt/boulder/core/util.go | 4
-rw-r--r--  vendor/github.com/letsencrypt/boulder/goodkey/good_key.go | 129
-rw-r--r--  vendor/github.com/moby/sys/capability/.codespellrc | 3
-rw-r--r--  vendor/github.com/moby/sys/capability/.golangci.yml | 6
-rw-r--r--  vendor/github.com/moby/sys/capability/CHANGELOG.md | 90
-rw-r--r--  vendor/github.com/moby/sys/capability/LICENSE | 25
-rw-r--r--  vendor/github.com/moby/sys/capability/README.md | 13
-rw-r--r--  vendor/github.com/moby/sys/capability/capability.go | 144
-rw-r--r--  vendor/github.com/moby/sys/capability/capability_linux.go | 541
-rw-r--r--  vendor/github.com/moby/sys/capability/capability_noop.go | 26
-rw-r--r--  vendor/github.com/moby/sys/capability/enum.go | 330
-rw-r--r--  vendor/github.com/moby/sys/capability/enum_gen.go | 137
-rw-r--r--  vendor/github.com/moby/sys/capability/syscall_linux.go | 153
-rw-r--r--  vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go | 38
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go | 70
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go | 3
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go | 30
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/create.go | 40
-rw-r--r--  vendor/golang.org/x/oauth2/token.go | 7
-rw-r--r--  vendor/golang.org/x/time/LICENSE | 4
-rw-r--r--  vendor/modules.txt | 51
142 files changed, 4008 insertions(+), 1641 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index 23c0be84a..cbdaf422e 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -401,6 +401,9 @@ bindings_task:
server_log_artifacts:
path: ./podman-server.log
type: text/plain
+ cleanup_tracer_artifacts:
+ path: ./podman-cleanup-tracer.log
+ type: text/plain
df_script: '$SCRIPT_BASE/logcollector.sh df'
audit_log_script: '$SCRIPT_BASE/logcollector.sh audit'
journal_script: '$SCRIPT_BASE/logcollector.sh journal'
diff --git a/.gitignore b/.gitignore
index d1813cc8f..1b596b2b4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,3 +57,4 @@ contrib/win-installer/shasums
contrib/win-installer/*.wixobj
contrib/win-installer/*.wixpdb
contrib/win-installer/*.log
+podman-cleanup-tracer.log*
diff --git a/cmd/quadlet/main.go b/cmd/quadlet/main.go
index bd1987559..a62a01a1d 100644
--- a/cmd/quadlet/main.go
+++ b/cmd/quadlet/main.go
@@ -107,9 +107,11 @@ func Debugf(format string, a ...interface{}) {
// For system generators these are in /usr/share/containers/systemd (for distro files)
// and /etc/containers/systemd (for sysadmin files).
// For user generators these can live in $XDG_RUNTIME_DIR/containers/systemd, /etc/containers/systemd/users, /etc/containers/systemd/users/$UID, and $XDG_CONFIG_HOME/containers/systemd
-func getUnitDirs(rootless bool) []string {
+func getUnitDirs(rootless bool) map[string]struct{} {
+ dirs := make(map[string]struct{}, 0)
+
// Allow overriding source dir, this is mainly for the CI tests
- if varExist, dirs := getDirsFromEnv(); varExist {
+ if getDirsFromEnv(dirs) {
return dirs
}
@@ -119,61 +121,57 @@ func getUnitDirs(rootless bool) []string {
if rootless {
systemUserDirLevel := len(strings.Split(resolvedUnitDirAdminUser, string(os.PathSeparator)))
nonNumericFilter := getNonNumericFilter(resolvedUnitDirAdminUser, systemUserDirLevel)
- return getRootlessDirs(nonNumericFilter, userLevelFilter)
+ getRootlessDirs(dirs, nonNumericFilter, userLevelFilter)
+ } else {
+ getRootDirs(dirs, userLevelFilter)
}
-
- return getRootDirs(userLevelFilter)
+ return dirs
}
-func getDirsFromEnv() (bool, []string) {
+func getDirsFromEnv(dirs map[string]struct{}) bool {
unitDirsEnv := os.Getenv("QUADLET_UNIT_DIRS")
if len(unitDirsEnv) == 0 {
- return false, nil
+ return false
}
- dirs := make([]string, 0)
for _, eachUnitDir := range strings.Split(unitDirsEnv, ":") {
if !filepath.IsAbs(eachUnitDir) {
Logf("%s not a valid file path", eachUnitDir)
- return true, nil
+ break
}
- dirs = appendSubPaths(dirs, eachUnitDir, false, nil)
+ appendSubPaths(dirs, eachUnitDir, false, nil)
}
- return true, dirs
+ return true
}
-func getRootlessDirs(nonNumericFilter, userLevelFilter func(string, bool) bool) []string {
- dirs := make([]string, 0)
-
+func getRootlessDirs(dirs map[string]struct{}, nonNumericFilter, userLevelFilter func(string, bool) bool) {
runtimeDir, found := os.LookupEnv("XDG_RUNTIME_DIR")
if found {
- dirs = appendSubPaths(dirs, path.Join(runtimeDir, "containers/systemd"), false, nil)
+ appendSubPaths(dirs, path.Join(runtimeDir, "containers/systemd"), false, nil)
}
configDir, err := os.UserConfigDir()
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: %v", err)
- return nil
+ return
}
- dirs = appendSubPaths(dirs, path.Join(configDir, "containers/systemd"), false, nil)
+ appendSubPaths(dirs, path.Join(configDir, "containers/systemd"), false, nil)
u, err := user.Current()
if err == nil {
- dirs = appendSubPaths(dirs, filepath.Join(quadlet.UnitDirAdmin, "users"), true, nonNumericFilter)
- dirs = appendSubPaths(dirs, filepath.Join(quadlet.UnitDirAdmin, "users", u.Uid), true, userLevelFilter)
+ appendSubPaths(dirs, filepath.Join(quadlet.UnitDirAdmin, "users"), true, nonNumericFilter)
+ appendSubPaths(dirs, filepath.Join(quadlet.UnitDirAdmin, "users", u.Uid), true, userLevelFilter)
} else {
fmt.Fprintf(os.Stderr, "Warning: %v", err)
}
- return append(dirs, filepath.Join(quadlet.UnitDirAdmin, "users"))
+ dirs[filepath.Join(quadlet.UnitDirAdmin, "users")] = struct{}{}
}
-func getRootDirs(userLevelFilter func(string, bool) bool) []string {
- dirs := make([]string, 0)
-
- dirs = appendSubPaths(dirs, quadlet.UnitDirTemp, false, userLevelFilter)
- dirs = appendSubPaths(dirs, quadlet.UnitDirAdmin, false, userLevelFilter)
- return appendSubPaths(dirs, quadlet.UnitDirDistro, false, nil)
+func getRootDirs(dirs map[string]struct{}, userLevelFilter func(string, bool) bool) {
+ appendSubPaths(dirs, quadlet.UnitDirTemp, false, userLevelFilter)
+ appendSubPaths(dirs, quadlet.UnitDirAdmin, false, userLevelFilter)
+ appendSubPaths(dirs, quadlet.UnitDirDistro, false, nil)
}
func resolveUnitDirAdminUser() string {
@@ -189,7 +187,7 @@ func resolveUnitDirAdminUser() string {
return resolvedUnitDirAdminUser
}
-func appendSubPaths(dirs []string, path string, isUserFlag bool, filterPtr func(string, bool) bool) []string {
+func appendSubPaths(dirs map[string]struct{}, path string, isUserFlag bool, filterPtr func(string, bool) bool) {
resolvedPath, err := filepath.EvalSymlinks(path)
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
@@ -197,27 +195,55 @@ func appendSubPaths(dirs []string, path string, isUserFlag bool, filterPtr func(
}
// Despite the failure add the path to the list for logging purposes
// This is the equivalent of adding the path when info==nil below
- dirs = append(dirs, path)
- return dirs
+ dirs[path] = struct{}{}
+ return
}
- err = filepath.WalkDir(resolvedPath, func(_path string, info os.DirEntry, err error) error {
- // Ignore drop-in directory subpaths
- if !strings.HasSuffix(_path, ".d") {
- if info == nil || info.IsDir() {
- if filterPtr == nil || filterPtr(_path, isUserFlag) {
- dirs = append(dirs, _path)
- }
- }
+ // If the resolvedPath is already in the map no need to read it again
+ if _, already := dirs[resolvedPath]; already {
+ return
+ }
+
+ // Don't traverse drop-in directories
+ if strings.HasSuffix(resolvedPath, ".d") {
+ return
+ }
+
+ // Check if the directory should be filtered out
+ if filterPtr != nil && !filterPtr(resolvedPath, isUserFlag) {
+ return
+ }
+
+ stat, err := os.Stat(resolvedPath)
+ if err != nil {
+ if !errors.Is(err, fs.ErrNotExist) {
+ Debugf("Error occurred resolving path %q: %s", path, err)
}
- return err
- })
+ return
+ }
+
+ // Not a directory nothing to add
+ if !stat.IsDir() {
+ return
+ }
+
+ // Add the current directory
+ dirs[resolvedPath] = struct{}{}
+
+ // Read the contents of the directory
+ entries, err := os.ReadDir(resolvedPath)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
Debugf("Error occurred walking sub directories %q: %s", path, err)
}
+ return
+ }
+
+ // Recursively run through the contents of the directory
+ for _, entry := range entries {
+ fullPath := filepath.Join(resolvedPath, entry.Name())
+ appendSubPaths(dirs, fullPath, isUserFlag, filterPtr)
}
- return dirs
}
func getNonNumericFilter(resolvedUnitDirAdminUser string, systemUserDirLevel int) func(string, bool) bool {
@@ -297,7 +323,7 @@ func loadUnitsFromDir(sourcePath string) ([]*parser.UnitFile, error) {
return units, prevError
}
-func loadUnitDropins(unit *parser.UnitFile, sourcePaths []string) error {
+func loadUnitDropins(unit *parser.UnitFile, sourcePaths map[string]struct{}) error {
var prevError error
reportError := func(err error) {
if prevError != nil {
@@ -309,7 +335,7 @@ func loadUnitDropins(unit *parser.UnitFile, sourcePaths []string) error {
dropinDirs := []string{}
unitDropinPaths := unit.GetUnitDropinPaths()
- for _, sourcePath := range sourcePaths {
+ for sourcePath := range sourcePaths {
for _, dropinPath := range unitDropinPaths {
dropinDirs = append(dropinDirs, path.Join(sourcePath, dropinPath))
}
@@ -620,10 +646,10 @@ func process() error {
Debugf("Starting quadlet-generator, output to: %s", outputPath)
}
- sourcePaths := getUnitDirs(isUserFlag)
+ sourcePathsMap := getUnitDirs(isUserFlag)
var units []*parser.UnitFile
- for _, d := range sourcePaths {
+ for d := range sourcePathsMap {
if result, err := loadUnitsFromDir(d); err != nil {
reportError(err)
} else {
@@ -634,12 +660,12 @@ func process() error {
if len(units) == 0 {
// containers/podman/issues/17374: exit cleanly but log that we
// had nothing to do
- Debugf("No files parsed from %s", sourcePaths)
+ Debugf("No files parsed from %s", sourcePathsMap)
return prevError
}
for _, unit := range units {
- if err := loadUnitDropins(unit, sourcePaths); err != nil {
+ if err := loadUnitDropins(unit, sourcePathsMap); err != nil {
reportError(err)
}
}
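The net effect of the main.go hunks above: unit search directories now accumulate in a map[string]struct{} used as a set, and appendSubPaths recurses with os.ReadDir instead of filepath.WalkDir, so a directory reached a second time through a symlink is neither re-added nor re-traversed. A minimal standalone sketch of that pattern follows (hypothetical names and paths, not the actual Podman code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// collectDirs walks root recursively, resolving symlinks with
// filepath.EvalSymlinks and using the map as a set to break cycles.
func collectDirs(dirs map[string]struct{}, root string) {
	resolved, err := filepath.EvalSymlinks(root)
	if err != nil {
		return // this sketch simply skips dangling links
	}
	if _, seen := dirs[resolved]; seen {
		return // already visited: this is what stops symlink loops
	}
	info, err := os.Stat(resolved)
	if err != nil || !info.IsDir() {
		return // not a directory, nothing to add
	}
	dirs[resolved] = struct{}{}
	entries, err := os.ReadDir(resolved)
	if err != nil {
		return
	}
	for _, e := range entries {
		collectDirs(dirs, filepath.Join(resolved, e.Name()))
	}
}

func main() {
	dirs := make(map[string]struct{})
	collectDirs(dirs, "/etc/containers/systemd") // example root only
	for d := range dirs {
		fmt.Println(d)
	}
}

The trade-off is that a set has no defined order, which is why the callers above now range over the map instead of indexing a slice.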
diff --git a/cmd/quadlet/main_test.go b/cmd/quadlet/main_test.go
index 74fec0a02..e429c507d 100644
--- a/cmd/quadlet/main_test.go
+++ b/cmd/quadlet/main_test.go
@@ -64,31 +64,36 @@ func TestUnitDirs(t *testing.T) {
resolvedUnitDirAdminUser := resolveUnitDirAdminUser()
userLevelFilter := getUserLevelFilter(resolvedUnitDirAdminUser)
- rootDirs := []string{}
- rootDirs = appendSubPaths(rootDirs, quadlet.UnitDirTemp, false, userLevelFilter)
- rootDirs = appendSubPaths(rootDirs, quadlet.UnitDirAdmin, false, userLevelFilter)
- rootDirs = appendSubPaths(rootDirs, quadlet.UnitDirDistro, false, userLevelFilter)
- assert.Equal(t, unitDirs, rootDirs, "rootful unit dirs should match")
+ rootDirs := make(map[string]struct{}, 0)
+ appendSubPaths(rootDirs, quadlet.UnitDirTemp, false, userLevelFilter)
+ appendSubPaths(rootDirs, quadlet.UnitDirAdmin, false, userLevelFilter)
+ appendSubPaths(rootDirs, quadlet.UnitDirDistro, false, userLevelFilter)
+ assert.Equal(t, rootDirs, unitDirs, "rootful unit dirs should match")
configDir, err := os.UserConfigDir()
assert.Nil(t, err)
- rootlessDirs := []string{}
+ rootlessDirs := make(map[string]struct{}, 0)
systemUserDirLevel := len(strings.Split(resolvedUnitDirAdminUser, string(os.PathSeparator)))
nonNumericFilter := getNonNumericFilter(resolvedUnitDirAdminUser, systemUserDirLevel)
runtimeDir, found := os.LookupEnv("XDG_RUNTIME_DIR")
if found {
- rootlessDirs = appendSubPaths(rootlessDirs, path.Join(runtimeDir, "containers/systemd"), false, nil)
+ appendSubPaths(rootlessDirs, path.Join(runtimeDir, "containers/systemd"), false, nil)
}
- rootlessDirs = appendSubPaths(rootlessDirs, path.Join(configDir, "containers/systemd"), false, nil)
- rootlessDirs = appendSubPaths(rootlessDirs, filepath.Join(quadlet.UnitDirAdmin, "users"), true, nonNumericFilter)
- rootlessDirs = appendSubPaths(rootlessDirs, filepath.Join(quadlet.UnitDirAdmin, "users", u.Uid), true, userLevelFilter)
- rootlessDirs = append(rootlessDirs, filepath.Join(quadlet.UnitDirAdmin, "users"))
+ appendSubPaths(rootlessDirs, path.Join(configDir, "containers/systemd"), false, nil)
+ appendSubPaths(rootlessDirs, filepath.Join(quadlet.UnitDirAdmin, "users"), true, nonNumericFilter)
+ appendSubPaths(rootlessDirs, filepath.Join(quadlet.UnitDirAdmin, "users", u.Uid), true, userLevelFilter)
+ rootlessDirs[filepath.Join(quadlet.UnitDirAdmin, "users")] = struct{}{}
unitDirs = getUnitDirs(true)
- assert.Equal(t, unitDirs, rootlessDirs, "rootless unit dirs should match")
+ assert.Equal(t, rootlessDirs, unitDirs, "rootless unit dirs should match")
+
+ // Test that relative path returns an empty list
+ t.Setenv("QUADLET_UNIT_DIRS", "./relative/path")
+ unitDirs = getUnitDirs(false)
+ assert.Equal(t, map[string]struct{}{}, unitDirs)
name, err := os.MkdirTemp("", "dir")
assert.Nil(t, err)
@@ -97,10 +102,10 @@ func TestUnitDirs(t *testing.T) {
t.Setenv("QUADLET_UNIT_DIRS", name)
unitDirs = getUnitDirs(false)
- assert.Equal(t, unitDirs, []string{name}, "rootful should use environment variable")
+ assert.Equal(t, map[string]struct{}{name: {}}, unitDirs, "rootful should use environment variable")
unitDirs = getUnitDirs(true)
- assert.Equal(t, unitDirs, []string{name}, "rootless should use environment variable")
+ assert.Equal(t, map[string]struct{}{name: {}}, unitDirs, "rootless should use environment variable")
symLinkTestBaseDir, err := os.MkdirTemp("", "podman-symlinktest")
assert.Nil(t, err)
@@ -118,7 +123,72 @@ func TestUnitDirs(t *testing.T) {
assert.Nil(t, err)
t.Setenv("QUADLET_UNIT_DIRS", symlink)
unitDirs = getUnitDirs(true)
- assert.Equal(t, unitDirs, []string{actualDir, innerDir}, "directory resolution should follow symlink")
+ assert.Equal(t, map[string]struct{}{actualDir: {}, innerDir: {}}, unitDirs, "directory resolution should follow symlink")
+
+ // Make a more elaborate test with the following structure:
+ // <BASE>/linkToDir - real directory to link to
+ // <BASE>/linkToDir/a - real directory
+ // <BASE>/linkToDir/b - link to <BASE>/unitDir/b/a should be ignored
+ // <BASE>/linkToDir/c - link to <BASE>/unitDir should be ignored
+ // <BASE>/unitDir - start from here
+ // <BASE>/unitDir/a - real directory
+ // <BASE>/unitDir/a/a - real directory
+ // <BASE>/unitDir/a/a/a - real directory
+ // <BASE>/unitDir/b/a - real directory
+ // <BASE>/unitDir/b/b - link to <BASE>/unitDir/a/a should be ignored
+ // <BASE>/unitDir/c - link to <BASE>/linkToDir
+ symLinkRecursiveTestBaseDir, err := os.MkdirTemp("", "podman-symlink-recursive-test")
+ assert.Nil(t, err)
+ // remove the temporary directory at the end of the program
+ defer os.RemoveAll(symLinkRecursiveTestBaseDir)
+
+ createDir := func(path, name string, dirs map[string]struct{}) string {
+ dirName := filepath.Join(path, name)
+ assert.NotContains(t, dirs, dirName)
+ err = os.Mkdir(dirName, 0755)
+ assert.Nil(t, err)
+ dirs[dirName] = struct{}{}
+ return dirName
+ }
+ expectedDirs := make(map[string]struct{}, 0)
+ // Create <BASE>/linkToDir
+ linkToDirPath := createDir(symLinkRecursiveTestBaseDir, "linkToDir", expectedDirs)
+ // Create <BASE>/linkToDir/a
+ createDir(linkToDirPath, "a", expectedDirs)
+ // Create <BASE>/unitDir
+ unitsDirPath := createDir(symLinkRecursiveTestBaseDir, "unitsDir", expectedDirs)
+ // Create <BASE>/unitDir/a
+ aDirPath := createDir(unitsDirPath, "a", expectedDirs)
+ // Create <BASE>/unitDir/a/a
+ aaDirPath := createDir(aDirPath, "a", expectedDirs)
+ // Create <BASE>/unitDir/a/b
+ createDir(aDirPath, "b", expectedDirs)
+ // Create <BASE>/unitDir/a/a/a
+ createDir(aaDirPath, "a", expectedDirs)
+ // Create <BASE>/unitDir/b
+ bDirPath := createDir(unitsDirPath, "b", expectedDirs)
+ // Create <BASE>/unitDir/b/a
+ baDirPath := createDir(bDirPath, "a", expectedDirs)
+
+ linkDir := func(path, name, target string) {
+ linkName := filepath.Join(path, name)
+ err = os.Symlink(target, linkName)
+ assert.Nil(t, err)
+ }
+ // Link <BASE>/unitDir/b/b to <BASE>/unitDir/a/a
+ linkDir(bDirPath, "b", aaDirPath)
+ // Link <BASE>/linkToDir/b to <BASE>/unitDir/b/a
+ linkDir(linkToDirPath, "b", baDirPath)
+ // Link <BASE>/linkToDir/c to <BASE>/unitDir
+ linkDir(linkToDirPath, "c", unitsDirPath)
+ // Link <BASE>/unitDir/c to <BASE>/linkToDir
+ linkDir(unitsDirPath, "c", linkToDirPath)
+
+ t.Setenv("QUADLET_UNIT_DIRS", unitsDirPath)
+ unitDirs = getUnitDirs(true)
+ assert.Equal(t, expectedDirs, unitDirs, "directory resolution should follow symlink")
+ // remove the temporary directory at the end of the program
+ defer os.RemoveAll(symLinkTestBaseDir)
// because chroot is only available for root,
// unshare the namespace and map user to root
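One property the updated test leans on: testify's assert.Equal compares maps by contents, with no element order to get wrong, so the expected directories can be collected in any order. A tiny self-contained illustration (hypothetical test, not part of this patch):

package quadlet_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSetEquality(t *testing.T) {
	expected := map[string]struct{}{"/a": {}, "/b": {}}
	actual := map[string]struct{}{"/b": {}, "/a": {}}
	// Maps are unordered, so this passes regardless of insertion order;
	// with slices the same comparison would depend on element order.
	assert.Equal(t, expected, actual)
}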
diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh
index ea6235eba..8d4dddd69 100755
--- a/contrib/cirrus/runner.sh
+++ b/contrib/cirrus/runner.sh
@@ -458,6 +458,15 @@ msg "************************************************************"
((${SETUP_ENVIRONMENT:-0})) || \
die "Expecting setup_environment.sh to have completed successfully"
+if [[ "$UID" -eq 0 ]] && ((CONTAINER==0)); then
+ # start ebpf cleanup tracer (#23487)
+ msg "start ebpf cleanup tracer"
+ # replace zero bytes to make the log more readable
+ bpftrace $GOSRC/hack/podman_cleanup_tracer.bt |& \
+ tr '\0' ' ' >$GOSRC/podman-cleanup-tracer.log &
+ TRACER_PID=$!
+fi
+
# shellcheck disable=SC2154
if [[ "$PRIV_NAME" == "rootless" ]] && [[ "$UID" -eq 0 ]]; then
# Remove /var/lib/cni, it is not required for rootless cni.
@@ -499,4 +508,9 @@ fi
showrun $handler
+if [[ -n "$TRACER_PID" ]]; then
+ # ignore any error here
+ kill "$TRACER_PID" || true
+fi
+
showrun echo "finished"
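For readers more at home in the repo's primary language, the runner.sh change is the usual start-in-background/kill-on-exit pattern around a long-running tracer. A rough Go sketch of the same idea (hypothetical, not part of the CI scripts):

package main

import (
	"os"
	"os/exec"
)

func main() {
	log, err := os.Create("podman-cleanup-tracer.log")
	if err != nil {
		panic(err)
	}
	defer log.Close()

	// Start the tracer in the background, mirroring `bpftrace ... &`.
	// (The shell version also pipes through `tr '\0' ' '` to strip NULs.)
	tracer := exec.Command("bpftrace", "hack/podman_cleanup_tracer.bt")
	tracer.Stdout = log
	tracer.Stderr = log
	if err := tracer.Start(); err != nil {
		panic(err)
	}

	// ... run the test suite here ...

	// Equivalent of `kill "$TRACER_PID" || true`: ignore any error.
	_ = tracer.Process.Kill()
}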
diff --git a/docker/docker.in b/docker/docker.in
index 61e1b64c2..0b4aa838c 100755
--- a/docker/docker.in
+++ b/docker/docker.in
@@ -1,4 +1,4 @@
#!/bin/sh
-[ -e ${ETCDIR}/containers/nodocker ] || \
+[ -e ${ETCDIR}/containers/nodocker ] || [ -e "\${XDG_CONFIG_HOME-\$HOME/.config}/containers/nodocker" ] || \
echo "Emulate Docker CLI using podman. Create ${ETCDIR}/containers/nodocker to quiet msg." >&2
exec ${BINDIR}/podman "$@"
diff --git a/docs/source/markdown/podman-build.1.md.in b/docs/source/markdown/podman-build.1.md.in
index 98545a03c..08c5d3d14 100644
--- a/docs/source/markdown/podman-build.1.md.in
+++ b/docs/source/markdown/podman-build.1.md.in
@@ -44,9 +44,8 @@ NOTE: `podman build` uses code sourced from the `Buildah` project to build
container images. This `Buildah` code creates `Buildah` containers for the
`RUN` options in container storage. In certain situations, when the
`podman build` crashes or users kill the `podman build` process, these external
-containers can be left in container storage. Use the `podman ps --all --storage`
-command to see these containers. External containers can be removed with the
-`podman rm --storage` command.
+containers can be left in container storage. Use the `podman ps --all --external`
+command to see these containers.
`podman buildx build` command is an alias of `podman build`. Not all `buildx build` features are available in Podman. The `buildx build` option is provided for scripting compatibility.
@@ -312,7 +311,7 @@ values for $GOOS and $GOARCH at https://golang.org/doc/install/source#environmen
and can also be found by running `go tool dist list`.
While `podman build` is happy to use base images and build images for any
-platform that exists, `RUN` instructions are able to succeed without
+platform that exists, `RUN` instructions are unable to succeed without
the help of emulation provided by packages like `qemu-user-static`.
@@option pull.image
diff --git a/docs/source/markdown/podman-systemd.unit.5.md b/docs/source/markdown/podman-systemd.unit.5.md
index 976f062dc..827303653 100644
--- a/docs/source/markdown/podman-systemd.unit.5.md
+++ b/docs/source/markdown/podman-systemd.unit.5.md
@@ -35,8 +35,7 @@ Quadlet files for non-root users can be placed in the following directories
### Using symbolic links
-Quadlet supports using symbolic links for the base of the search paths.
-Symbolic links below the search paths are not supported.
+Quadlet supports using symbolic links for the base of the search paths and inside them.
## DESCRIPTION
diff --git a/go.mod b/go.mod
index fc44452dc..2e15fb81c 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/containers/podman/v5
// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
-go 1.22.0
+go 1.22.6
require (
github.com/BurntSushi/toml v1.4.0
@@ -13,14 +13,14 @@ require (
github.com/checkpoint-restore/go-criu/v7 v7.2.0
github.com/containernetworking/plugins v1.5.1
github.com/containers/buildah v1.37.0
- github.com/containers/common v0.60.1-0.20240918122915-db8145750e1d
+ github.com/containers/common v0.60.1-0.20240920125326-ff6611ae40ad
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.5
- github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6
+ github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46
github.com/containers/libhvee v0.7.1
github.com/containers/ocicrypt v1.2.0
github.com/containers/psgo v1.9.0
- github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483
+ github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0
github.com/containers/winquit v1.1.0
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/coreos/stream-metadata-go v0.4.4
@@ -29,7 +29,7 @@ require (
github.com/cyphar/filepath-securejoin v0.3.2
github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e
github.com/docker/distribution v2.8.3+incompatible
- github.com/docker/docker v27.3.0+incompatible
+ github.com/docker/docker v27.3.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8
github.com/docker/go-units v0.5.0
@@ -48,6 +48,7 @@ require (
github.com/mattn/go-shellwords v1.0.12
github.com/mattn/go-sqlite3 v1.14.23
github.com/mdlayher/vsock v1.2.1
+ github.com/moby/sys/capability v0.3.0
github.com/moby/sys/user v0.3.0
github.com/moby/term v0.5.0
github.com/nxadm/tail v1.4.11
@@ -85,7 +86,7 @@ require (
)
require (
- dario.cat/mergo v1.0.0 // indirect
+ dario.cat/mergo v1.0.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/hcsshim v0.12.6 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
@@ -106,7 +107,7 @@ require (
github.com/containernetworking/cni v1.2.3 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c // indirect
- github.com/coreos/go-oidc/v3 v3.10.0 // indirect
+ github.com/coreos/go-oidc/v3 v3.11.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -121,7 +122,7 @@ require (
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gin-gonic/gin v1.9.1 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
- github.com/go-jose/go-jose/v4 v4.0.2 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.4 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
@@ -144,7 +145,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/go-containerregistry v0.20.1 // indirect
+ github.com/google/go-containerregistry v0.20.2 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -153,11 +154,11 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
+ github.com/klauspost/compress v1.17.10 // indirect
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
- github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
+ github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
@@ -191,12 +192,12 @@ require (
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
- github.com/sigstore/fulcio v1.4.5 // indirect
+ github.com/sigstore/fulcio v1.6.4 // indirect
github.com/sigstore/rekor v1.3.6 // indirect
- github.com/sigstore/sigstore v1.8.4 // indirect
+ github.com/sigstore/sigstore v1.8.9 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
- github.com/sylabs/sif/v2 v2.18.0 // indirect
+ github.com/sylabs/sif/v2 v2.19.1 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -217,10 +218,10 @@ require (
go.opentelemetry.io/otel/trace v1.28.0 // indirect
golang.org/x/arch v0.7.0 // indirect
golang.org/x/mod v0.20.0 // indirect
- golang.org/x/oauth2 v0.22.0 // indirect
- golang.org/x/time v0.5.0 // indirect
+ golang.org/x/oauth2 v0.23.0 // indirect
+ golang.org/x/time v0.6.0 // indirect
golang.org/x/tools v0.24.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect
google.golang.org/grpc v1.65.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
diff --git a/go.sum b/go.sum
index fd9edeb3f..8155a4af8 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
@@ -81,14 +81,14 @@ github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+
github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
github.com/containers/buildah v1.37.0 h1:jvHwu1vIwIqnHyOSg9eef9Apdpry+5oWLrm43gdf8Rk=
github.com/containers/buildah v1.37.0/go.mod h1:MKd79tkluMf6vtH06SedhBQK5OB7E0pFVIuiTTw3dJk=
-github.com/containers/common v0.60.1-0.20240918122915-db8145750e1d h1:AAEZbfeh92xKohiQoEk6sx+e/8OLIXzIElJ7H69cxVg=
-github.com/containers/common v0.60.1-0.20240918122915-db8145750e1d/go.mod h1:CPKbz94MP7eKS5LdkBZbcDbQgAHncjogq/hYY9r4Spw=
+github.com/containers/common v0.60.1-0.20240920125326-ff6611ae40ad h1:Ida4yFcnk+xGPynWR267zGGUddWTfpAVMSzo6PhjPFQ=
+github.com/containers/common v0.60.1-0.20240920125326-ff6611ae40ad/go.mod h1:UjxkwBehRqlASg/duCPlXbsc2hu5y+iYwUt+8/N4w+8=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/gvisor-tap-vsock v0.7.5 h1:bTy4u3DOmmUPwurL6me2rsgfypAFDhyeJleUcQmBR/E=
github.com/containers/gvisor-tap-vsock v0.7.5/go.mod h1:GW9jOqAEEGdaS20XwTYdm6KCYDHIulOE/yEEOabkoE4=
-github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6 h1:nXEEUAo8l2HLlMBy+LsHju2AikpA30jvlTSHbnjJXVw=
-github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6/go.mod h1:r//zsX8SjmVH0F87d+gakcgR4W5HTFGSgSLB4sufW6A=
+github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46 h1:eIwxm8+oAoTk+PDuOTbZRFG1DBF5tAlFO+niIamyzaM=
+github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46/go.mod h1:GgaW+YZJaJmcGtyPZNtsggfM4BBYIMfu/fFK62ZKU0o=
github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4=
github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -99,12 +99,12 @@ github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sir
github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483 h1:hQOAlIad+xjukeGFHQbH/x5I2zuPNCXmjvSrxX5ERF4=
-github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483/go.mod h1:fRTU33KP5BXpOIWDxDgU5LpHbrOzWxmVmtm/3PYLlgE=
+github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 h1:0NNBYNpPFzQUKXVq+oQG6NFQcBwtbs2luxl/bVulbPs=
+github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0/go.mod h1:Gx8WE9kURdCyEuB9cq8Kq5sRDRbpZi34lnOQ3zAGK2s=
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
-github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
-github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
+github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
+github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc=
@@ -134,12 +134,12 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
-github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
+github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.3.0+incompatible h1:BNb1QY6o4JdKpqwi9IB+HUYcRRrVN4aGFUTvDmWYK1A=
-github.com/docker/docker v27.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -173,8 +173,8 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
-github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
-github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
+github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -211,12 +211,12 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74=
github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
-github.com/go-rod/rod v0.116.0 h1:ypRryjTys3EnqHskJ/TdgodFMvXV0EHvmy4bSkKZgHM=
-github.com/go-rod/rod v0.116.0/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
-github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -252,8 +252,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0=
-github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
+github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -275,8 +275,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -310,8 +310,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
+github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -326,8 +326,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
-github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I=
-github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 h1:DZMFueDbfz6PNc1GwDRA8+6lBx1TB9UnxDQliCqR73Y=
github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2/go.mod h1:SWzULI85WerrFt3u+nIm5F9l7EvxZTKQvd0InF3nmgM=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
@@ -365,6 +365,8 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg=
+github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
@@ -382,6 +384,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
@@ -423,25 +427,25 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
+github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
-github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
+github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
+github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rootless-containers/rootlesskit/v2 v2.3.1 h1:wdYtdKxWFvVLby9ThMP6O6/v2q/GmOXbkRi+4m9nPW0=
github.com/rootless-containers/rootlesskit/v2 v2.3.1/go.mod h1:tdtfS9ak4bGmwJRmcjsAzcHN5rJ3c5dB7yhSV10KTbk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
-github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
+github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
+github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
@@ -456,12 +460,12 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
-github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc=
-github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8=
+github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
+github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
-github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w=
-github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg=
+github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk=
+github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
@@ -486,8 +490,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI=
-github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo=
+github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
+github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -528,12 +532,12 @@ github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
-github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=
-github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM=
+github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
+github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
-github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
-github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
+github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
+github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -552,8 +556,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIX
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
@@ -562,8 +566,8 @@ go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBq
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
+go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@@ -608,8 +612,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -667,8 +671,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
+golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -691,11 +695,11 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
diff --git a/hack/podman_cleanup_tracer.bt b/hack/podman_cleanup_tracer.bt
new file mode 100755
index 000000000..42c46cde9
--- /dev/null
+++ b/hack/podman_cleanup_tracer.bt
@@ -0,0 +1,151 @@
+#!/usr/bin/env bpftrace
+
+// Traces podman and conmon commands for their exit status and all processes that
+// send signals to them. Also prints stderr for all podman container cleanup processes.
+// The goal is to be able to trace all podman cleanup errors. While there is an
+// option to log the errors to the syslog, that only works if the process got far
+// enough to log them, not if it was killed before. This program traces the exit
+// status as well, so we know the outcome even if the process was killed by a signal.
+//
+// This script uses https://github.com/bpftrace/bpftrace to compile the probes
+// into kernel eBPF.
+//
+// Usage: sudo ./podman_cleanup_tracer.bt
+// see below for output format
+
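+// Maps used below: @pids tracks every matched podman/conmon pid, @cleanupPids
+// the "podman container cleanup" children of conmon, and @tpid/@tsig stash
+// kill() arguments between the enter and exit tracepoints.
+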
+BEGIN {
+ print("Output format is one of:");
+ print("exec TIME PID PPID COMM ARGV(truncated)");
+ print("cmd TIME PID PPID ARGV(full)");
+ print("exit TIME PID PPID COMM EXIT_CODE EXIT_SIGNAL");
+ print("kill TIME PID PPID COMM SIGNAL TPID RET");
+ print("stderr TIME PID PPID COMM OUTPUT");
+}
+
+// Trace all exec calls to find all podman + conmon processes. This also catches
+// netavark and aardvark-dns, as they have podman in their path, which is
+// good: we can see any errors there as well.
+tracepoint:syscalls:sys_enter_exec*
+/ strcontains(str(args.argv[0]),"podman") || strcontains(str(args.argv[0]), "conmon") /
+{
+
+ // create entry in pid map so we can check the pid later
+ @pids[pid] = 1;
+
+ // Find the podman cleanup process spawned by conmon.
+ // I tried matching argv but there seems to be no way to iterate over it.
+ // In practice, parent comm "conmon" and argv0 containing "podman" should
+ // cover all the cleanup processes we care about.
+ if (comm == "conmon" && strcontains(str(args.argv[0]), "podman")) {
+ @cleanupPids[pid] = 1;
+ }
+
+ printf("%-6s %s %-8d %-8d %-12s ",
+ "exec",
+ strftime("%H:%M:%S.%f", nsecs),
+ pid,
+ curtask->real_parent->tgid,
+ comm
+ );
+
+ // Unfortunately this doesn't print the full argv as there is a length limit,
+ // so we use the /proc/<pid>/cmdline reading hack below on exec exit.
+ join(args->argv);
+}
+
+
+// Print the full cmdline
+tracepoint:syscalls:sys_exit_exec*
+/ @pids[pid] /
+{
+ printf("%-6s %s %-8d %-8d ",
+ "cmd",
+ strftime("%H:%M:%S.%f", nsecs),
+ pid,
+ curtask->real_parent->tgid
+ );
+ // This can fail to open the file: the read is done in user space and
+ // is thus racy if the process exits quickly.
+ cat("/proc/%d/cmdline", pid);
+ print("");
+}
+
+
+// Trace all exits for the pids we matched above.
+// pid == tid is used to only match the main process exit
+// and not individual thread exits, which we do not care about.
+tracepoint:sched:sched_process_exit
+/ @pids[pid] && pid == tid /
+{
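+ // curtask->exit_code packs the exit status in the upper byte and the
+ // terminating signal in the low byte (which also carries the core-dump
+ // flag 0x80), hence the shift and mask below.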
+ printf("%-6s %s %-8d %-8d %-12s %d %d\n",
+ "exit",
+ strftime("%H:%M:%S.%f", nsecs),
+ pid,
+ curtask->real_parent->tgid,
+ comm,
+ curtask->exit_code >> 8, // actual exit code
+ curtask->exit_code & 0xFF // signal number if killed
+ );
+
+ // process is done, remove its pid from the maps
+ delete(@pids[pid]);
+ delete(@cleanupPids[pid]);
+}
+
+// Trace all kill calls that target our pids.
+// This will not catch signals sent via pidfd_send_signal because
+// I don't see a way to translate the pidfd to our target pids and
+// I don't want to log all sent signals on the system.
+tracepoint:syscalls:sys_enter_kill
+/ @pids[args.pid] /
+{
+ @tpid[tid] = args.pid;
+ @tsig[tid] = args.sig;
+}
+
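+// The exit tracepoint only exposes the syscall return value, so the target
+// pid and signal stashed on enter are needed to print a complete record.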
+tracepoint:syscalls:sys_exit_kill
+/ @tpid[tid] /
+{
+ printf("%-6s %s %-8d %-8d %-12s %d %-8d %d\n",
+ "kill",
+ strftime("%H:%M:%S.%f", nsecs),
+ pid,
+ curtask->real_parent->tgid,
+ comm,
+ @tsig[tid],
+ @tpid[tid],
+ args.ret
+ );
+ delete(@tpid[tid]);
+ delete(@tsig[tid]);
+}
+
+
+// Print anything written on stderr for the podman container cleanup process.
+tracepoint:syscalls:sys_enter_write
+/ @cleanupPids[pid] && args.fd == 2 /
+{
+ printf("%-6s %s %-8d %-8d %-12s",
+ "stderr",
+ strftime("%H:%M:%S.%f", nsecs),
+ pid,
+ curtask->real_parent->tgid,
+ comm
+ );
+
+ // The string size limit is 64 by default; this includes the NUL byte, and when
+ // we hit the limit bpftrace also appends "...", so we use 62 as the length here.
+ // While upstream fixed these low string limits (https://github.com/bpftrace/bpftrace/issues/305),
+ // the fix is not yet in the older distro versions we use, so we cannot rely on it.
+ // Thus we manually print the buffer in several chunks.
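+ // With unroll(10) below, up to 62 + 10*62 = 682 bytes of each write are printed.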
+ $len = 62;
+ $offset = 62;
+ printf("%s", str(args.buf, $len));
+
+ unroll(10) {
+ if ((int64)args.count > $offset) {
+ printf("%s", str(args.buf + $offset, $len));
+ }
+ $offset += $len;
+ }
+}
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index bf9f63598..688985b5e 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -3,11 +3,8 @@
package libpod
import (
- "crypto/rand"
"fmt"
"net"
- "os"
- "path/filepath"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/common/libnetwork/types"
@@ -17,7 +14,6 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
- "golang.org/x/sys/unix"
)
// Create and configure a new network namespace for a container
@@ -104,33 +100,10 @@ func (r *Runtime) createNetNS(ctr *Container) (n string, q map[string]types.Stat
// Configure the network namespace using the container process
func (r *Runtime) setupNetNS(ctr *Container) error {
nsProcess := fmt.Sprintf("/proc/%d/ns/net", ctr.state.PID)
-
- b := make([]byte, 16)
-
- if _, err := rand.Reader.Read(b); err != nil {
- return fmt.Errorf("failed to generate random netns name: %w", err)
- }
- nsPath, err := netns.GetNSRunDir()
- if err != nil {
- return err
- }
- nsPath = filepath.Join(nsPath, fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]))
-
- if err := os.MkdirAll(filepath.Dir(nsPath), 0711); err != nil {
- return err
- }
-
- mountPointFd, err := os.Create(nsPath)
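+	// NewNSFrom (containers/common pkg/netns) creates a new netns file under
+	// the netns run dir and bind-mounts the container's /proc/<pid>/ns/net
+	// onto it for us.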
+ nsPath, err := netns.NewNSFrom(nsProcess)
if err != nil {
return err
}
- if err := mountPointFd.Close(); err != nil {
- return err
- }
-
- if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
- return fmt.Errorf("cannot mount %s: %w", nsPath, err)
- }
networkStatus, err := r.configureNetNS(ctr, nsPath)
diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go
index 3ad390624..9ecc5df9e 100644
--- a/pkg/rootless/rootless_linux.go
+++ b/pkg/rootless/rootless_linux.go
@@ -20,9 +20,9 @@ import (
"github.com/containers/storage/pkg/idtools"
pmount "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/unshare"
+ "github.com/moby/sys/capability"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
- "github.com/syndtr/gocapability/capability"
"golang.org/x/sys/unix"
)
diff --git a/test/e2e/quadlet_test.go b/test/e2e/quadlet_test.go
index 51897716e..f0b7f0ef3 100644
--- a/test/e2e/quadlet_test.go
+++ b/test/e2e/quadlet_test.go
@@ -713,7 +713,7 @@ var _ = Describe("quadlet system generator", func() {
Expect(session).Should(Exit(0))
current := session.ErrorToStringArray()
- expected := "No files parsed from [/something]"
+ expected := "No files parsed from map[/something:{}]"
found := false
for _, line := range current {
diff --git a/test/system/035-logs.bats b/test/system/035-logs.bats
index c0e74fd25..9114346a5 100644
--- a/test/system/035-logs.bats
+++ b/test/system/035-logs.bats
@@ -5,6 +5,7 @@
load helpers
+# bats test_tags=ci:parallel
@test "podman logs - basic test" {
rand_string=$(random_string 40)
@@ -62,10 +63,12 @@ function _log_test_tail() {
run_podman rm $cid
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - tail test, k8s-file" {
_log_test_tail k8s-file
}
+# bats test_tags=ci:parallel
@test "podman logs - tail test, journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -117,10 +120,12 @@ ${cid[1]} b2" "Sequential output from c2"
run_podman rm -f -t0 ${cid[0]} ${cid[1]}
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - multi k8s-file" {
_log_test_multi k8s-file
}
+# bats test_tags=ci:parallel
@test "podman logs - multi journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -146,10 +151,12 @@ function _log_test_restarted() {
run_podman rm -f -t0 $cname
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs restarted - k8s-file" {
_log_test_restarted k8s-file
}
+# bats test_tags=ci:parallel
@test "podman logs restarted journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -157,6 +164,7 @@ function _log_test_restarted() {
_log_test_restarted journald
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - journald log driver requires journald events backend" {
skip_if_remote "remote does not support --events-backend"
# We can't use journald on RHEL as rootless: rhbz#1895105
@@ -203,10 +211,12 @@ $s_after"
run_podman rm -t 1 -f $cname
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - since k8s-file" {
_log_test_since k8s-file
}
+# bats test_tags=ci:parallel
@test "podman logs - since journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -256,10 +266,12 @@ $s_after"
run_podman rm -t 0 -f $cname
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - until k8s-file" {
_log_test_until k8s-file
}
+# bats test_tags=ci:parallel
@test "podman logs - until journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -289,10 +301,12 @@ $contentC" "logs -f on exited container works"
run_podman ${events_backend} rm -t 0 -f $cname
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - --follow k8s-file" {
_log_test_follow k8s-file
}
+# bats test_tags=ci:parallel
@test "podman logs - --follow journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -320,7 +334,7 @@ function _log_test_follow_since() {
# Now do the same with a running container to check #16950.
run_podman ${events_backend} run --log-driver=$driver --name $cname -d $IMAGE \
- sh -c "sleep 1; while :; do echo $content && sleep 5; done"
+ sh -c "sleep 1; while :; do echo $content && sleep 1; done"
# sleep is required to make sure the podman event backend no longer sees the start event in the log
# This value must be greater or equal than the value given in --since below
@@ -328,17 +342,18 @@ function _log_test_follow_since() {
# Make sure podman logs actually follows by giving a low timeout and check that the command times out
PODMAN_TIMEOUT=3 run_podman 124 ${events_backend} logs --since 0.1s -f $cname
- assert "$output" =~ "^$content
+ assert "$output" =~ "$content
timeout: sending signal TERM to command.*" "logs --since -f on running container works"
run_podman ${events_backend} rm -t 0 -f $cname
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - --since --follow k8s-file" {
_log_test_follow_since k8s-file
}
-# bats test_tags=distro-integration
+# bats test_tags=distro-integration, ci:parallel
@test "podman logs - --since --follow journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -359,16 +374,16 @@ function _log_test_follow_until() {
run_podman ${events_backend} run --log-driver=$driver --name $cname -d $IMAGE \
sh -c "n=1;while :; do echo $content--\$n; n=\$((n+1));sleep 0.1; done"
- t0=$SECONDS
+ t0=$(date +%s%3N)
# The logs command should exit after the until time even when follow is set
PODMAN_TIMEOUT=10 run_podman ${events_backend} logs --until 3s -f $cname
- t1=$SECONDS
+ t1=$(date +%s%3N)
logs_seen="$output"
- # The delta should be 3 but because it could be a bit longer on a slow system such as CI we also accept 4.
- delta_t=$(( $t1 - $t0 ))
- assert $delta_t -gt 2 "podman logs --until: exited too early!"
- assert $delta_t -lt 5 "podman logs --until: exited too late!"
+ # The delta should be 3 but could be longer on a slow CI system
+ delta_t_ms=$(( $t1 - $t0 ))
+ assert $delta_t_ms -gt 2000 "podman logs --until: exited too early!"
+ assert $delta_t_ms -lt 5000 "podman logs --until: exited too late!"
# Impossible to know how many lines we'll see, but require at least two
assert "$logs_seen" =~ "$content--1
@@ -377,11 +392,12 @@ $content--2.*" "logs --until -f on running container works"
run_podman ${events_backend} rm -t 0 -f $cname
}
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs - --until --follow k8s-file" {
_log_test_follow_until k8s-file
}
-# bats test_tags=distro-integration
+# bats test_tags=distro-integration, ci:parallel
@test "podman logs - --until --follow journald" {
# We can't use journald on RHEL as rootless: rhbz#1895105
skip_if_journald_unavailable
@@ -390,6 +406,7 @@ $content--2.*" "logs --until -f on running container works"
}
# https://github.com/containers/podman/issues/19545
+# CANNOT BE PARALLELIZED - #23750, events-backend=file cannot coexist with journal
@test "podman logs --tail, k8s-file with partial lines" {
cname="c-$(safename)"
diff --git a/test/system/250-systemd.bats b/test/system/250-systemd.bats
index 677b07243..b3e2f9368 100644
--- a/test/system/250-systemd.bats
+++ b/test/system/250-systemd.bats
@@ -3,23 +3,29 @@
# Tests generated configurations for systemd.
#
+# bats file_tags=ci:parallel
+
load helpers
load helpers.systemd
load helpers.network
-SERVICE_NAME="podman_test_$(random_string)"
+SERVICE_NAME=
-UNIT_FILE="$UNIT_DIR/$SERVICE_NAME.service"
-TEMPLATE_FILE="$UNIT_DIR/$SERVICE_NAME@.service"
+UNIT_FILE=
+TEMPLATE_FILE=
function setup() {
skip_if_remote "systemd tests are meaningless over remote"
+ SERVICE_NAME="podman-test-$(safename)"
+ UNIT_FILE="$UNIT_DIR/$SERVICE_NAME.service"
+ TEMPLATE_FILE="$UNIT_DIR/$SERVICE_NAME@.service"
+
basic_setup
}
function teardown() {
- if [[ -e "$UNIT_FILE" ]]; then
+ if [[ -n "$UNIT_FILE" ]] && [[ -e "$UNIT_FILE" ]]; then
run systemctl stop "$SERVICE_NAME"
if [ $status -ne 0 ]; then
echo "# WARNING: systemctl stop failed in teardown: $output" >&3
@@ -32,6 +38,20 @@ function teardown() {
basic_teardown
}
+# Helper to atomically create a systemd unit file from a tmpfile
+#
+# Context:
+# $1 - file created by podman generate systemd; presumed to be in a tmpdir
+# $2 - desired service file path, presumed to be in /run
+#
+# We can't just mv one to the other, because mv is not atomic across
+# filesystems. (We need atomicity, to guarantee that there will never
+# be an incomplete .service file.) Hence the tmp extension.
+# -Z is because /run and $TMPDIR have different SELinux contexts.
+function mv-safely() {
+ mv -Z "$1" "$2.tmp.$$" && mv -Z "$2.tmp.$$" "$2"
+}
+
# Helper to start a systemd service running a container
function service_setup() {
# January 2024: we can no longer do "run_podman generate systemd" followed
@@ -40,12 +60,12 @@ function service_setup() {
# stdout + stderr, that warning goes to the unit file. (Today's systemd
# is forgiving about that, but RHEL8 systemd chokes with EINVAL)
(
- cd $UNIT_DIR
+ cd $PODMAN_TMPDIR
run_podman generate systemd --files --name \
-e http_proxy -e https_proxy -e no_proxy \
-e HTTP_PROXY -e HTTPS_PROXY -e NO_PROXY \
--new $cname
- mv "container-$cname.service" $UNIT_FILE
+ mv-safely "container-$cname.service" $UNIT_FILE
)
run_podman rm $cname
@@ -92,7 +112,7 @@ function service_cleanup() {
"generate systemd emits warning"
run_podman rm -f $cid
- cname=$(random_string)
+ cname=c-$(safename)
# See #7407 for --pull=always.
run_podman create --pull=always --name $cname --label "io.containers.autoupdate=registry" $IMAGE \
sh -c "trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done"
@@ -112,10 +132,10 @@ function service_cleanup() {
@test "podman autoupdate local" {
# Note that the entrypoint may be a JSON string which requires preserving the quotes (see #12477)
- cname=$(random_string)
+ cname=c-$(safename)
# Create a scratch image (copy of our regular one)
- image_copy=base$(random_string | tr A-Z a-z)
+ image_copy=base-$(safename)
run_podman tag $IMAGE $image_copy
# Create a container based on that
@@ -140,7 +160,7 @@ function service_cleanup() {
# These tests can fail in dev. environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman generate systemd - envar" {
- cname=$(random_string)
+ cname=c-$(safename)
FOO=value BAR=%s run_podman create --name $cname --env FOO -e BAR --env MYVAR=myval \
$IMAGE sh -c 'printenv && echo READY; trap 'exit' SIGTERM; while :; do sleep 0.1; done'
@@ -162,19 +182,19 @@ function service_cleanup() {
# Regression test for #11438
@test "podman generate systemd - restart policy & timeouts" {
- cname=$(random_string)
+ cname=c1-$(safename)
run_podman create --restart=always --name $cname $IMAGE
run_podman generate systemd --new $cname
is "$output" ".*Restart=always.*" "Use container's restart policy if set"
run_podman generate systemd --new --restart-policy=on-failure $cname
is "$output" ".*Restart=on-failure.*" "Override container's restart policy"
- cname2=$(random_string)
+ cname2=c2-$(safename)
run_podman create --restart=unless-stopped --name $cname2 $IMAGE
run_podman generate systemd --new $cname2
is "$output" ".*Restart=always.*" "unless-stopped translated to always"
- cname3=$(random_string)
+ cname3=c3-$(safename)
run_podman create --restart=on-failure:42 --name $cname3 $IMAGE
run_podman generate systemd --new $cname3
is "$output" ".*Restart=on-failure.*" "on-failure:xx is parsed correctly"
@@ -230,14 +250,14 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
}
@test "podman generate - systemd template" {
- cname=$(random_string)
+ cname=c-$(safename)
run_podman create --name $cname $IMAGE top
# See note in service_setup() above re: using --files
(
- cd $UNIT_DIR
+ cd $PODMAN_TMPDIR
run_podman generate systemd --template --files -n $cname
- mv "container-$cname.service" $TEMPLATE_FILE
+ mv-safely "container-$cname.service" $TEMPLATE_FILE
)
run_podman rm -f $cname
@@ -257,8 +277,8 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
}
@test "podman generate - systemd template no support for pod" {
- cname=$(random_string)
- podname=$(random_string)
+ cname=c-$(safename)
+ podname=p-$(safename)
run_podman pod create --name $podname
run_podman run --pod $podname -dt --name $cname $IMAGE top
@@ -267,11 +287,10 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
run_podman rm -f $cname
run_podman pod rm -f $podname
- run_podman rmi $(pause_image)
}
@test "podman generate - systemd template only used on --new" {
- cname=$(random_string)
+ cname=c-$(safename)
run_podman create --name $cname $IMAGE top
run_podman 125 generate systemd --new=false --template -n $cname
is "$output" ".*--template cannot be set" "Error message should be '--template requires --new'"
@@ -284,11 +303,12 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
}
@test "podman --systemd sets container_uuid" {
- run_podman run --systemd=always --name test $IMAGE printenv container_uuid
+ cname=c-$(safename)
+ run_podman run --systemd=always --name $cname $IMAGE printenv container_uuid
container_uuid=$output
- run_podman inspect test --format '{{ .ID }}'
+ run_podman inspect $cname --format '{{ .ID }}'
is "${container_uuid}" "${output:0:32}" "UUID should be first 32 chars of Container id"
- run_podman rm test
+ run_podman rm $cname
}
@test "podman --systemd fails on cgroup v1 with a private cgroupns" {
@@ -302,8 +322,8 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
@test "podman rootless-netns processes should be in different cgroup" {
is_rootless || skip "only meaningful for rootless"
- cname=$(random_string)
- local netname=testnet-$(random_string 10)
+ cname=c-$(safename)
+ local netname=testnet-$(safename)
# create network and container with network
run_podman network create $netname
@@ -313,7 +333,7 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
service_setup
# run second container with network
- cname2=$(random_string)
+ cname2=c2-$(safename)
run_podman run -d --name $cname2 --network $netname $IMAGE top
# stop systemd container
@@ -332,7 +352,7 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
}
@test "podman create --health-on-failure=kill" {
- cname=c_$(random_string)
+ cname=c-$(safename)
run_podman create --name $cname \
--health-cmd /home/podman/healthcheck \
--health-on-failure=kill \
@@ -387,30 +407,33 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
install_kube_template
# Create the YAMl file
yaml_source="$PODMAN_TMPDIR/test.yaml"
+ podname=p-$(safename)
+ c1=c1-$(safename)
+ c2=c2-$(safename)
cat >$yaml_source <<EOF
apiVersion: v1
kind: Pod
metadata:
annotations:
io.containers.autoupdate: "local"
- io.containers.autoupdate/b: "registry"
+ io.containers.autoupdate/$c2: "registry"
labels:
app: test
- name: test_pod
+ name: $podname
spec:
containers:
- command:
- sh
- -c
- - echo a stdout; echo a stderr 1>&2; trap 'exit' SIGTERM; while :; do sleep 0.1; done
+ - echo c1 stdout; echo c1 stderr 1>&2; trap 'exit' SIGTERM; while :; do sleep 0.1; done
image: $IMAGE
- name: a
+ name: $c1
- command:
- sh
- -c
- - echo b stdout; echo b stderr 1>&2; trap 'exit' SIGTERM; while :; do sleep 0.1; done
+ - echo c2 stdout; echo c2 stderr 1>&2; trap 'exit' SIGTERM; while :; do sleep 0.1; done
image: $IMAGE
- name: b
+ name: $c2
EOF
# Dispatch the YAML file
@@ -436,29 +459,29 @@ EOF
is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
# containers/podman/issues/17482: verify that the log-driver for the Pod's containers is NOT passthrough
- for name in "a" "b"; do
- run_podman container inspect test_pod-${name} --format "{{.HostConfig.LogConfig.Type}}"
+ for name in "c1" "c2"; do
+ run_podman container inspect ${podname}-${!name} --format "{{.HostConfig.LogConfig.Type}}"
assert $output != "passthrough"
# check that we can get the logs with passthrough when we run in a systemd unit
- run_podman logs test_pod-$name
+ run_podman logs ${podname}-${!name}
assert "$output" == "$name stdout
$name stderr" "logs work with passthrough"
done
# we cannot assume the ordering between a b, this depends on timing and would flake in CI
# use --names so we do not have to get the ID
- run_podman pod logs --names test_pod
- assert "$output" =~ ".*^test_pod-a a stdout.*" "logs from container a shown"
- assert "$output" =~ ".*^test_pod-b b stdout.*" "logs from container b shown"
+ run_podman pod logs --names $podname
+ assert "$output" =~ ".*^${podname}-${c1} c1 stdout.*" "logs from container 1 shown"
+ assert "$output" =~ ".*^${podname}-${c2} c2 stdout.*" "logs from container 2 shown"
# Add a simple `auto-update --dry-run` test here to avoid too much redundancy
# with 255-auto-update.bats
run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
- is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,local.*" "global auto-update policy gets applied"
- is "$output" ".*$service_name,.* (test_pod-b),$IMAGE,false,registry.*" "container-specified auto-update policy gets applied"
+ is "$output" ".*$service_name,.* (${podname}-${c1}),$IMAGE,false,local.*" "global auto-update policy gets applied"
+ is "$output" ".*$service_name,.* (${podname}-${c2}),$IMAGE,false,registry.*" "container-specified auto-update policy gets applied"
# Kill the pod and make sure the service is not running.
- run_podman pod kill test_pod
+ run_podman pod kill $podname
for i in {0..20}; do
# echos are for debugging test flakes
echo "$_LOG_PROMPT systemctl is-active $service_name"
@@ -481,26 +504,25 @@ $name stderr" "logs work with passthrough"
# Clean up
systemctl stop $service_name
run_podman 1 container exists $service_container
- run_podman 1 pod exists test_pod
- run_podman rmi $(pause_image)
- run_podman network rm podman-default-kube-network
- rm -f $UNIT_DIR/$unit_name
+ run_podman 1 pod exists $podname
}
@test "podman generate - systemd - DEPRECATED" {
run_podman generate systemd --help
is "$output" ".*[DEPRECATED] command:"
is "$output" ".*\[DEPRECATED\] Generate systemd units.*"
- run_podman create --name test $IMAGE
- run_podman generate systemd test >/dev/null
+
+ cname=c-$(safename)
+ run_podman create --name $cname $IMAGE
+ run_podman generate systemd $cname >/dev/null
is "$output" ".*[DEPRECATED] command:"
run_podman generate --help
is "$output" ".*\[DEPRECATED\] Generate systemd units"
- run_podman rm test
+ run_podman rm $cname
}
@test "podman passes down the KillSignal and StopTimeout setting" {
- ctr=systemd_test_$(random_string 5)
+ ctr=systemd_test_$(safename)
run_podman run -d --name $ctr --stop-signal 5 --stop-timeout 7 --rm $IMAGE top
run_podman inspect $ctr --format '{{ .Id }}'
diff --git a/test/system/252-quadlet.bats b/test/system/252-quadlet.bats
index 3ccf97d26..b282cbd0e 100644
--- a/test/system/252-quadlet.bats
+++ b/test/system/252-quadlet.bats
@@ -221,6 +221,8 @@ EOF
}
@test "quadlet conflict names" {
+ skip "FIXME: #24047, temporary skip because this is an intense flake"
+
# If two directories in the search have files with the same name, quadlet should
# only process the first name
dir1=$PODMAN_TMPDIR/$(random_string)
diff --git a/test/system/505-networking-pasta.bats b/test/system/505-networking-pasta.bats
index f32e3e427..9a0053378 100644
--- a/test/system/505-networking-pasta.bats
+++ b/test/system/505-networking-pasta.bats
@@ -169,10 +169,6 @@ function pasta_test_do() {
# socat options for first <address> in server ("LISTEN" address types),
local bind="${proto_upper}${ip_ver}-LISTEN:\${port}"
- # For IPv6 via tap, we can pick either link-local or global unicast
- if [ ${ip_ver} -eq 4 ] || [ ${iftype} = "loopback" ]; then
- bind="${bind},bind=[${addr}]"
- fi
if [ "${proto}" = "udp" ]; then
bind="${bind},null-eof"
fi
@@ -434,43 +430,52 @@ function pasta_test_do() {
### DNS ########################################################################
-@test "External resolver, IPv4" {
- skip_if_no_ipv4 "IPv4 not routable on the host"
-
- run_podman '?' run --rm --net=pasta $IMAGE nslookup 127.0.0.1
-
- assert "$output" =~ "1.0.0.127.in-addr.arpa" \
- "127.0.0.1 not resolved"
-}
-
-@test "External resolver, IPv6" {
- skip_if_no_ipv6 "IPv6 not routable on the host"
-
- run_podman '?' run --rm --net=pasta $IMAGE nslookup ::1
-
- assert "$output" =~ "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" \
- "::1 not resolved"
+@test "Basic nameserver lookup" {
+ run_podman run --rm --net=pasta $IMAGE nslookup l.root-servers.net
}
-@test "Local forwarder, IPv4" {
+@test "Default nameserver forwarding" {
skip_if_no_ipv4 "IPv4 not routable on the host"
# pasta is the default now so no need to set it
run_podman run --rm $IMAGE grep nameserver /etc/resolv.conf
assert "${lines[0]}" == "nameserver 169.254.1.1" "default dns forward server"
+}
+
+@test "Custom DNS forward address, IPv4" {
+ skip_if_no_ipv4 "IPv4 not routable on the host"
+
+ local addr=198.51.100.1
- run_podman run --rm --net=pasta:--dns-forward,198.51.100.1 \
- $IMAGE nslookup 127.0.0.1 || :
- assert "$output" =~ "1.0.0.127.in-addr.arpa" "No answer from resolver"
+ run_podman run --rm --net=pasta:--dns-forward,$addr \
+ $IMAGE grep nameserver /etc/resolv.conf
+ assert "${lines[0]}" == "nameserver $addr" "custom dns forward server"
+
+ run_podman run --rm --net=pasta:--dns-forward,$addr \
+ $IMAGE nslookup l.root-servers.net $addr
}
-@test "Local forwarder, IPv6" {
+@test "Custom DNS forward address, IPv6" {
skip_if_no_ipv6 "IPv6 not routable on the host"
- # TODO: Two issues here:
+ # TODO: In fact, this requires not just IPv6 connectivity on the
+ # host, but an IPv6 reachable nameserver which is harder to
+ # test for. We could remove that requirement if pasta could
+ # forward between IPv4 and IPv6 addresses but as of
+ # 2024_09_06.6b38f07 that's unsupported. Skip the test for
+ # now.
skip "Currently unsupported"
- # run_podman run --dns 2001:db8::1 \
- # --net=pasta:--dns-forward,2001:db8::1 $IMAGE nslookup ::1
+ # local addr=2001:db8::1
+ #
+ # run_podman run --rm --net=pasta:--dns-forward,$addr \
+ # $IMAGE grep nameserver /etc/resolv.conf
+ # assert "${lines[0]}" == "nameserver $addr" "custom dns forward server"
+ # run_podman run --rm --net=pasta:--dns-forward,$addr \
+ # $IMAGE nslookup l.root-servers.net $addr
+ #
+ # TODO: In addition to the IPv6 nameserver requirement above,
+ # there seem to be two problems running this test. It's
+ # unclear if those are in busybox, musl or pasta.
#
# 1. With this, Podman writes "nameserver 2001:db8::1" to
# /etc/resolv.conf, without zone, and the query originates from ::1.
diff --git a/test/system/550-pause-process.bats b/test/system/550-pause-process.bats
index cd3860930..69da5a1d8 100644
--- a/test/system/550-pause-process.bats
+++ b/test/system/550-pause-process.bats
@@ -149,3 +149,22 @@ function _check_pause_process() {
run_podman rm -f -t0 $cname1
}
+
+# regression test for https://issues.redhat.com/browse/RHEL-59620
+@test "rootless userns can unmount netns properly" {
+ skip_if_not_rootless "pause process is only used as rootless"
+ skip_if_remote "system migrate not supported via remote"
+
+ # Use podman system migrate to stop the currently running pause process
+ run_podman system migrate
+
+ # First run a container with a custom userns as this uses different netns setup logic.
+ local cname=c-$(safename)
+ run_podman run --userns keep-id --name $cname -d $IMAGE sleep 100
+
+ # Now run a "normal" container without userns
+ run_podman run --rm $IMAGE true
+
+ # This used to hang trying to unmount the netns.
+ run_podman rm -f -t0 $cname
+}
diff --git a/test/system/610-format.bats b/test/system/610-format.bats
index c0a8417a4..a0d7e119d 100644
--- a/test/system/610-format.bats
+++ b/test/system/610-format.bats
@@ -130,6 +130,14 @@ function check_subcommand() {
run_podman pod create $podname
run_podman secret create $secretname /etc/hosts
+ # For 'search' and 'image search': if local cache registry is available,
+ # use it. This bypasses quay, and thus prevents flakes.
+ searchargs=$IMAGE
+ if [[ -n "$CI_USE_REGISTRY_CACHE" ]]; then
+ # FIXME: someday: find a way to refactor the hardcoded port
+ searchargs="--tls-verify=false 127.0.0.1:60333/we/dontactuallyneed:arealimage"
+ fi
+
# Most commands can't just be run with --format; they need an argument or
# option. This table defines what those are.
extra_args_table="
@@ -145,8 +153,8 @@ secret inspect | $secretname
network inspect | podman
ps | -a
-image search | $IMAGE
-search | $IMAGE
+image search | $searchargs
+search | $searchargs
pod inspect | $podname
diff --git a/test/system/700-play.bats b/test/system/700-play.bats
index 7fe6d18db..ce5011bd8 100644
--- a/test/system/700-play.bats
+++ b/test/system/700-play.bats
@@ -7,21 +7,6 @@ load helpers
load helpers.network
load helpers.registry
-# This is a long ugly way to clean up pods and remove the pause image
-function teardown() {
- run_podman pod rm -t 0 -f -a
- run_podman rm -t 0 -f -a
- run_podman image list --format '{{.ID}} {{.Repository}}'
- while read id name; do
- if [[ "$name" =~ /podman-pause ]]; then
- run_podman rmi $id
- fi
- done <<<"$output"
- run_podman network rm -f podman-default-kube-network
-
- basic_teardown
-}
-
# helper function: writes a yaml file with customizable values
function _write_test_yaml() {
# All of these are available to our caller
@@ -120,6 +105,7 @@ EOF
RELABEL="system_u:object_r:container_file_t:s0"
+# bats test_tags=ci:parallel
@test "podman kube with stdin" {
TESTDIR=$PODMAN_TMPDIR/testdir
mkdir -p $TESTDIR
@@ -140,6 +126,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
run_podman pod rm -t 0 -f $PODNAME
}
+# bats test_tags=ci:parallel
@test "podman play" {
# Testing that the "podman play" cmd still works now that
# "podman kube" is an option.
@@ -161,6 +148,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
run_podman pod rm -t 0 -f $PODNAME
}
+# bats test_tags=ci:parallel
@test "podman play --service-container" {
skip_if_remote "service containers only work locally"
@@ -226,11 +214,14 @@ RELABEL="system_u:object_r:container_file_t:s0"
run_podman pod kill $PODNAME
_ensure_container_running $service_container false
+ run_podman network ls
+
# Remove the pod and make sure the service is removed along with it
run_podman pod rm $PODNAME
run_podman 1 container exists $service_container
}
+# bats test_tags=ci:parallel
@test "podman kube --network" {
_write_test_yaml command=top
@@ -262,6 +253,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
run_podman 1 container exists $PODCTRNAME
}
+# bats test_tags=ci:parallel
@test "podman kube play read-only" {
YAML=$PODMAN_TMPDIR/test.yml
@@ -300,6 +292,7 @@ RELABEL="system_u:object_r:container_file_t:s0"
run_podman 1 container exists ${podname}-${c3name}
}
+# bats test_tags=ci:parallel
@test "podman kube play read-only from containers.conf" {
containersconf=$PODMAN_TMPDIR/containers.conf
cat >$containersconf <<EOF
@@ -351,6 +344,7 @@ EOF
run_podman 1 container exists ${podname}-${c3name}
}
+# bats test_tags=ci:parallel
@test "podman play with user from image" {
imgname="i-$(safename)"
_write_test_yaml command=id image=$imgname
@@ -362,7 +356,8 @@ _EOF
# Unset the PATH during build and make sure that all default env variables
# are correctly set for the created container.
- run_podman build --unsetenv PATH -t $imgname $PODMAN_TMPDIR
+ # --layers=false needed to work around buildah#5674 parallel flake
+ run_podman build --layers=false --unsetenv PATH -t $imgname $PODMAN_TMPDIR
run_podman image inspect $imgname --format "{{.Config.Env}}"
is "$output" "\[\]" "image does not set PATH - env is empty"
@@ -377,6 +372,8 @@ _EOF
run_podman rmi -f $imgname
}
+# CANNOT BE PARALLELIZED (YET): buildah#5674, parallel builds fail
+# ...workaround is --layers=false, but there's no way to do that in kube
@test "podman play --build --context-dir" {
skip_if_remote "--build is not supported in context remote"
@@ -412,6 +409,7 @@ _EOF
# podman play kube --replace to fail. This tests created a conflicting
# storage container name using buildah to make sure --replace, still
# functions properly by removing the storage container.
+# bats test_tags=ci:parallel
@test "podman kube play --replace external storage" {
_write_test_yaml command="top"
@@ -440,6 +438,7 @@ _EOF
run_podman pod rm -t 0 -f $PODNAME
}
+# bats test_tags=ci:parallel
@test "podman kube --annotation" {
_write_test_yaml command=/home/podman/pause
@@ -458,6 +457,7 @@ _EOF
run_podman pod rm -t 0 -f $PODNAME
}
+# bats test_tags=ci:parallel
@test "podman play Yaml deprecated --no-trunc annotation" {
skip "FIXME: I can't figure out what this test is supposed to do"
RANDOMSTRING=$(random_string 65)
@@ -466,6 +466,7 @@ _EOF
run_podman play kube --no-trunc - < $TESTYAML
}
+# bats test_tags=ci:parallel
@test "podman kube play - default log driver" {
_write_test_yaml command=top
# Get the default log driver
@@ -482,6 +483,7 @@ _EOF
is "$output" ".*Error: no such object: \"$PODCTRNAME\""
}
+# bats test_tags=ci:parallel
@test "podman kube play - URL" {
_write_test_yaml command=top
@@ -510,6 +512,7 @@ _EOF
run_podman rm -f -t0 $serverctr
}
+# bats test_tags=ci:parallel
@test "podman play with init container" {
_write_test_yaml command=
cat >>$TESTYAML <<EOF
@@ -534,6 +537,7 @@ EOF
run_podman kube down $TESTYAML
}
+# bats test_tags=ci:parallel
@test "podman kube play - hostport" {
HOST_PORT=$(random_free_port)
_write_test_yaml
@@ -551,6 +555,7 @@ EOF
run_podman kube down $TESTYAML
}
+# bats test_tags=ci:parallel
@test "podman kube play - multi-pod YAML" {
skip_if_remote "service containers only work locally"
skip_if_journald_unavailable
@@ -603,6 +608,7 @@ EOF
run_podman kube down $TESTYAML
}
+# bats test_tags=ci:parallel
@test "podman kube generate filetype" {
YAML=$PODMAN_TMPDIR/test.yml
@@ -634,6 +640,7 @@ EOF
}
# kube play --wait=true, where we clear up the created containers, pods, and volumes when a kill or sigterm is triggered
+# bats test_tags=ci:parallel
@test "podman kube play --wait with siginterrupt" {
podname="p-$(safename)"
ctrname="c-$(safename)"
@@ -661,16 +668,28 @@ spec:
PODMAN_TIMEOUT=2 run_podman 124 kube play --wait $fname
local t1=$SECONDS
local delta_t=$((t1 - t0))
- assert $delta_t -le 4 \
- "podman kube play did not get killed within 3 seconds"
+
+ # Expectation (in seconds) of when we should time out. When running
+ # parallel, allow 4 more seconds due to system load
+ local expect=4
+ if [[ -n "$PARALLEL_JOBSLOT" ]]; then
+ expect=$((expect + 4))
+ fi
+ assert $delta_t -le $expect \
+ "podman kube play did not get killed within $expect seconds"
# Make sure we actually got SIGTERM and podman printed its message.
assert "$output" =~ "Cleaning up containers, pods, and volumes" "kube play printed sigterm message"
# there should be no containers running or created
- run_podman ps -aq
- assert "$output" !~ "$(safename)" "No containers created by this test"
+ run_podman ps -a --noheading
+ assert "$output" !~ "$(safename)" "All containers created by this test should be gone"
+
+ # ...nor pods
+ run_podman pod ps --noheading
+ assert "$output" !~ "$(safename)" "All pods created by this test should be gone"
}
+# bats test_tags=ci:parallel
@test "podman kube play --wait - wait for pod to exit" {
podname="p-$(safename)"
ctrname="c-$(safename)"
@@ -703,6 +722,7 @@ spec:
assert "$output" !~ "$(safename)" "No pods created by this test"
}
+# bats test_tags=ci:parallel
@test "podman kube play with configmaps" {
foovalue="foo-$(safename)"
barvalue="bar-$(safename)"
@@ -777,6 +797,7 @@ spec:
run_podman kube down $pod_file
}
+# bats test_tags=ci:parallel
@test "podman kube with --authfile=/tmp/bogus" {
_write_test_yaml
bogus=$PODMAN_TMPDIR/bogus-authfile
@@ -786,6 +807,7 @@ spec:
"$command should fail with not such file"
}
+# bats test_tags=ci:parallel
@test "podman kube play with umask from containers.conf" {
skip_if_remote "remote does not support CONTAINERS_CONF*"
YAML=$PODMAN_TMPDIR/test.yaml
@@ -820,6 +842,7 @@ EOF
run_podman rm $ctr
}
+# bats test_tags=ci:parallel
@test "podman kube generate tmpfs on /tmp" {
_write_test_yaml command=/home/podman/pause
run_podman kube play $TESTYAML
@@ -828,6 +851,7 @@ EOF
run_podman kube down $TESTYAML
}
+# bats test_tags=ci:parallel
@test "podman kube play - pull policy" {
skip_if_remote "pull debug logs only work locally"
@@ -850,12 +874,14 @@ EOF
run_podman rmi $local_image
}
-@test "podman kube play healthcheck should wait initialDelaySeconds before updating status (healthy)" {
- podname="liveness-exec-$(safename)"
- ctrname="liveness-ctr-$(safename)"
+# bats test_tags=ci:parallel
+@test "podman kube play healthcheck should wait initialDelaySeconds before updating status" {
+ for want in healthy unhealthy; do
+ podname="liveness-exec-$(safename)-$want"
+ ctrname="liveness-ctr-$(safename)-$want"
- fname="$PODMAN_TMPDIR/play_kube_healthy_$(random_string 6).yaml"
- echo "
+ fname="$PODMAN_TMPDIR/play_kube_${want}_$(random_string 6).yaml"
+ cat <<EOF >$fname
apiVersion: v1
kind: Pod
metadata:
@@ -871,93 +897,62 @@ spec:
- touch /tmp/healthy && sleep 100
livenessProbe:
exec:
+ # /tmp/healthy will exist in healthy container
+ # /tmp/unhealthy will never exist, and will thus
+ # cause healthcheck failure
command:
- cat
- - /tmp/healthy
+ - /tmp/$want
initialDelaySeconds: 3
failureThreshold: 1
periodSeconds: 1
-" > $fname
-
- run_podman kube play $fname
- ctrName="$podname-$ctrname"
-
- # Keep checking status. For the first 2 seconds it must be 'starting'
- t0=$SECONDS
- while [[ $SECONDS -le $((t0 + 2)) ]]; do
- run_podman inspect $ctrName --format "1-{{.State.Health.Status}}"
- assert "$output" == "1-starting" "Health.Status at $((SECONDS - t0))"
- sleep 0.5
- done
+EOF
- # After 3 seconds it may take another second to go healthy. Wait.
- t0=$SECONDS
- while [[ $SECONDS -le $((t0 + 3)) ]]; do
- run_podman inspect $ctrName --format "2-{{.State.Health.Status}}"
- if [[ "$output" = "2-healthy" ]]; then
- break;
+ run_podman kube play $fname
+ ctrName="$podname-$ctrname"
+
+ # Collect status every half-second until it goes into the desired state.
+ local i=1
+ local full_log=""
+ while [[ $i -lt 15 ]]; do
+ run_podman inspect $ctrName --format "$i-{{.State.Health.Status}}"
+ full_log+=" $output"
+ if [[ "$output" =~ "-$want" ]]; then
+ break
+ fi
+ sleep 0.5
+ i=$((i+1))
+ done
+
+ assert "$full_log" =~ "-$want\$" \
+ "Container got to '$want'"
+ assert "$full_log" =~ "-starting.*-$want" \
+ "Container went from starting to $want"
+
+ if [[ $want == "healthy" ]]; then
+ dontwant="unhealthy"
+ else
+ dontwant="healthy"
fi
- sleep 0.5
- done
- assert $output == "2-healthy" "After 3 seconds"
-
- run_podman kube down $fname
-}
-
-@test "podman kube play healthcheck should wait initialDelaySeconds before updating status (unhealthy)" {
- podname="liveness-exec-$(safename)"
- ctrname="liveness-ctr-$(safename)"
+ assert "$full_log" !~ "-$dontwant" \
+ "Container never goes $dontwant"
+
+ # GAH! Save ten seconds, but in a horrible way.
+ # - 'kube down' does not have a -t0 option.
+ # - Using 'top' in the container, instead of 'sleep 100', results
+ # in very weird failures. Seriously weird.
+ # - 'stop -t0', every once in a while on parallel runs on my
+ # laptop (never yet in CI), barfs with 'container is running or
+ # paused, refusing to clean up, container state improper'
+ # Here's hoping that this will silence the flakes.
+ run_podman '?' stop -t0 $ctrName
- fname="$PODMAN_TMPDIR/play_kube_unhealthy_$(random_string 6).yaml"
- echo "
-apiVersion: v1
-kind: Pod
-metadata:
- labels:
- name: $podname
-spec:
- containers:
- - name: $ctrname
- image: $IMAGE
- args:
- - /bin/sh
- - -c
- - touch /tmp/healthy && sleep 100
- livenessProbe:
- exec:
- command:
- - cat
- - /tmp/randomfile
- initialDelaySeconds: 3
- failureThreshold: 1
- periodSeconds: 1
-" > $fname
-
- run_podman kube play $fname
- ctrName="$podname-$ctrname"
-
- # Keep checking status. For the first 2 seconds it must be 'starting'
- t0=$SECONDS
- while [[ $SECONDS -le $((t0 + 2)) ]]; do
- run_podman inspect $ctrName --format "1-{{.State.Health.Status}}"
- assert "$output" == "1-starting" "Health.Status at $((SECONDS - t0))"
- sleep 0.5
- done
-
- # After 3 seconds it may take another second to go unhealthy. Wait.
- t0=$SECONDS
- while [[ $SECONDS -le $((t0 + 3)) ]]; do
- run_podman inspect $ctrName --format "2-{{.State.Health.Status}}"
- if [[ "$output" = "2-unhealthy" ]]; then
- break;
- fi
- sleep 0.5
+ run_podman kube down $fname
done
- assert $output == "2-unhealthy" "After 3 seconds"
-
- run_podman kube down $fname
}
+# CANNOT BE PARALLELIZED (YET): buildah#5674, parallel builds fail
+# ...workaround is --layers=false, but there's no way to do that in kube
@test "podman play --build private registry" {
skip_if_remote "--build is not supported in context remote"
@@ -1001,6 +996,7 @@ _EOF
run_podman rmi -f $userimage $from_image
}
+# bats test_tags=ci:parallel
@test "podman play with image volume (automount annotation and OCI VolumeSource)" {
imgname1="automount-img1-$(safename)"
imgname2="automount-img2-$(safename)"
@@ -1026,8 +1022,9 @@ VOLUME /test2
VOLUME /test_same
EOF
- run_podman build -t $imgname1 -f $PODMAN_TMPDIR/Containerfile1
- run_podman build -t $imgname2 -f $PODMAN_TMPDIR/Containerfile2
+ # --layers=false needed to work around buildah#5674 parallel flake
+ run_podman build -t $imgname1 --layers=false -f $PODMAN_TMPDIR/Containerfile1
+ run_podman build -t $imgname2 --layers=false -f $PODMAN_TMPDIR/Containerfile2
_write_test_yaml command=top name=$podname ctrname=$ctrname
run_podman kube play --annotation "io.podman.annotations.kube.image.volumes.mount/$ctrname=$imgname1" $TESTYAML
@@ -1141,6 +1138,7 @@ EOF
run_podman rmi $imgname1 $imgname2
}
+# bats test_tags=ci:parallel
@test "podman play with image volume pull policies" {
podname="p-$(safename)"
ctrname="c-$(safename)"
@@ -1234,6 +1232,7 @@ EOF
run_podman rmi $volimg_local $volimg_both
}
+# CANNOT BE PARALLELIZED: userns=auto, rootless, => not enough unused IDs in user namespace
@test "podman kube restore user namespace" {
if ! is_rootless; then
grep -E -q "^containers:" /etc/subuid || skip "no IDs allocated for user 'containers'"
diff --git a/troubleshooting.md b/troubleshooting.md
index 1dd1d2980..f3a8104d9 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -1517,8 +1517,7 @@ The following steps create the user _containers_ and assigns big subuid and subg
1. Create the user _containers_
```
sudo useradd --comment "Helper user to reserve subuids and subgids for Podman" \
- --no-create-home \
- --home-dir / \
+ --no-create-home \
--shell /usr/sbin/nologin \
containers
```
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
index 529c3412b..45ad0f1ae 100644
--- a/vendor/dario.cat/mergo/.gitignore
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -13,6 +13,9 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
+# Golang/Intellij
+.idea
+
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
index 7d0cf9f32..0b3c48889 100644
--- a/vendor/dario.cat/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.
### Important notes
#### 1.0.0
-In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
#### 0.3.9
@@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
### Mergo in the wild
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](https://github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
## Install
@@ -141,6 +118,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
}
```
+If you need to override pointers, so that the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A *string
+ B int64
+}
+
+func main() {
+ first := "first"
+ second := "second"
+ src := Foo{
+ A: &first,
+ B: 2,
+ }
+
+ dest := Foo{
+ A: &second,
+ B: 1,
+ }
+
+ mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+
+ fmt.Println(*dest.A, dest.B) // prints: first 2
+}
+```
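+
+After the merge, `dest.A` points at `"first"` and `dest.B` is `2`: the source
+pointer itself is assigned instead of being dereferenced and merged.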
+
Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
```go
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
index b50d5c2a4..759b4f74f 100644
--- a/vendor/dario.cat/mergo/map.go
+++ b/vendor/dario.cat/mergo/map.go
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+ if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
dstMap[fieldName] = src.Field(i).Interface()
}
}
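
The rewritten condition keys the overwrite decision on the emptiness of the *source* field rather than the destination entry. A minimal sketch of the resulting `Map` behavior under `WithOverride` (the `Config` type and values here are illustrative, not part of this change):

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Config struct {
	Name string
	Port int
}

func main() {
	dst := map[string]interface{}{"name": "old", "port": 8080}
	// Port is zero, i.e. empty, so even with WithOverride it must not
	// clobber the existing "port" entry; the non-empty Name does.
	src := Config{Name: "new"}

	if err := mergo.Map(&dst, src, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(dst["name"], dst["port"]) // new 8080
}
```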
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
index 0ef9b2138..fd47c95b2 100644
--- a/vendor/dario.cat/mergo/merge.go
+++ b/vendor/dario.cat/mergo/merge.go
@@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
- } else {
+ } else if src.Elem().Kind() != reflect.Struct {
if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
dst.Set(src)
}
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
index 1f4f925fe..5151343ed 100644
--- a/vendor/github.com/containers/common/libimage/copier.go
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -60,6 +60,13 @@ type CopyOptions struct {
CertDirPath string
// Force layer compression when copying to a `dir` transport destination.
DirForceCompress bool
+
+ // ImageListSelection is one of CopySystemImage, CopyAllImages, or
+ // CopySpecificImages, to control whether, when the source reference is a list,
+ // copy.Image() copies only an image which matches the current runtime
+ // environment, or all images which match the supplied reference, or only
+ // specific images from the source reference.
+ ImageListSelection copy.ImageListSelection
// Allow contacting registries over HTTP, or HTTPS with failed TLS
// verification. Note that this does not affect other TLS connections.
InsecureSkipTLSVerify types.OptionalBool
@@ -206,13 +213,17 @@ func getDockerAuthConfig(name, passwd, creds, idToken string) (*types.DockerAuth
}
}
+// NewCopier is a simple, exported wrapper for newCopier
+func NewCopier(options *CopyOptions, sc *types.SystemContext) (*copier, error) {
+ return newCopier(options, sc)
+}
+
// newCopier creates a copier. Note that fields in options *may* overwrite the
// counterparts of the specified system context. Please make sure to call
// `(*copier).close()`.
-func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) {
+func newCopier(options *CopyOptions, sc *types.SystemContext) (*copier, error) {
c := copier{extendTimeoutSocket: options.extendTimeoutSocket}
- c.systemContext = r.systemContextCopy()
-
+ c.systemContext = sc
if options.SourceLookupReferenceFunc != nil {
c.sourceLookup = options.SourceLookupReferenceFunc
}
@@ -300,6 +311,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) {
c.imageCopyOptions.ProgressInterval = time.Second
}
+ c.imageCopyOptions.ImageListSelection = options.ImageListSelection
c.imageCopyOptions.ForceCompressionFormat = options.ForceCompressionFormat
c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType
c.imageCopyOptions.SourceCtx = c.systemContext
@@ -325,14 +337,22 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) {
return &c, nil
}
-// close open resources.
-func (c *copier) close() error {
+// newCopier creates a copier. Note that fields in options *may* overwrite the
+// counterparts of the specified system context. Please make sure to call
+// `(*copier).close()`.
+func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) {
+ sc := r.systemContextCopy()
+ return newCopier(options, sc)
+}
+
+// Close open resources.
+func (c *copier) Close() error {
return c.policyContext.Destroy()
}
-// copy the source to the destination. Returns the bytes of the copied
+// Copy the source to the destination. Returns the bytes of the copied
// manifest which may be used for digest computation.
-func (c *copier) copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) {
+func (c *copier) Copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) {
logrus.Debugf("Copying source image %s to destination image %s", source.StringWithinTransport(), destination.StringWithinTransport())
// Avoid running out of time when running inside a systemd unit by
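
With `NewCopier`, `Copy`, and `Close` now exported, code outside libimage can drive a copy directly. A hypothetical sketch; the image references, empty options, and empty system context are placeholders that a real caller would populate:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()

	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
	if err != nil {
		panic(err)
	}
	destRef, err := alltransports.ParseImageName("oci:/tmp/alpine-oci:latest")
	if err != nil {
		panic(err)
	}

	c, err := libimage.NewCopier(&libimage.CopyOptions{}, &types.SystemContext{})
	if err != nil {
		panic(err)
	}
	defer c.Close() // releases the policy context

	manifestBytes, err := c.Copy(ctx, srcRef, destRef)
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied; manifest is %d bytes\n", len(manifestBytes))
}
```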
diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go
index 552c48eae..a03f28853 100644
--- a/vendor/github.com/containers/common/libimage/import.go
+++ b/vendor/github.com/containers/common/libimage/import.go
@@ -108,9 +108,9 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption
if err != nil {
return "", err
}
- defer c.close()
+ defer c.Close()
- if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ if _, err := c.Copy(ctx, srcRef, destRef); err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go
index 1db0cf4df..a23315da3 100644
--- a/vendor/github.com/containers/common/libimage/manifest_list.go
+++ b/vendor/github.com/containers/common/libimage/manifest_list.go
@@ -7,20 +7,28 @@ import (
"errors"
"fmt"
"maps"
+ "os"
+ "path/filepath"
"slices"
"time"
"github.com/containers/common/libimage/define"
"github.com/containers/common/libimage/manifests"
+ manifesterrors "github.com/containers/common/pkg/manifests"
+ "github.com/containers/common/pkg/supplemented"
imageCopy "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
structcopier "github.com/jinzhu/copier"
"github.com/opencontainers/go-digest"
+ imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
)
// NOTE: the abstractions and APIs here are a first step to further merge
@@ -101,8 +109,157 @@ func (r *Runtime) lookupManifestList(name string) (*Image, manifests.List, error
return image, list, nil
}
+// ConvertToManifestList converts the image into a manifest list if it is not
+// already one. An error is returned if the conversion fails.
+func (i *Image) ConvertToManifestList(ctx context.Context) (*ManifestList, error) {
+ // If we don't need to do anything, don't do anything.
+ if list, err := i.ToManifestList(); err == nil || !errors.Is(err, ErrNotAManifestList) {
+ return list, err
+ }
+
+ // Determine which type we prefer for the new manifest list or image index.
+ _, imageManifestType, err := i.Manifest(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading the image's manifest: %w", err)
+ }
+ var preferredListType string
+ switch imageManifestType {
+ case manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ manifest.DockerV2ListMediaType:
+ preferredListType = manifest.DockerV2ListMediaType
+ case imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex:
+ preferredListType = imgspecv1.MediaTypeImageIndex
+ default:
+ preferredListType = ""
+ }
+
+ // Create a list and add the image's manifest to it. Use OCI format
+ // for now. If we need to convert it to Docker format, we'll do that
+ // while copying it.
+ list := manifests.Create()
+ if _, err := list.Add(ctx, &i.runtime.systemContext, i.storageReference, false); err != nil {
+ return nil, fmt.Errorf("generating new image index: %w", err)
+ }
+ listBytes, err := list.Serialize(imgspecv1.MediaTypeImageIndex)
+ if err != nil {
+ return nil, fmt.Errorf("serializing image index: %w", err)
+ }
+ listDigest, err := manifest.Digest(listBytes)
+ if err != nil {
+ return nil, fmt.Errorf("digesting image index: %w", err)
+ }
+
+ // Build an OCI layout containing the image index as the only item.
+ tmp, err := os.MkdirTemp("", "")
+ if err != nil {
+ return nil, fmt.Errorf("serializing initial list: %w", err)
+ }
+ defer os.RemoveAll(tmp)
+
+ // Drop our image index in there.
+ if err := os.Mkdir(filepath.Join(tmp, imgspecv1.ImageBlobsDir), 0o755); err != nil {
+ return nil, fmt.Errorf("creating directory for blobs: %w", err)
+ }
+ if err := os.Mkdir(filepath.Join(tmp, imgspecv1.ImageBlobsDir, listDigest.Algorithm().String()), 0o755); err != nil {
+ return nil, fmt.Errorf("creating directory for %s blobs: %w", listDigest.Algorithm().String(), err)
+ }
+ listFile := filepath.Join(tmp, imgspecv1.ImageBlobsDir, listDigest.Algorithm().String(), listDigest.Encoded())
+ if err := os.WriteFile(listFile, listBytes, 0o644); err != nil {
+ return nil, fmt.Errorf("writing image index for OCI layout: %w", err)
+ }
+
+ // Build the index for the layout.
+ index := imgspecv1.Index{
+ Versioned: imgspec.Versioned{
+ SchemaVersion: 2,
+ },
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Manifests: []imgspecv1.Descriptor{{
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Digest: listDigest,
+ Size: int64(len(listBytes)),
+ }},
+ }
+ indexBytes, err := json.Marshal(&index)
+ if err != nil {
+ return nil, fmt.Errorf("encoding image index for OCI layout: %w", err)
+ }
+
+ // Write the index for the layout.
+ indexFile := filepath.Join(tmp, imgspecv1.ImageIndexFile)
+ if err := os.WriteFile(indexFile, indexBytes, 0o644); err != nil {
+ return nil, fmt.Errorf("writing top-level index for OCI layout: %w", err)
+ }
+
+ // Write the "why yes, this is an OCI layout" file.
+ layoutFile := filepath.Join(tmp, imgspecv1.ImageLayoutFile)
+ layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{Version: imgspecv1.ImageLayoutVersion})
+ if err != nil {
+ return nil, fmt.Errorf("encoding image layout structure for OCI layout: %w", err)
+ }
+ if err := os.WriteFile(layoutFile, layoutBytes, 0o644); err != nil {
+ return nil, fmt.Errorf("writing oci-layout file: %w", err)
+ }
+
+ // Build an OCI layout reference to use as a source.
+ tmpRef, err := layout.NewReference(tmp, "")
+ if err != nil {
+ return nil, fmt.Errorf("creating reference to directory: %w", err)
+ }
+ bundle := supplemented.Reference(tmpRef, []types.ImageReference{i.storageReference}, imageCopy.CopySystemImage, nil)
+
+ // Build a policy that ensures we don't prevent ourselves from reading
+ // this reference.
+ signaturePolicy, err := signature.DefaultPolicy(&i.runtime.systemContext)
+ if err != nil {
+ return nil, fmt.Errorf("obtaining default signature policy: %w", err)
+ }
+ acceptAnything := signature.PolicyTransportScopes{
+ "": []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()},
+ }
+ signaturePolicy.Transports[i.storageReference.Transport().Name()] = acceptAnything
+ signaturePolicy.Transports[tmpRef.Transport().Name()] = acceptAnything
+ policyContext, err := signature.NewPolicyContext(signaturePolicy)
+ if err != nil {
+ return nil, fmt.Errorf("creating new signature policy context: %w", err)
+ }
+ defer func() {
+ if err2 := policyContext.Destroy(); err2 != nil {
+ logrus.Errorf("Destroying signature policy context: %v", err2)
+ }
+ }()
+
+ // Copy from the OCI layout into the same image record, so that it gets
+ // both its own manifest and the image index.
+ copyOptions := imageCopy.Options{
+ ForceManifestMIMEType: imageManifestType,
+ }
+ if _, err := imageCopy.Image(ctx, policyContext, i.storageReference, bundle, &copyOptions); err != nil {
+ return nil, fmt.Errorf("writing updates to image: %w", err)
+ }
+
+ // Now explicitly write the list's manifest to the image as its "main"
+ // manifest.
+ if _, err := list.SaveToImage(i.runtime.store, i.ID(), i.storageImage.Names, preferredListType); err != nil {
+ return nil, fmt.Errorf("saving image index: %w", err)
+ }
+
+ // Reload the record.
+ if err = i.reload(); err != nil {
+ return nil, fmt.Errorf("reloading image record: %w", err)
+ }
+ mList, err := i.runtime.LookupManifestList(i.storageImage.ID)
+ if err != nil {
+ return nil, fmt.Errorf("looking up new manifest list: %w", err)
+ }
+
+ return mList, nil
+}
+
// ToManifestList converts the image into a manifest list. An error is thrown
-// if the image is no manifest list.
+// if the image is not a manifest list.
func (i *Image) ToManifestList() (*ManifestList, error) {
list, err := i.getManifestList()
if err != nil {
@@ -194,6 +351,9 @@ func (m *ManifestList) reload() error {
// getManifestList is a helper to obtain a manifest list
func (i *Image) getManifestList() (manifests.List, error) {
_, list, err := manifests.LoadFromImage(i.runtime.store, i.ID())
+ if errors.Is(err, manifesterrors.ErrManifestTypeNotSupported) {
+ err = fmt.Errorf("%s: %w", err.Error(), ErrNotAManifestList)
+ }
return list, err
}
@@ -636,7 +796,7 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma
if err != nil {
return "", err
}
- defer copier.close()
+ defer copier.Close()
pushOptions := manifests.PushOptions{
AddCompression: options.AddCompression,
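
A hypothetical caller of the new conversion entry point; `img` is assumed to come from a `libimage.Runtime` lookup:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// ensureList converts img into a manifest list (a no-op when it already
// is one) so that further instances can be added or annotated.
func ensureList(ctx context.Context, img *libimage.Image) (*libimage.ManifestList, error) {
	list, err := img.ConvertToManifestList(ctx)
	if err != nil {
		return nil, fmt.Errorf("converting %s to a manifest list: %w", img.ID(), err)
	}
	return list, nil
}
```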
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
index a1625bb1b..50c32569a 100644
--- a/vendor/github.com/containers/common/libimage/manifests/manifests.go
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -342,8 +342,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in
}
}
// write the index that refers to this one artifact image
- tag := "latest"
- indexFile := filepath.Join(tmp, "index.json")
+ indexFile := filepath.Join(tmp, v1.ImageIndexFile)
index := v1.Index{
Versioned: imgspec.Versioned{
SchemaVersion: 2,
@@ -353,9 +352,6 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in
MediaType: v1.MediaTypeImageManifest,
Digest: artifactManifestDigest,
Size: int64(len(contents)),
- Annotations: map[string]string{
- v1.AnnotationRefName: tag,
- },
}},
}
indexBytes, err := json.Marshal(&index)
@@ -366,12 +362,16 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in
return nil, fmt.Errorf("writing image index for OCI layout: %w", err)
}
// write the layout file
- layoutFile := filepath.Join(tmp, "oci-layout")
- if err := os.WriteFile(layoutFile, []byte(`{"imageLayoutVersion": "1.0.0"}`), 0o644); err != nil {
+ layoutFile := filepath.Join(tmp, v1.ImageLayoutFile)
+ layoutBytes, err := json.Marshal(v1.ImageLayout{Version: v1.ImageLayoutVersion})
+ if err != nil {
+ return nil, fmt.Errorf("encoding image layout for OCI layout: %w", err)
+ }
+ if err := os.WriteFile(layoutFile, layoutBytes, 0o644); err != nil {
return nil, fmt.Errorf("writing oci-layout file: %w", err)
}
// build the reference to this artifact image's oci layout
- ref, err := ocilayout.NewReference(tmp, tag)
+ ref, err := ocilayout.NewReference(tmp, "")
if err != nil {
return nil, fmt.Errorf("creating ImageReference for artifact with files %q: %w", symlinkedFiles, err)
}
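
For reference, `json.Marshal(v1.ImageLayout{Version: v1.ImageLayoutVersion})` produces the same JSON document (modulo whitespace) as the previously hard-coded `{"imageLayoutVersion": "1.0.0"}` literal, so switching to the spec constants removes a magic string without changing the meaning of the file.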
@@ -676,14 +676,14 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
// This should provide for all of the ways to construct a manifest outlined in
// https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidelines-for-artifact-usage
-// * no blobs → set ManifestArtifactType
-// * blobs, no configuration → set ManifestArtifactType and possibly LayerMediaType, and provide file names
-// * blobs and configuration → set ManifestArtifactType, possibly LayerMediaType, and ConfigDescriptor, and provide file names
+// - no blobs → set ManifestArtifactType
+// - blobs, no configuration → set ManifestArtifactType and possibly LayerMediaType, and provide file names
+// - blobs and configuration → set ManifestArtifactType, possibly LayerMediaType, and ConfigDescriptor, and provide file names
//
// The older style of describing artifacts:
-// * leave ManifestArtifactType blank
-// * specify a zero-length application/vnd.oci.image.config.v1+json config blob
-// * set LayerMediaType to a custom type
+// - leave ManifestArtifactType blank
+// - specify a zero-length application/vnd.oci.image.config.v1+json config blob
+// - set LayerMediaType to a custom type
//
// When reading data produced elsewhere, note that newer tooling will produce
// manifests with ArtifactType set. If the manifest's ArtifactType is not set,
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index 3db1b2992..c4ad5df0c 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -235,7 +235,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
if err != nil {
return nil, err
}
- defer c.close()
+ defer c.Close()
// Figure out a name for the storage destination.
var storageName, imageName string
@@ -321,7 +321,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
return nil, fmt.Errorf("parsing %q: %w", storageName, err)
}
- _, err = c.copy(ctx, ref, destRef)
+ _, err = c.Copy(ctx, ref, destRef)
return []string{imageName}, err
}
@@ -391,7 +391,7 @@ func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, read
if err != nil {
return nil, err
}
- defer c.close()
+ defer c.Close()
// Get a slice of storage references we can copy.
references, destNames, err := r.storageReferencesReferencesFromArchiveReader(ctx, readerRef, reader)
@@ -401,7 +401,7 @@ func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, read
// Now copy all of the images. Use readerRef for performance.
for _, destRef := range references {
- if _, err := c.copy(ctx, readerRef, destRef); err != nil {
+ if _, err := c.Copy(ctx, readerRef, destRef); err != nil {
return nil, err
}
}
@@ -640,7 +640,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
if err != nil {
return nil, err
}
- defer c.close()
+ defer c.Close()
var pullErrors []error
for _, candidate := range resolved.PullCandidates {
@@ -678,7 +678,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
}
}
var manifestBytes []byte
- if manifestBytes, err = c.copy(ctx, srcRef, destRef); err != nil {
+ if manifestBytes, err = c.Copy(ctx, srcRef, destRef); err != nil {
logrus.Debugf("Error pulling candidate %s: %v", candidateString, err)
pullErrors = append(pullErrors, err)
continue
diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go
index f89b8fc07..5db6cfbcf 100644
--- a/vendor/github.com/containers/common/libimage/push.go
+++ b/vendor/github.com/containers/common/libimage/push.go
@@ -114,7 +114,7 @@ func (r *Runtime) Push(ctx context.Context, source, destination string, options
return nil, err
}
- defer c.close()
+ defer c.Close()
- return c.copy(ctx, srcRef, destRef)
+ return c.Copy(ctx, srcRef, destRef)
}
diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go
index 62cad3288..46529d10f 100644
--- a/vendor/github.com/containers/common/libimage/save.go
+++ b/vendor/github.com/containers/common/libimage/save.go
@@ -123,9 +123,9 @@ func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string
if err != nil {
return err
}
- defer c.close()
+ defer c.Close()
- _, err = c.copy(ctx, srcRef, destRef)
+ _, err = c.Copy(ctx, srcRef, destRef)
return err
}
@@ -208,7 +208,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st
if err != nil {
return err
}
- defer c.close()
+ defer c.Close()
destRef, err := writer.NewReference(nil)
if err != nil {
@@ -220,7 +220,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st
return err
}
- if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ if _, err := c.Copy(ctx, srcRef, destRef); err != nil {
return err
}
}
diff --git a/vendor/github.com/containers/common/pkg/netns/netns_linux.go b/vendor/github.com/containers/common/pkg/netns/netns_linux.go
index db35fd15a..5461b05f7 100644
--- a/vendor/github.com/containers/common/pkg/netns/netns_linux.go
+++ b/vendor/github.com/containers/common/pkg/netns/netns_linux.go
@@ -40,6 +40,8 @@ import (
// threadNsPath is the /proc path to the current netns handle for the current thread
const threadNsPath = "/proc/thread-self/ns/net"
+var errNoFreeName = errors.New("failed to find free netns path name")
+
// GetNSRunDir returns the dir of where to create the netNS. When running
// rootless, it needs to be at a location writable by user.
func GetNSRunDir() (string, error) {
@@ -60,14 +62,26 @@ func NewNSAtPath(nsPath string) (ns.NetNS, error) {
// NewNS creates a new persistent (bind-mounted) network namespace and returns
// an object representing that namespace, without switching to it.
func NewNS() (ns.NetNS, error) {
+ nsRunDir, err := GetNSRunDir()
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the directory for mounting network namespaces.
+ // This needs to be a shared mountpoint in case it is mounted into
+ // other namespaces (containers).
+ err = makeNetnsDir(nsRunDir)
+ if err != nil {
+ return nil, err
+ }
+
for range 10000 {
- b := make([]byte, 16)
- _, err := rand.Reader.Read(b)
+ nsName, err := getRandomNetnsName()
if err != nil {
- return nil, fmt.Errorf("failed to generate random netns name: %v", err)
+ return nil, err
}
- nsName := fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
- ns, err := NewNSWithName(nsName)
+ nsPath := path.Join(nsRunDir, nsName)
+ ns, err := newNSPath(nsPath)
if err == nil {
return ns, nil
}
@@ -77,62 +91,128 @@ func NewNS() (ns.NetNS, error) {
}
return nil, err
}
- return nil, errors.New("failed to find free netns path name")
+ return nil, errNoFreeName
}
-// NewNSWithName creates a new persistent (bind-mounted) network namespace and returns
-// an object representing that namespace, without switching to it.
-func NewNSWithName(name string) (ns.NetNS, error) {
+// NewNSFrom creates a persistent (bind-mounted) network namespace from the
+// given netns path, i.e. /proc/<pid>/ns/net, and returns the new full path to
+// the bind mounted file in the netns run dir.
+func NewNSFrom(fromNetns string) (string, error) {
nsRunDir, err := GetNSRunDir()
if err != nil {
- return nil, err
+ return "", err
}
- // Create the directory for mounting network namespaces
- // This needs to be a shared mountpoint in case it is mounted in to
- // other namespaces (containers)
- err = os.MkdirAll(nsRunDir, 0o755)
+ err = makeNetnsDir(nsRunDir)
if err != nil {
- return nil, err
+ return "", err
}
- // Remount the namespace directory shared. This will fail if it is not
- // already a mountpoint, so bind-mount it on to itself to "upgrade" it
- // to a mountpoint.
- err = unix.Mount("", nsRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
- if err != nil {
- if err != unix.EINVAL {
- return nil, fmt.Errorf("mount --make-rshared %s failed: %q", nsRunDir, err)
+ for range 10000 {
+ nsName, err := getRandomNetnsName()
+ if err != nil {
+ return "", err
}
+ nsPath := filepath.Join(nsRunDir, nsName)
- // Recursively remount /run/netns on itself. The recursive flag is
- // so that any existing netns bindmounts are carried over.
- err = unix.Mount(nsRunDir, nsRunDir, "none", unix.MS_BIND|unix.MS_REC, "")
+ // create an empty file to use as the mount point
+ err = createNetnsFile(nsPath)
if err != nil {
- return nil, fmt.Errorf("mount --rbind %s %s failed: %q", nsRunDir, nsRunDir, err)
+ // retry when the name already exists
+ if errors.Is(err, os.ErrExist) {
+ continue
+ }
+ return "", err
}
- // Now we can make it shared
- err = unix.Mount("", nsRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
+ err = unix.Mount(fromNetns, nsPath, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "")
if err != nil {
- return nil, fmt.Errorf("mount --make-rshared %s failed: %q", nsRunDir, err)
+ // Do not leak the ns on errors
+ _ = os.RemoveAll(nsPath)
+ return "", fmt.Errorf("failed to bind mount ns at %s: %v", nsPath, err)
}
+ return nsPath, nil
}
- nsPath := path.Join(nsRunDir, name)
- return newNSPath(nsPath)
+ return "", errNoFreeName
}
-func newNSPath(nsPath string) (ns.NetNS, error) {
- // create an empty file at the mount point
- mountPointFd, err := os.OpenFile(nsPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
+func getRandomNetnsName() (string, error) {
+ b := make([]byte, 16)
+ _, err := rand.Reader.Read(b)
if err != nil {
- return nil, err
+ return "", fmt.Errorf("failed to generate random netns name: %v", err)
}
- if err := mountPointFd.Close(); err != nil {
- return nil, err
+ return fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), nil
+}
+
+func makeNetnsDir(nsRunDir string) error {
+ err := os.MkdirAll(nsRunDir, 0o755)
+ if err != nil {
+ return err
+ }
+ // Important: the bind mount setup is racy if two processes try to set it up in parallel.
+ // This can have very bad consequences because we end up with two duplicated mounts
+ // for the netns file that then might have different parent mounts.
+ // Also, as root the netns dir is created by "ip netns" too, so we should not race against it.
+ // Use a lock on the netns dir like they do, compare the iproute2 ip netns add code.
+ // https://github.com/iproute2/iproute2/blob/8b9d9ea42759c91d950356ca43930a975d0c352b/ip/ipnetns.c#L806-L815
+
+ dirFD, err := unix.Open(nsRunDir, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return &os.PathError{Op: "open", Path: nsRunDir, Err: err}
+ }
+ // closing the fd will also unlock so we do not have to call flock(fd,LOCK_UN)
+ defer unix.Close(dirFD)
+
+ err = unix.Flock(dirFD, unix.LOCK_EX)
+ if err != nil {
+ return fmt.Errorf("failed to lock %s dir: %w", nsRunDir, err)
+ }
+
+ // Remount the namespace directory shared. This will fail with EINVAL
+ // if it is not already a mountpoint, so bind-mount it on to itself
+ // to "upgrade" it to a mountpoint.
+ err = unix.Mount("", nsRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
+ if err == nil {
+ return nil
+ }
+ if err != unix.EINVAL {
+ return fmt.Errorf("mount --make-rshared %s failed: %q", nsRunDir, err)
+ }
+
+ // Recursively remount /run/netns on itself. The recursive flag is
+ // so that any existing netns bindmounts are carried over.
+ err = unix.Mount(nsRunDir, nsRunDir, "none", unix.MS_BIND|unix.MS_REC, "")
+ if err != nil {
+ return fmt.Errorf("mount --rbind %s %s failed: %q", nsRunDir, nsRunDir, err)
}
+ // Now we can make it shared
+ err = unix.Mount("", nsRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
+ if err != nil {
+ return fmt.Errorf("mount --make-rshared %s failed: %q", nsRunDir, err)
+ }
+
+ return nil
+}
+
+// createNetnsFile creates the file with O_EXCL to ensure there are no conflicts with others.
+// Callers should check for ErrExist and retry with a new name to find a free file.
+func createNetnsFile(path string) error {
+ mountPointFd, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
+ if err != nil {
+ return err
+ }
+ return mountPointFd.Close()
+}
+
+func newNSPath(nsPath string) (ns.NetNS, error) {
+ // create an empty file to use as the mount point
+ err := createNetnsFile(nsPath)
+ if err != nil {
+ return nil, err
+ }
// Ensure the mount point is cleaned up on errors; if the namespace
// was successfully mounted this will have no effect because the file
// is in-use
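
A hypothetical caller of the new `NewNSFrom` helper, pinning an existing namespace into the netns run dir. Running this requires the usual namespace privileges, and the `/proc` path shown is an assumption for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/netns"
)

func main() {
	// Bind-mount the current process's network namespace to a fresh,
	// randomly named file under the netns run dir.
	nsPath, err := netns.NewNSFrom(fmt.Sprintf("/proc/%d/ns/net", os.Getpid()))
	if err != nil {
		panic(err)
	}
	fmt.Println("pinned netns at", nsPath)
}
```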
diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go
index 081c49312..fb5e1b174 100644
--- a/vendor/github.com/containers/image/v5/copy/compression.go
+++ b/vendor/github.com/containers/image/v5/copy/compression.go
@@ -52,6 +52,16 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
}
stream.reader = reader
+ if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
+ tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+ if err != nil {
+ return bpDetectCompressionStepData{}, err
+ }
+ if tocDigest != nil {
+ format = compression.ZstdChunked
+ }
+ }
res := bpDetectCompressionStepData{
isCompressed: decompressor != nil,
format: format,
@@ -71,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
type bpCompressionStepData struct {
- operation bpcOperation // What we are actually doing
- uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
- uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
- uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
- srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob.
- uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
- closers []io.Closer // Objects to close after the upload is done, if any.
+ operation bpcOperation // What we are actually doing
+ uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
+ uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+ uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+ srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob.
+ uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob.
+ uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob.
+ closers []io.Closer // Objects to close after the upload is done, if any.
}
type bpcOperation int
@@ -129,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp
// We can’t do anything with an encrypted blob unless decrypted.
logrus.Debugf("Using original blob without modification for encrypted blob")
return &bpCompressionStepData{
- operation: bpcOpPreserveOpaque,
- uploadedOperation: types.PreserveOriginal,
- uploadedAlgorithm: nil,
- srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
- uploadedCompressorName: internalblobinfocache.UnknownCompression,
+ operation: bpcOpPreserveOpaque,
+ uploadedOperation: types.PreserveOriginal,
+ uploadedAlgorithm: nil,
+ srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
+ uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
+ uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
}, nil
}
return nil, nil
@@ -157,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
Digest: "",
Size: -1,
}
+ specificVariantName := uploadedAlgorithm.Name()
+ if specificVariantName == uploadedAlgorithm.BaseVariantName() {
+ specificVariantName = internalblobinfocache.UnknownCompression
+ }
return &bpCompressionStepData{
- operation: bpcOpCompressUncompressed,
- uploadedOperation: types.Compress,
- uploadedAlgorithm: uploadedAlgorithm,
- uploadedAnnotations: annotations,
- srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
- uploadedCompressorName: uploadedAlgorithm.Name(),
- closers: []io.Closer{reader},
+ operation: bpcOpCompressUncompressed,
+ uploadedOperation: types.Compress,
+ uploadedAlgorithm: uploadedAlgorithm,
+ uploadedAnnotations: annotations,
+ srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(),
+ uploadedCompressorSpecificVariantName: specificVariantName,
+ closers: []io.Closer{reader},
}, nil
}
return nil, nil
@@ -197,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
Digest: "",
Size: -1,
}
+ specificVariantName := ic.compressionFormat.Name()
+ if specificVariantName == ic.compressionFormat.BaseVariantName() {
+ specificVariantName = internalblobinfocache.UnknownCompression
+ }
succeeded = true
return &bpCompressionStepData{
- operation: bpcOpRecompressCompressed,
- uploadedOperation: types.PreserveOriginal,
- uploadedAlgorithm: ic.compressionFormat,
- uploadedAnnotations: annotations,
- srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
- uploadedCompressorName: ic.compressionFormat.Name(),
- closers: []io.Closer{decompressed, recompressed},
+ operation: bpcOpRecompressCompressed,
+ uploadedOperation: types.PreserveOriginal,
+ uploadedAlgorithm: ic.compressionFormat,
+ uploadedAnnotations: annotations,
+ srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(),
+ uploadedCompressorSpecificVariantName: specificVariantName,
+ closers: []io.Closer{decompressed, recompressed},
}, nil
}
return nil, nil
@@ -226,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
Size: -1,
}
return &bpCompressionStepData{
- operation: bpcOpDecompressCompressed,
- uploadedOperation: types.Decompress,
- uploadedAlgorithm: nil,
- srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
- uploadedCompressorName: internalblobinfocache.Uncompressed,
- closers: []io.Closer{s},
+ operation: bpcOpDecompressCompressed,
+ uploadedOperation: types.Decompress,
+ uploadedAlgorithm: nil,
+ srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed,
+ uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
+ closers: []io.Closer{s},
}, nil
}
return nil, nil
@@ -276,7 +299,8 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
// We only record the base variant of the format on upload; we didn’t do anything with
// the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger
// reuse of any kind between the blob digest and the TOC digest.
- uploadedCompressorName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+ uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
}
}
@@ -336,24 +360,16 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
- if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorName == "" {
- return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded: %q)",
- d.srcCompressorBaseVariantName, d.uploadedCompressorName)
+ if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" {
+ return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)",
+ d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName)
}
- if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
- if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
- // HACK: Don’t record zstd:chunked algorithms.
- // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
- // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
- //
- // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
- // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
- // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
- // inconsistent data to be logged.
- c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
- BaseVariantCompressor: d.uploadedCompressorName,
- })
- }
+ if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+ c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
+ BaseVariantCompressor: d.uploadedCompressorBaseVariantName,
+ SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName,
+ SpecificVariantAnnotations: d.uploadedAnnotations,
+ })
}
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
@@ -361,7 +377,9 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
// blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest,
// so record neither the variant name, nor the TOC digest.
c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
- BaseVariantCompressor: d.srcCompressorBaseVariantName,
+ BaseVariantCompressor: d.srcCompressorBaseVariantName,
+ SpecificVariantCompressor: internalblobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
})
}
return nil
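
The new detection rule: a blob whose stream decompresses as zstd and whose annotations carry a TOC digest is treated as zstd:chunked. A rough, self-contained sketch of that check, mirroring the calls the hunk above makes:

```go
package sketch

import (
	"github.com/containers/image/v5/pkg/compression"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
)

// effectiveFormat upgrades a detected zstd format to zstd:chunked when
// the blob annotations contain a TOC digest.
func effectiveFormat(format compressiontypes.Algorithm, annotations map[string]string) (compressiontypes.Algorithm, error) {
	if format.Name() != compressiontypes.ZstdAlgorithmName {
		return format, nil
	}
	tocDigest, err := chunkedToc.GetTOCDigest(annotations)
	if err != nil {
		return format, err
	}
	if tocDigest != nil {
		return compression.ZstdChunked, nil
	}
	return format, nil
}
```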
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 996a4e2d7..867ba73c7 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -193,35 +193,33 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
reportWriter = options.ReportWriter
}
+ // safeClose amends retErr with an error from c.Close(), if any.
+ safeClose := func(name string, c io.Closer) {
+ err := c.Close()
+ if err == nil {
+ return
+ }
+ // Do not use %w for err as we don't want it to be unwrapped by callers.
+ if retErr != nil {
+ retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr)
+ } else {
+ retErr = fmt.Errorf(" (%s: %s)", name, err.Error())
+ }
+ }
+
publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
if err != nil {
return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
}
dest := imagedestination.FromPublic(publicDest)
- defer func() {
- if err := dest.Close(); err != nil {
- if retErr != nil {
- retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
- } else {
- retErr = fmt.Errorf(" (dest: %v)", err)
- }
- }
- }()
+ defer safeClose("dest", dest)
publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
if err != nil {
return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
}
rawSource := imagesource.FromPublic(publicRawSource)
- defer func() {
- if err := rawSource.Close(); err != nil {
- if retErr != nil {
- retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
- } else {
- retErr = fmt.Errorf(" (src: %v)", err)
- }
- }
- }()
+ defer safeClose("src", rawSource)
// If reportWriter is not a TTY (e.g., when piping to a file), do not
// print the progress bars to avoid long and hard to parse output.
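
The `safeClose` helper generalizes a common Go pattern: folding errors from deferred `Close` calls into a named return value instead of dropping them. A standalone sketch of the same idea:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// readFile demonstrates the deferred-close pattern used by safeClose:
// a Close failure is attached to the function's named return error
// rather than being silently discarded.
func readFile(path string) (data []byte, retErr error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() {
		if cerr := f.Close(); cerr != nil {
			// Do not use %w for cerr so callers can't unwrap it.
			if retErr != nil {
				retErr = fmt.Errorf("(close: %s): %w", cerr.Error(), retErr)
			} else {
				retErr = fmt.Errorf("close: %s", cerr.Error())
			}
		}
	}()
	return io.ReadAll(f)
}

func main() {
	if _, err := readFile("/etc/hostname"); err != nil {
		fmt.Println("error:", err)
	}
}
```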
diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go
index 08128ce8d..59f41d216 100644
--- a/vendor/github.com/containers/image/v5/copy/progress_bars.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go
@@ -24,13 +24,18 @@ func (c *copier) newProgressPool() *mpb.Progress {
// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
func customPartialBlobDecorFunc(s decor.Statistics) string {
+ current := decor.SizeB1024(s.Current)
+ total := decor.SizeB1024(s.Total)
+ refill := decor.SizeB1024(s.Refill)
if s.Total == 0 {
- pairFmt := "%.1f / %.1f (skipped: %.1f)"
- return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill)
+ }
+ // If we didn't do a partial fetch then let's not output a distracting "skipped: 0.0b = 0.00%".
+ if s.Refill == 0 {
+ return fmt.Sprintf("%.1f / %.1f", current, total)
}
- pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
percentage := 100.0 * float64(s.Refill) / float64(s.Total)
- return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage)
}
// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go
index 0ec54ded2..7ddfe917b 100644
--- a/vendor/github.com/containers/image/v5/copy/sign.go
+++ b/vendor/github.com/containers/image/v5/copy/sign.go
@@ -106,7 +106,7 @@ func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity
if len(c.signers) == 1 {
return nil, fmt.Errorf("creating signature: %w", err)
} else {
- return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err)
+ return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err)
}
}
res = append(res, newSig)
diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go
index 714dc8136..324785a8b 100644
--- a/vendor/github.com/containers/image/v5/copy/single.go
+++ b/vendor/github.com/containers/image/v5/copy/single.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
+ "maps"
"reflect"
"slices"
"strings"
@@ -162,7 +163,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
if format == nil {
format = defaultCompressionFormat
}
- if format.Name() == compression.ZstdChunked.Name() {
+ if format.Name() == compressiontypes.ZstdChunkedAlgorithmName {
if ic.requireCompressionFormatMatch {
return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead")
}
@@ -322,10 +323,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
if err != nil {
return fmt.Errorf("parsing image configuration: %w", err)
}
- wantedPlatforms, err := platform.WantedPlatforms(sys)
- if err != nil {
- return fmt.Errorf("getting current platform information %#v: %w", sys, err)
- }
+ wantedPlatforms := platform.WantedPlatforms(sys)
options := newOrderedSet()
match := false
@@ -888,21 +886,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
// of the generic code in this package.
res := types.BlobInfo{
- Digest: reusedBlob.Digest,
- Size: reusedBlob.Size,
- URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
- Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
- MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+ Digest: reusedBlob.Digest,
+ Size: reusedBlob.Size,
+ URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+ // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t
+ // (but those annotations being left with incorrect values should not break pulls).
+ Annotations: maps.Clone(inputInfo.Annotations),
+ MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
CompressionOperation: reusedBlob.CompressionOperation,
CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
}
// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
- // if the blob was substituted; otherwise, fill it in based
+ // if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
// on what we know from the srcInfos we were given.
if reusedBlob.Digest == inputInfo.Digest {
- res.CompressionOperation = inputInfo.CompressionOperation
- res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+ if res.CompressionOperation == types.PreserveOriginal {
+ res.CompressionOperation = inputInfo.CompressionOperation
+ }
+ if res.CompressionAlgorithm == nil {
+ res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+ }
+ }
+ if len(reusedBlob.CompressionAnnotations) != 0 {
+ if res.Annotations == nil {
+ res.Annotations = map[string]string{}
+ }
+ maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
}
return res
}
diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go
index 7d66ef6bc..29d3b0420 100644
--- a/vendor/github.com/containers/image/v5/docker/body_reader.go
+++ b/vendor/github.com/containers/image/v5/docker/body_reader.go
@@ -6,7 +6,7 @@ import (
"fmt"
"io"
"math"
- "math/rand"
+ "math/rand/v2"
"net/http"
"net/url"
"strconv"
@@ -158,7 +158,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
}
br.body = nil
- time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
+ time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
headers := map[string][]string{
"Range": {fmt.Sprintf("bytes=%d-", br.offset)},
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
index 354af2140..64ccf6ae5 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/client.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -80,6 +80,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
return &http.Client{
Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
TLSClientConfig: tlsc,
},
CheckRedirect: dockerclient.CheckRedirect,
@@ -89,6 +90,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
func httpConfig() *http.Client {
return &http.Client{
Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
TLSClientConfig: nil,
},
CheckRedirect: dockerclient.CheckRedirect,
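
Both constructors now honor the standard proxy environment variables. A minimal sketch of what the added field buys, with all other transport fields omitted:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// Respects HTTP_PROXY, HTTPS_PROXY, and NO_PROXY.
			Proxy: http.ProxyFromEnvironment,
		},
	}
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```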
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 9741afc3f..74f559dce 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -91,6 +91,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
}
for _, tag := range tagsHolder.Tags {
if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
+ // Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary
+ // to the spec, may include JSON null values in the list; and Go silently parses them as "".
+ if tag == "" {
+ logrus.Debugf("Ignoring invalid empty tag")
+ continue
+ }
// Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
// contrary to the tag format specified in
// https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 7f7a74bd3..ed3d4a2c0 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -332,6 +332,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
+ originalCandidateKnownToBeMissing := false
if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
@@ -341,9 +342,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
if haveBlob {
return true, reusedInfo, nil
}
+ originalCandidateKnownToBeMissing = true
} else {
logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
+ // We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
+ // In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
+ // a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
+ // with the BIC’s annotations.
+ // This is not quite correct: it only works if the BIC also contains an acceptable _location_.
+ // Ideally, we could look up just the compression algorithm/annotations for info.digest,
+ // and use it even if no location candidate exists and the original candidate is present.
}
// Then try reusing blobs from other locations.
@@ -387,7 +396,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// for it in the current repo.
candidateRepo = reference.TrimNamed(d.ref.ref)
}
- if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ if originalCandidateKnownToBeMissing &&
+ candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}
@@ -427,10 +437,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
return true, private.ReusedBlob{
- Digest: candidate.Digest,
- Size: size,
- CompressionOperation: candidate.CompressionOperation,
- CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
+ Digest: candidate.Digest,
+ Size: size,
+ CompressionOperation: candidate.CompressionOperation,
+ CompressionAlgorithm: candidate.CompressionAlgorithm,
+ CompressionAnnotations: candidate.CompressionAnnotations,
+ }, nil
}
return false, private.ReusedBlob{}, nil
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index c8f6ba305..6e44ce096 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -116,10 +116,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
// Don’t just build a string, try to preserve the typed error.
primary := &attempts[len(attempts)-1]
extras := []string{}
- for i := 0; i < len(attempts)-1; i++ {
+ for _, attempt := range attempts[:len(attempts)-1] {
// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
// The paired [] at least have some chance of being unambiguous.
- extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
+ extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err))
}
return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
}
@@ -464,26 +464,20 @@ func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanc
var res []signature.Signature
switch {
case s.c.supportsSignatures:
- sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest)
- if err != nil {
+ if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil {
return nil, err
}
- res = append(res, sigs...)
case s.c.signatureBase != nil:
- sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest)
- if err != nil {
+ if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil {
return nil, err
}
- res = append(res, sigs...)
default:
return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
}
- sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest)
- if err != nil {
+ if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil {
return nil, err
}
- res = append(res, sigstoreSigs...)
return res, nil
}
@@ -505,35 +499,35 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
return manifest.Digest(s.cachedManifest)
}
-// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
-// which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
+// which is not nil, storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
- return nil, err
+ return err
}
// NOTE: Keep this in sync with docs/signature-protocols.md!
- signatures := []signature.Signature{}
for i := 0; ; i++ {
if i >= maxLookasideSignatures {
- return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
+ return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
}
sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
if err != nil {
- return nil, err
+ return err
}
signature, missing, err := s.getOneSignature(ctx, sigURL)
if err != nil {
- return nil, err
+ return err
}
if missing {
break
}
- signatures = append(signatures, signature)
+ *dest = append(*dest, signature)
}
- return signatures, nil
+ return nil
}
// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil)
@@ -596,48 +590,51 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL
}
}
-// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
- return nil, err
+ return err
}
parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
if err != nil {
- return nil, err
+ return err
}
- var sigs []signature.Signature
for _, sig := range parsedBody.Signatures {
if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
- sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+ *dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content))
}
}
- return sigs, nil
+ return nil
}
-func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
if !s.c.useSigstoreAttachments {
logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
- return nil, nil
+ return nil
}
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
- return nil, err
+ return err
}
ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
if err != nil {
- return nil, err
+ return err
}
if ociManifest == nil {
- return nil, nil
+ return nil
}
logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
- res := []signature.Signature{}
for layerIndex, layer := range ociManifest.Layers {
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
// not just signatures. We leave the signature consumers to decide based on the MIME type.
@@ -648,11 +645,11 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con
payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
none.NoCache)
if err != nil {
- return nil, err
+ return err
}
- res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
+ *dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
}
- return res, nil
+ return nil
}
// deleteImage deletes the named image from the registry, if supported.
@@ -830,7 +827,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
handleBufferedNetworkReader(&br)
}()
- for i := uint(0); i < nBuffers; i++ {
+ for range nBuffers {
b := bufferedNetworkReaderBuffer{
data: make([]byte, bufferSize),
}
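
The counted loop in makeBufferedNetworkReader switches to Go 1.22's range-over-integer form, which is valid here because the body never uses the index. A small sketch of the idiom:

    package main

    import "fmt"

    func main() {
        const nBuffers = 3
        buffers := make([][]byte, 0, nBuffers)
        for range nBuffers { // Go 1.22+: iterate nBuffers times, no index variable needed
            buffers = append(buffers, make([]byte, 8))
        }
        fmt.Println(len(buffers)) // 3
    }
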
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
index 276c8073e..acf82ee63 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
@@ -37,8 +37,11 @@ type BlobInfoCache2 interface {
// RecordDigestCompressorData records data for the blob with the specified digest.
// WARNING: Only call this with LOCALLY VERIFIED data:
- // - don’t record a compressor for a digest just because some remote author claims so
- // (e.g. because a manifest says so);
+ // - don’t record a compressor for a digest just because some remote author claims so
+ // (e.g. because a manifest says so);
+ // - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+ // and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+ // in a manifest)
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
@@ -52,6 +55,9 @@ type BlobInfoCache2 interface {
// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
type DigestCompressorData struct {
BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
+ // The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
+ SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
+ SpecificVariantAnnotations map[string]string // Annotations required to benefit from the specific variant.
}
// CandidateLocations2Options are used in CandidateLocations2.
@@ -66,9 +72,10 @@ type CandidateLocations2Options struct {
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
- Digest digest.Digest
- CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
- CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
- UnknownLocation bool // is true when `Location` for this blob is not set
- Location types.BICLocationReference // not set if UnknownLocation is set to `true`
+ Digest digest.Digest
+ CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
+ CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+ CompressionAnnotations map[string]string // Annotations necessary to use CompressionAlgorithm, if any
+ UnknownLocation bool // is true when `Location` for this blob is not set
+ Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}
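
DigestCompressorData now records a base variant plus an optional specific variant and the annotations needed to use it, which is the zstd / zstd:chunked split this series is about. A hedged sketch of a populated record; the struct is re-declared locally and the annotation key is a placeholder, not the real TOC annotation name:

    package main

    import "fmt"

    // DigestCompressorData mirrors the shape of the cache record above.
    type DigestCompressorData struct {
        BaseVariantCompressor      string
        SpecificVariantCompressor  string
        SpecificVariantAnnotations map[string]string
    }

    func main() {
        // A blob whose base variant is zstd, with a chunked-specific variant that
        // is only usable when its annotations travel with the manifest entry.
        data := DigestCompressorData{
            BaseVariantCompressor:     "zstd",
            SpecificVariantCompressor: "zstd:chunked",
            SpecificVariantAnnotations: map[string]string{
                // Placeholder key; the real TOC annotation name differs.
                "example.annotation/toc-digest": "sha256:0000",
            },
        }
        fmt.Printf("%+v\n", data)
    }
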
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
index cdd3c5e5d..f5a38541a 100644
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -76,6 +76,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob
Size: blob.Size,
CompressionOperation: blob.CompressionOperation,
CompressionAlgorithm: blob.CompressionAlgorithm,
+ // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated
+ // annotations, and we didn’t use the blob.Annotations field previously, so we’ll
+ // continue not using it.
}, nil
}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
index f847fa9cc..07922cece 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
@@ -152,10 +152,7 @@ func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemCont
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
- wantedPlatforms, err := platform.WantedPlatforms(ctx)
- if err != nil {
- return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
- }
+ wantedPlatforms := platform.WantedPlatforms(ctx)
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
index ee0ddc772..3fb52104a 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
@@ -205,11 +205,6 @@ type ReuseConditions struct {
// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
if c.RequiredCompression != nil {
- if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
- // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
- // The caller must re-compress to build those annotations.
- return false
- }
if candidateCompression == nil ||
(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
return false
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
index fe78efaeb..6a0f88d3a 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
@@ -236,10 +236,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
if preferGzip == types.OptionalBoolTrue {
didPreferGzip = true
}
- wantedPlatforms, err := platform.WantedPlatforms(ctx)
- if err != nil {
- return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
- }
+ wantedPlatforms := platform.WantedPlatforms(ctx)
var bestMatch *instanceCandidate
bestMatch = nil
for manifestIndex, d := range index.Manifests {
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
index afdce1d3d..3a16dad63 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -153,7 +153,7 @@ var compatibility = map[string][]string{
// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
-func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
+func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform {
// Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
// The fields are not specified by the OCI specification, as of version 1.1, usefully enough
// to be interoperable, anyway.
@@ -211,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
Variant: v,
})
}
- return res, nil
+ return res
}
// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
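
With the error return removed, every WantedPlatforms call site drops its error branch, as both ChooseInstance hunks above show. A minimal model of the before and after, using a stand-in function rather than the real platform package:

    package main

    import "fmt"

    // wantedPlatforms stands in for platform.WantedPlatforms, which now returns
    // only a slice: platform detection cannot fail, so callers lose the err branch.
    func wantedPlatforms() []string {
        return []string{"linux/amd64", "linux/386"}
    }

    func main() {
        // Before: platforms, err := wantedPlatforms(); if err != nil { ... }
        // After: the call is infallible and feeds the match loop directly.
        for _, p := range wantedPlatforms() {
            fmt.Println("considering", p)
        }
    }
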
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
index 63fb9326d..d81ea6703 100644
--- a/vendor/github.com/containers/image/v5/internal/private/private.go
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -134,9 +134,14 @@ type ReusedBlob struct {
Size int64 // Must be provided
// The following compression fields should be set when the reuse substitutes
// a differently-compressed blob.
+ // They may be set also to change from a base variant to a specific variant of an algorithm.
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
+ // Annotations that should be added, for CompressionAlgorithm. Note that they might need to be
+ // added even if the digest doesn’t change (if we found the annotations in a cache).
+ CompressionAnnotations map[string]string
+
MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
index 222aa896e..b74a1e240 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -318,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
// Add the history and rootfs information.
rootfs, err := json.Marshal(rootFS)
if err != nil {
- return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+ return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err)
}
rawRootfs := json.RawMessage(rootfs)
raw["rootfs"] = &rawRootfs
history, err := json.Marshal(convertedHistory)
if err != nil {
- return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+ return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err)
}
rawHistory := json.RawMessage(history)
raw["history"] = &rawHistory
// Encode the result.
config, err = json.Marshal(raw)
if err != nil {
- return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err)
+ return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err)
}
return config, nil
}
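
The error strings in this file (and in the openshift copies below) switch from %v to %w so the wrapped error participates in errors.Is and errors.As chains. A minimal illustration:

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
    )

    func main() {
        base := fs.ErrNotExist

        wrapped := fmt.Errorf("encoding rootfs information: %w", base)   // %w keeps the error chain
        formatted := fmt.Errorf("encoding rootfs information: %v", base) // %v flattens it to text

        fmt.Println(errors.Is(wrapped, fs.ErrNotExist))   // true
        fmt.Println(errors.Is(formatted, fs.ErrNotExist)) // false
    }
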
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index f714574ee..0faa866b7 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -60,7 +60,7 @@ func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
return nil, err
}
- if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
index bcf257df6..08366a7e2 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
@@ -27,17 +27,8 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex
return err
}
- var blobsUsedByImage map[digest.Digest]int
-
- switch descriptor.MediaType {
- case imgspecv1.MediaTypeImageManifest:
- blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir)
- case imgspecv1.MediaTypeImageIndex:
- blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir)
- default:
- return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
- }
- if err != nil {
+ blobsUsedByImage := make(map[digest.Digest]int)
+ if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil {
return err
}
@@ -54,82 +45,48 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex
return ref.deleteReferenceFromIndex(descriptorIndex)
}
-func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
- manifest, err := ref.getManifest(descriptor, sharedBlobsDir)
- if err != nil {
- return nil, err
- }
- blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest)
- blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference
-
- return blobsUsedInManifest, nil
-}
-
-func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
+// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself.
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error {
blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
if err != nil {
- return nil, err
- }
- index, err := parseIndex(blobPath)
- if err != nil {
- return nil, err
+ return err
}
- blobsUsedInImageRefIndex := make(map[digest.Digest]int)
- err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir)
- if err != nil {
- return nil, err
+ dest[descriptor.Digest]++
+ switch descriptor.MediaType {
+ case imgspecv1.MediaTypeImageManifest:
+ manifest, err := parseJSON[imgspecv1.Manifest](blobPath)
+ if err != nil {
+ return err
+ }
+ dest[manifest.Config.Digest]++
+ for _, layer := range manifest.Layers {
+ dest[layer.Digest]++
+ }
+ case imgspecv1.MediaTypeImageIndex:
+ index, err := parseIndex(blobPath)
+ if err != nil {
+ return err
+ }
+ if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
}
- blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference
-
- return blobsUsedInImageRefIndex, nil
+ return nil
}
-// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map
-func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
+// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself.
+func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
for _, descriptor := range index.Manifests {
- destination[descriptor.Digest]++
- switch descriptor.MediaType {
- case imgspecv1.MediaTypeImageManifest:
- manifest, err := ref.getManifest(&descriptor, sharedBlobsDir)
- if err != nil {
- return err
- }
- for digest, count := range ref.getBlobsUsedInManifest(manifest) {
- destination[digest] += count
- }
- case imgspecv1.MediaTypeImageIndex:
- blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
- if err != nil {
- return err
- }
- index, err := parseIndex(blobPath)
- if err != nil {
- return err
- }
- err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir)
- if err != nil {
- return err
- }
- default:
- return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+ if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil {
+ return err
}
}
-
return nil
}
-func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int {
- blobsUsedInManifest := make(map[digest.Digest]int, 0)
-
- blobsUsedInManifest[manifest.Config.Digest]++
- for _, layer := range manifest.Layers {
- blobsUsedInManifest[layer.Digest]++
- }
-
- return blobsUsedInManifest
-}
-
// This takes in a map of the digest and their usage count in the manifest to be deleted
// It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted
func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
@@ -138,7 +95,7 @@ func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[diges
return nil, err
}
blobsUsedInRootIndex := make(map[digest.Digest]int)
- err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
+ err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
if err != nil {
return nil, err
}
@@ -224,17 +181,3 @@ func saveJSON(path string, content any) error {
return json.NewEncoder(file).Encode(content)
}
-
-func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) {
- manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
- if err != nil {
- return nil, err
- }
-
- manifest, err := parseJSON[imgspecv1.Manifest](manifestPath)
- if err != nil {
- return nil, err
- }
-
- return manifest, nil
-}
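
The four deleted helpers collapse into two mutually recursive counters: countBlobsForDescriptor counts the descriptor itself and dispatches on media type, while countBlobsReferencedByIndex walks an index's manifests and excludes the index itself. A toy model of that recursion, using simplified stand-in types instead of the OCI descriptor structs:

    package main

    import "fmt"

    type descriptor struct {
        mediaType string
        digest    string
        children  []descriptor // config/layers for a manifest, manifests for an index
    }

    // countBlobsForDescriptor counts the descriptor itself, then what it references.
    func countBlobsForDescriptor(dest map[string]int, d descriptor) {
        dest[d.digest]++
        switch d.mediaType {
        case "manifest":
            for _, c := range d.children {
                dest[c.digest]++
            }
        case "index":
            countBlobsReferencedByIndex(dest, d.children)
        }
    }

    // countBlobsReferencedByIndex counts everything an index references,
    // excluding the index itself.
    func countBlobsReferencedByIndex(dest map[string]int, manifests []descriptor) {
        for _, m := range manifests {
            countBlobsForDescriptor(dest, m)
        }
    }

    func main() {
        idx := descriptor{mediaType: "index", digest: "sha256:idx", children: []descriptor{
            {mediaType: "manifest", digest: "sha256:m1", children: []descriptor{{digest: "sha256:layer"}}},
            {mediaType: "manifest", digest: "sha256:m2", children: []descriptor{{digest: "sha256:layer"}}},
        }}
        counts := map[string]int{}
        countBlobsForDescriptor(counts, idx)
        fmt.Println(counts["sha256:layer"]) // 2: counted once per referencing manifest
    }
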
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index fff586bee..cef3dcccf 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -365,7 +365,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
if len(clusterInfo.CertificateAuthority) != 0 {
err := validateFileIsReadable(clusterInfo.CertificateAuthority)
if err != nil {
- validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err))
}
}
@@ -403,13 +403,13 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
if len(authInfo.ClientCertificate) != 0 {
err := validateFileIsReadable(authInfo.ClientCertificate)
if err != nil {
- validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err))
}
}
if len(authInfo.ClientKey) != 0 {
err := validateFileIsReadable(authInfo.ClientKey)
if err != nil {
- validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err))
}
}
}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
index 03548209f..d73aafbdb 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -28,9 +28,10 @@ const replacementUnknownLocationAttempts = 2
// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest,
// which can be later combined with information about a location.
type CandidateTemplate struct {
- digest digest.Digest
- compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
- compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+ digest digest.Digest
+ compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
+ compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+ compressionAnnotations map[string]string // Annotations necessary to use compressionAlgorithm, if any
}
// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable
@@ -40,7 +41,7 @@ type CandidateTemplate struct {
// if not nil, the call is assumed to be CandidateLocations2.
func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate {
if v2Options == nil {
- return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm values are not used.
+ return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used.
digest: digest,
}
}
@@ -60,14 +61,40 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation
return nil
}
return &CandidateTemplate{
- digest: digest,
- compressionOperation: types.Decompress,
- compressionAlgorithm: nil,
+ digest: digest,
+ compressionOperation: types.Decompress,
+ compressionAlgorithm: nil,
+ compressionAnnotations: nil,
}
case blobinfocache.UnknownCompression:
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
return nil // Not allowed with CandidateLocations2
default:
+ // See if we can use the specific variant, first.
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+ algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor)
+ if err != nil {
+ logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v",
+ data.SpecificVariantCompressor, digest.String(), err)
+ } else {
+ if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+ PossibleManifestFormats: v2Options.PossibleManifestFormats,
+ RequiredCompression: v2Options.RequiredCompression,
+ }, &algo) {
+ logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v",
+ data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+ } else {
+ return &CandidateTemplate{
+ digest: digest,
+ compressionOperation: types.Compress,
+ compressionAlgorithm: &algo,
+ compressionAnnotations: data.SpecificVariantAnnotations,
+ }
+ }
+ }
+ }
+
+ // Try the base variant.
algo, err := compression.AlgorithmByName(data.BaseVariantCompressor)
if err != nil {
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
@@ -83,9 +110,10 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation
return nil
}
return &CandidateTemplate{
- digest: digest,
- compressionOperation: types.Compress,
- compressionAlgorithm: &algo,
+ digest: digest,
+ compressionOperation: types.Compress,
+ compressionAlgorithm: &algo,
+ compressionAnnotations: nil,
}
}
}
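
CandidateTemplateWithCompression now prefers the specific variant and falls back to the base variant only when the specific one is unknown, unrecognized, or rejected by the reuse conditions. A reduced sketch of that ordering, with hypothetical names:

    package main

    import "fmt"

    const unknown = "unknown"

    // pickCompressor models the fallback order: prefer a usable specific
    // variant, otherwise fall back to the base variant.
    func pickCompressor(base, specific string, usable func(string) bool) (string, bool) {
        if specific != unknown && usable(specific) {
            return specific, true
        }
        if usable(base) {
            return base, true
        }
        return "", false
    }

    func main() {
        usable := func(name string) bool { return name == "zstd" || name == "zstd:chunked" }
        fmt.Println(pickCompressor("zstd", "zstd:chunked", usable)) // zstd:chunked true
        fmt.Println(pickCompressor("zstd", unknown, usable))        // zstd true
    }
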
@@ -100,11 +128,12 @@ type CandidateWithTime struct {
func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime {
return CandidateWithTime{
candidate: blobinfocache.BICReplacementCandidate2{
- Digest: template.digest,
- CompressionOperation: template.compressionOperation,
- CompressionAlgorithm: template.compressionAlgorithm,
- UnknownLocation: false,
- Location: location,
+ Digest: template.digest,
+ CompressionOperation: template.compressionOperation,
+ CompressionAlgorithm: template.compressionAlgorithm,
+ CompressionAnnotations: template.compressionAnnotations,
+ UnknownLocation: false,
+ Location: location,
},
lastSeen: lastSeen,
}
@@ -114,11 +143,12 @@ func (template CandidateTemplate) CandidateWithLocation(location types.BICLocati
func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime {
return CandidateWithTime{
candidate: blobinfocache.BICReplacementCandidate2{
- Digest: template.digest,
- CompressionOperation: template.compressionOperation,
- CompressionAlgorithm: template.compressionAlgorithm,
- UnknownLocation: true,
- Location: types.BICLocationReference{Opaque: ""},
+ Digest: template.digest,
+ CompressionOperation: template.compressionOperation,
+ CompressionAlgorithm: template.compressionAlgorithm,
+ CompressionAnnotations: template.compressionAnnotations,
+ UnknownLocation: true,
+ Location: types.BICLocationReference{Opaque: ""},
},
lastSeen: time.Time{},
}
@@ -170,8 +200,6 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
// number of entries to limit for known and unknown location separately, only to make testing simpler.
-// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original
-// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
// split unknown candidates and known candidates
// and limit them separately.
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
index 067c6b7e1..9d4125d66 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -28,7 +28,7 @@ type cache struct {
uncompressedDigestsByTOC map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
- compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
+ compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
}
// New returns a BlobInfoCache implementation which is in-memory only.
@@ -49,7 +49,7 @@ func new2() *cache {
uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
- compressors: map[digest.Digest]string{},
+ compressors: map[digest.Digest]blobinfocache.DigestCompressorData{},
}
}
@@ -148,20 +148,36 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don’t record a compressor for a digest just because some remote author claims so
// (e.g. because a manifest says so);
+// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+// in a manifest)
//
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
- if previous, ok := mem.compressors[anyDigest]; ok && previous != data.BaseVariantCompressor {
- logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+ if previous, ok := mem.compressors[anyDigest]; ok {
+ if previous.BaseVariantCompressor != data.BaseVariantCompressor {
+ logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor)
+ } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression &&
+ previous.SpecificVariantCompressor != data.SpecificVariantCompressor {
+ logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor)
+ }
+ // We don’t check SpecificVariantAnnotations for equality; their generation might not be deterministic.
+
+ // Preserve specific variant information if the incoming data does not have it.
+ if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression &&
+ previous.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+ data.SpecificVariantCompressor = previous.SpecificVariantCompressor
+ data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations
+ }
}
if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
delete(mem.compressors, anyDigest)
return
}
- mem.compressors[anyDigest] = data.BaseVariantCompressor
+ mem.compressors[anyDigest] = data
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
@@ -171,13 +187,15 @@ func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi
// with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
- compressorName := blobinfocache.UnknownCompression
+ compressionData := blobinfocache.DigestCompressorData{
+ BaseVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
+ }
if v, ok := mem.compressors[digest]; ok {
- compressorName = v
+ compressionData = v
}
- template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{
- BaseVariantCompressor: compressorName,
- })
+ template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
if template == nil {
return candidates
}
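
The in-memory cache now stores whole DigestCompressorData records, and a later record that knows only the base variant no longer erases previously recorded specific-variant data. A small model of that merge rule, with stand-in types:

    package main

    import "fmt"

    const unknown = "unknown"

    type record struct {
        base, specific string
        annotations    map[string]string
    }

    // merge models the memory cache's update rule: an incoming record that
    // knows only the base variant preserves a previously recorded specific variant.
    func merge(previous, incoming record) record {
        if incoming.base != unknown && incoming.specific == unknown && previous.specific != unknown {
            incoming.specific = previous.specific
            incoming.annotations = previous.annotations
        }
        return incoming
    }

    func main() {
        prev := record{base: "zstd", specific: "zstd:chunked", annotations: map[string]string{"k": "v"}}
        got := merge(prev, record{base: "zstd", specific: unknown})
        fmt.Println(got.specific) // zstd:chunked: the richer data survives
    }
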
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
index 8d2bf7289..1a7931023 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -3,6 +3,7 @@ package sqlite
import (
"database/sql"
+ "encoding/json"
"errors"
"fmt"
"sync"
@@ -303,6 +304,16 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
`uncompressedDigest TEXT NOT NULL
)`,
},
+ {
+ "DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
+ `CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
+ // index implied by PRIMARY KEY
+ `digest TEXT PRIMARY KEY NOT NULL,` +
+ // The compressor is not `UnknownCompression`.
+ `specificVariantCompressor TEXT NOT NULL,
+ specificVariantAnnotations BLOB NOT NULL
+ )`,
+ },
}
_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
@@ -461,6 +472,9 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don’t record a compressor for a digest just because some remote author claims so
// (e.g. because a manifest says so);
+// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+// in a manifest)
//
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
@@ -468,21 +482,46 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
if err != nil {
- return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
+ return void{}, fmt.Errorf("looking for compressor of %q", anyDigest)
}
+ warned := false
if gotPrevious && previous != data.BaseVariantCompressor {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+ warned = true
}
if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
}
+ if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+ return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
+ }
} else {
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
anyDigest.String(), data.BaseVariantCompressor); err != nil {
return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
}
}
+
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+ if !warned { // Don’t warn twice about the same digest
+ prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
+ if err != nil {
+ return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest)
+ }
+ if found && data.SpecificVariantCompressor != prevSVC {
+ logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor)
+ }
+ }
+ annotations, err := json.Marshal(data.SpecificVariantAnnotations)
+ if err != nil {
+ return void{}, err
+ }
+ if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)",
+ anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil {
+ return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err)
+ }
+ }
return void{}, nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
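
The specific-variant annotations land in the new DigestSpecificVariantCompressors table as a JSON-encoded BLOB; the write path marshals the map and the read path unmarshals it back. A standalone sketch of that round trip, without the database:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        annotations := map[string]string{"example/annotation": "value"}

        // Write side: store the map as a BLOB column.
        blob, err := json.Marshal(annotations)
        if err != nil {
            panic(err)
        }

        // Read side: decode the BLOB back into a map.
        var decoded map[string]string
        if err := json.Unmarshal(blob, &decoded); err != nil {
            panic(err)
        }
        fmt.Println(decoded["example/annotation"]) // value
    }
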
@@ -493,19 +532,32 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi
// with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
- compressorName := blobinfocache.UnknownCompression
+ compressionData := blobinfocache.DigestCompressorData{
+ BaseVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantCompressor: blobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
+ }
if v2Options != nil {
- compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
- if err != nil {
- return nil, fmt.Errorf("scanning compressorName: %w", err)
- }
- if found {
- compressorName = compressor
+ var baseVariantCompressor string
+ var specificVariantCompressor sql.NullString
+ var annotationBytes []byte
+ switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
+ "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()).
+ Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); {
+ case errors.Is(err, sql.ErrNoRows): // Do nothing
+ case err != nil:
+ return nil, fmt.Errorf("scanning compressor data: %w", err)
+ default:
+ compressionData.BaseVariantCompressor = baseVariantCompressor
+ if specificVariantCompressor.Valid && annotationBytes != nil {
+ compressionData.SpecificVariantCompressor = specificVariantCompressor.String
+ if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil {
+ return nil, err
+ }
+ }
}
}
- template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{
- BaseVariantCompressor: compressorName,
- })
+ template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
if template == nil {
return candidates, nil
}
@@ -561,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
if err != nil {
return nil, err
}
-
- // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
- // (In the extreme, we could turn _everything_ this function does into a single query.
- // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
- // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
- rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
- if err != nil {
- return nil, fmt.Errorf("querying for other digests: %w", err)
- }
- defer rows.Close()
- for rows.Next() {
- var otherDigestString string
- if err := rows.Scan(&otherDigestString); err != nil {
- return nil, fmt.Errorf("scanning other digest: %w", err)
- }
- otherDigest, err := digest.Parse(otherDigestString)
+ if uncompressedDigest != "" {
+ // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+ // (In the extreme, we could turn _everything_ this function does into a single query.
+ // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+ // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
+ rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("querying for other digests: %w", err)
}
- if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
- res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
+ defer rows.Close()
+ for rows.Next() {
+ var otherDigestString string
+ if err := rows.Scan(&otherDigestString); err != nil {
+ return nil, fmt.Errorf("scanning other digest: %w", err)
+ }
+ otherDigest, err := digest.Parse(otherDigestString)
if err != nil {
return nil, err
}
+ if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
+ res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("iterating through other digests: %w", err)
}
- }
- if err := rows.Err(); err != nil {
- return nil, fmt.Errorf("iterating through other digests: %w", err)
- }
- if uncompressedDigest != primaryDigest {
- res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
- if err != nil {
- return nil, err
+ if uncompressedDigest != primaryDigest {
+ res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
+ if err != nil {
+ return nil, err
+ }
}
}
}
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
index 4e9986422..31dfdd342 100644
--- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
+++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
@@ -195,10 +195,10 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
return untrustedCertificate.PublicKey, nil
}
-func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
- rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes,
+ rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes,
untrustedBase64Signature, untrustedPayloadBytes)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
index e79c91cf9..bddaca690 100644
--- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
@@ -40,17 +40,20 @@ type UntrustedRekorPayload struct {
// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
-// UnmarshalJSON implements the json.Unmarshaler interface
-func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
- err := s.strictUnmarshalJSON(data)
- if err != nil {
- if formatErr, ok := err.(JSONFormatError); ok {
- err = NewInvalidSignatureError(formatErr.Error())
- }
+// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
+// All other errors are returned as is.
+func JSONFormatToInvalidSignatureError(err error) error {
+ if formatErr, ok := err.(JSONFormatError); ok {
+ err = NewInvalidSignatureError(formatErr.Error())
}
return err
}
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
+ return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
@@ -77,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface
func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
- err := p.strictUnmarshalJSON(data)
- if err != nil {
- if formatErr, ok := err.(JSONFormatError); ok {
- err = NewInvalidSignatureError(formatErr.Error())
- }
- }
- return err
+ return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data))
}
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
@@ -113,7 +110,7 @@ func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
// Returns bundle upload time on success.
-func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
// FIXME: Should the publicKey parameter hard-code ecdsa?
// == Parse SET bytes
@@ -130,7 +127,14 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
}
untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
- if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+ publicKeyMatched := false
+ for _, pk := range publicKeys {
+ if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+ publicKeyMatched = true
+ break
+ }
+ }
+ if !publicKeyMatched {
return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
}
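
VerifyRekorSET now takes a list of public keys and accepts the SET when any one of them verifies the signed entry timestamp. A compilable sketch of the any-of loop using freshly generated keys; the payload is illustrative, not a real Rekor SET:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "crypto/sha256"
        "fmt"
    )

    func main() {
        payload := []byte("signed entry timestamp payload")
        hash := sha256.Sum256(payload)

        // Errors ignored for brevity in this sketch.
        signer, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        other, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        sig, _ := ecdsa.SignASN1(rand.Reader, signer, hash[:])

        // Any-of verification, as in the new VerifyRekorSET loop.
        publicKeys := []*ecdsa.PublicKey{&other.PublicKey, &signer.PublicKey}
        matched := false
        for _, pk := range publicKeys {
            if ecdsa.VerifyASN1(pk, hash[:], sig) {
                matched = true
                break
            }
        }
        fmt.Println(matched) // true: the second key verifies the signature
    }
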
diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
index a2609c954..90a81dc1c 100644
--- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
+++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
@@ -7,6 +7,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "strings"
"time"
"github.com/containers/image/v5/version"
@@ -79,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface
func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
- err := s.strictUnmarshalJSON(data)
- if err != nil {
- if formatErr, ok := err.(JSONFormatError); ok {
- err = NewInvalidSignatureError(formatErr.Error())
- }
- }
- return err
+ return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
}
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
@@ -126,7 +121,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
- return NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
+ return NewInvalidSignatureError("Field optional.timestamp is not an integer")
}
s.untrustedTimestamp = &intTimestamp
}
@@ -171,24 +166,62 @@ type SigstorePayloadAcceptanceRules struct {
ValidateSignedDockerManifestDigest func(digest.Digest) error
}
-// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components
+// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created
+// by any of the public keys in publicKeys.
+//
+// This is an internal implementation detail of VerifySigstorePayload and should have no other callers.
+// It is INSUFFICIENT alone to consider the signature acceptable.
+func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error {
+ if len(publicKeys) == 0 {
+ return errors.New("Need at least one public key to verify the sigstore payload, but got 0")
+ }
+
+ verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys))
+ for _, key := range publicKeys {
+ // Failing to load a verifier indicates that something is really, really
+ // invalid about the public key; prefer to fail even if the signature might be
+ // valid with other keys, so that users fix their fallback keys before they need them.
+ // For that reason, we even initialize all verifiers before trying to validate the signature
+ // with any key.
+ verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHarcodedHashAlgorithm)
+ if err != nil {
+ return err
+ }
+ verifiers = append(verifiers, verifier)
+ }
+
+ var failures []string
+ for _, verifier := range verifiers {
+ // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+ // which seems to be not used by anything. So we don’t bother.
+ err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload))
+ if err == nil {
+ return nil
+ }
+
+ failures = append(failures, err.Error())
+ }
+
+ if len(failures) == 0 {
+ // Coverage: We have checked there is at least one public key, any success causes an early return,
+ // and any failure adds an entry to failures => there must be at least one error.
+ return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded")
+ }
+ return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", "))
+}
+
+// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components
// match expected values, both as specified by rules, and returns it.
// We return an *UntrustedSigstorePayload, although nothing actually uses it,
// just to double-check against stupid typos.
-func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
- verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm)
- if err != nil {
- return nil, fmt.Errorf("creating verifier: %w", err)
- }
-
+func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
if err != nil {
return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
}
- // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
- // which seems to be not used by anything. So we don’t bother.
- if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil {
- return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err))
+
+ if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil {
+ return nil, err
}
var unmatchedPayload UntrustedSigstorePayload
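
verifySigstorePayloadBlobSignature loads a verifier for every key before attempting any verification, so a malformed fallback key fails immediately even when another key would have matched; only then does it try keys in order while collecting failures. A reduced model of that two-phase structure; the verifier type and builders are stand-ins for the sigstore signature API:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    type verifier func(payload, sig string) error

    // verifyWithAll mirrors the two-phase structure: build every verifier first
    // (a broken key is surfaced immediately), then accept if any verifier
    // validates the signature, aggregating the failures otherwise.
    func verifyWithAll(builders []func() (verifier, error), payload, sig string) error {
        if len(builders) == 0 {
            return errors.New("need at least one public key, got 0")
        }
        verifiers := make([]verifier, 0, len(builders))
        for _, b := range builders {
            v, err := b()
            if err != nil {
                return err // fail fast so fallback keys get fixed before they are needed
            }
            verifiers = append(verifiers, v)
        }
        var failures []string
        for _, v := range verifiers {
            err := v(payload, sig)
            if err == nil {
                return nil
            }
            failures = append(failures, err.Error())
        }
        return errors.New("cryptographic signature verification failed: " + strings.Join(failures, ", "))
    }

    func main() {
        good := func() (verifier, error) {
            return func(_, sig string) error {
                if sig != "valid" {
                    return errors.New("signature mismatch")
                }
                return nil
            }, nil
        }
        fmt.Println(verifyWithAll([]func() (verifier, error){good}, "payload", "valid")) // <nil>
    }
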
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
index beb5d0673..965901e18 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
@@ -2,7 +2,6 @@ package signature
import (
"encoding/json"
- "errors"
"fmt"
"github.com/containers/image/v5/signature/internal"
@@ -15,29 +14,57 @@ type PRSigstoreSignedOption func(*prSigstoreSigned) error
func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.KeyPath != "" {
- return errors.New(`"keyPath" already specified`)
+ return InvalidPolicyFormatError(`"keyPath" already specified`)
}
pr.KeyPath = keyPath
return nil
}
}
+// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyPaths != nil {
+ return InvalidPolicyFormatError(`"keyPaths" already specified`)
+ }
+ if len(keyPaths) == 0 {
+ return InvalidPolicyFormatError(`"keyPaths" contains no entries`)
+ }
+ pr.KeyPaths = keyPaths
+ return nil
+ }
+}
+
// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.KeyData != nil {
- return errors.New(`"keyData" already specified`)
+ return InvalidPolicyFormatError(`"keyData" already specified`)
}
pr.KeyData = keyData
return nil
}
}
+// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyDatas != nil {
+ return InvalidPolicyFormatError(`"keyDatas" already specified`)
+ }
+ if len(keyDatas) == 0 {
+ return InvalidPolicyFormatError(`"keyDatas" contains no entries`)
+ }
+ pr.KeyDatas = keyDatas
+ return nil
+ }
+}
+
// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.Fulcio != nil {
- return errors.New(`"fulcio" already specified`)
+ return InvalidPolicyFormatError(`"fulcio" already specified`)
}
pr.Fulcio = fulcio
return nil
@@ -48,29 +75,57 @@ func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedO
func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.RekorPublicKeyPath != "" {
- return errors.New(`"rekorPublicKeyPath" already specified`)
+ return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`)
}
pr.RekorPublicKeyPath = rekorPublicKeyPath
return nil
}
}
+// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublicKeyPaths []string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyPaths != nil {
+ return InvalidPolicyFormatError(`"rekorPublicKeyPaths" already specified`)
+ }
+ if len(rekorPublicKeyPaths) == 0 {
+ return InvalidPolicyFormatError(`"rekorPublicKeyPaths" contains no entries`)
+ }
+ pr.RekorPublicKeyPaths = rekorPublicKeyPaths
+ return nil
+ }
+}
+
// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.RekorPublicKeyData != nil {
- return errors.New(`"rekorPublicKeyData" already specified`)
+ return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`)
}
pr.RekorPublicKeyData = rekorPublicKeyData
return nil
}
}
+// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublicKeyDatas [][]byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyDatas != nil {
+ return InvalidPolicyFormatError(`"rekorPublicKeyDatas" already specified`)
+ }
+ if len(rekorPublicKeyDatas) == 0 {
+ return InvalidPolicyFormatError(`"rekorPublicKeyDatas" contains no entries`)
+ }
+ pr.RekorPublicKeyDatas = rekorPublicKeyDatas
+ return nil
+ }
+}
+
// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
return func(pr *prSigstoreSigned) error {
if pr.SignedIdentity != nil {
- return errors.New(`"signedIdentity" already specified`)
+ return InvalidPolicyFormatError(`"signedIdentity" already specified`)
}
pr.SignedIdentity = signedIdentity
return nil
@@ -92,21 +147,40 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned,
if res.KeyPath != "" {
keySources++
}
+ if res.KeyPaths != nil {
+ keySources++
+ }
if res.KeyData != nil {
keySources++
}
+ if res.KeyDatas != nil {
+ keySources++
+ }
if res.Fulcio != nil {
keySources++
}
if keySources != 1 {
- return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified")
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified")
}
- if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil {
- return nil, InvalidPolicyFormatError("rekorPublickeyType and rekorPublickeyData cannot be used simultaneously")
+ rekorSources := 0
+ if res.RekorPublicKeyPath != "" {
+ rekorSources++
+ }
+ if res.RekorPublicKeyPaths != nil {
+ rekorSources++
+ }
+ if res.RekorPublicKeyData != nil {
+ rekorSources++
+ }
+ if res.RekorPublicKeyDatas != nil {
+ rekorSources++
}
- if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil {
- return nil, InvalidPolicyFormatError("At least one of RekorPublickeyPath and RekorPublickeyData must be specified if fulcio is used")
+ if rekorSources > 1 {
+ return nil, InvalidPolicyFormatError("at most one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas can be used simultaneously")
+ }
+ if res.Fulcio != nil && rekorSources == 0 {
+ return nil, InvalidPolicyFormatError("At least one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas must be specified if fulcio is used")
}
if res.SignedIdentity == nil {
@@ -144,7 +218,8 @@ var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
*pr = prSigstoreSigned{}
var tmp prSigstoreSigned
- var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
+ var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool
+ var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool
var fulcio prSigstoreSignedFulcio
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
@@ -154,18 +229,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
case "keyPath":
gotKeyPath = true
return &tmp.KeyPath
+ case "keyPaths":
+ gotKeyPaths = true
+ return &tmp.KeyPaths
case "keyData":
gotKeyData = true
return &tmp.KeyData
+ case "keyDatas":
+ gotKeyDatas = true
+ return &tmp.KeyDatas
case "fulcio":
gotFulcio = true
return &fulcio
case "rekorPublicKeyPath":
gotRekorPublicKeyPath = true
return &tmp.RekorPublicKeyPath
+ case "rekorPublicKeyPaths":
+ gotRekorPublicKeyPaths = true
+ return &tmp.RekorPublicKeyPaths
case "rekorPublicKeyData":
gotRekorPublicKeyData = true
return &tmp.RekorPublicKeyData
+ case "rekorPublicKeyDatas":
+ gotRekorPublicKeyDatas = true
+ return &tmp.RekorPublicKeyDatas
case "signedIdentity":
return &signedIdentity
default:
@@ -192,18 +279,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
if gotKeyPath {
opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath))
}
+ if gotKeyPaths {
+ opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths))
+ }
if gotKeyData {
opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData))
}
+ if gotKeyDatas {
+ opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas))
+ }
if gotFulcio {
opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio))
}
if gotRekorPublicKeyPath {
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath))
}
+ if gotRekorPublicKeyPaths {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths))
+ }
if gotRekorPublicKeyData {
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData))
}
+ if gotRekorPublicKeyDatas {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas))
+ }
opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity))
res, err := newPRSigstoreSigned(opts...)
@@ -221,7 +320,7 @@ type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error
func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.CAPath != "" {
- return errors.New(`"caPath" already specified`)
+ return InvalidPolicyFormatError(`"caPath" already specified`)
}
f.CAPath = caPath
return nil
@@ -232,7 +331,7 @@ func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOptio
func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.CAData != nil {
- return errors.New(`"caData" already specified`)
+ return InvalidPolicyFormatError(`"caData" already specified`)
}
f.CAData = caData
return nil
@@ -243,7 +342,7 @@ func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOptio
func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.OIDCIssuer != "" {
- return errors.New(`"oidcIssuer" already specified`)
+ return InvalidPolicyFormatError(`"oidcIssuer" already specified`)
}
f.OIDCIssuer = oidcIssuer
return nil
@@ -254,7 +353,7 @@ func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFul
func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption {
return func(f *prSigstoreSignedFulcio) error {
if f.SubjectEmail != "" {
- return errors.New(`"subjectEmail" already specified`)
+ return InvalidPolicyFormatError(`"subjectEmail" already specified`)
}
f.SubjectEmail = subjectEmail
return nil
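
Taken together, the new options compose like the existing ones. A minimal sketch of building a requirement with multiple trusted keys, using the package's exported NewPRSigstoreSigned and NewPRMMatchRepoDigestOrExact (the key paths here are hypothetical):

    package main

    import (
        "log"

        "github.com/containers/image/v5/signature"
    )

    func main() {
        // Hypothetical key paths; exactly one key source may be used per requirement.
        req, err := signature.NewPRSigstoreSigned(
            signature.PRSigstoreSignedWithKeyPaths([]string{"/etc/containers/keys/a.pub", "/etc/containers/keys/b.pub"}),
            signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
        )
        if err != nil {
            log.Fatal(err) // an InvalidPolicyFormatError if key sources conflict
        }
        _ = req
    }
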
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
index 896ca5a60..e5c932918 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
@@ -6,7 +6,6 @@ import (
"context"
"errors"
"fmt"
- "os"
"slices"
"github.com/containers/image/v5/internal/multierr"
@@ -27,33 +26,18 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
}
// FIXME: move this to per-context initialization
- var data [][]byte
- keySources := 0
- if pr.KeyPath != "" {
- keySources++
- d, err := os.ReadFile(pr.KeyPath)
- if err != nil {
- return sarRejected, nil, err
- }
- data = [][]byte{d}
- }
- if pr.KeyPaths != nil {
- keySources++
- data = [][]byte{}
- for _, path := range pr.KeyPaths {
- d, err := os.ReadFile(path)
- if err != nil {
- return sarRejected, nil, err
- }
- data = append(data, d)
- }
- }
- if pr.KeyData != nil {
- keySources++
- data = [][]byte{pr.KeyData}
+ const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`
+ data, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: notOneSourceErrorText,
+ path: pr.KeyPath,
+ paths: pr.KeyPaths,
+ data: pr.KeyData,
+ })
+ if err != nil {
+ return sarRejected, nil, err
}
- if keySources != 1 {
- return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`)
+ if data == nil {
+ return sarRejected, nil, errors.New(notOneSourceErrorText)
}
// FIXME: move this to per-context initialization
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
index 485165077..9c553771c 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"os"
+ "strings"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/private"
@@ -20,37 +21,69 @@ import (
"github.com/sigstore/sigstore/pkg/cryptoutils"
)
-// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set,
+// configBytesSources contains configuration fields which may result in one or more []byte values
+type configBytesSources struct {
+ inconsistencyErrorMessage string // Error to return if more than one source is set
+ path string // …Path: a path to a file containing the data, or ""
+ paths []string // …Paths: paths to files containing the data, or nil
+ data []byte // …Data: a single instance of the raw data, or nil
+ datas [][]byte // …Datas: the raw data, or nil // codespell:ignore datas
+}
+
+// loadBytesFromConfigSources ensures at most one of the sources in src is set,
// and returns the referenced data, or nil if none is set.
-func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) {
- switch {
- case data != nil && path != "":
- return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix)
- case path != "":
- d, err := os.ReadFile(path)
+func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) {
+ sources := 0
+ var data [][]byte // = nil
+ if src.path != "" {
+ sources++
+ d, err := os.ReadFile(src.path)
if err != nil {
return nil, err
}
- return d, nil
- case data != nil:
- return data, nil
- default: // Nothing
- return nil, nil
+ data = [][]byte{d}
+ }
+ if src.paths != nil {
+ sources++
+ data = [][]byte{}
+ for _, path := range src.paths {
+ d, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, d)
+ }
}
+ if src.data != nil {
+ sources++
+ data = [][]byte{src.data}
+ }
+ if src.datas != nil { // codespell:ignore datas
+ sources++
+ data = src.datas // codespell:ignore datas
+ }
+ if sources > 1 {
+ return nil, errors.New(src.inconsistencyErrorMessage)
+ }
+ return data, nil
}
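
The helper's contract, as a package-internal sketch (the CA path is hypothetical): at most one of path, paths, data and datas may be set; the result is nil only when no source is set, otherwise a [][]byte with one entry per file or data item:

    // Package-internal fragment; error handling elided.
    pems, err := loadBytesFromConfigSources(configBytesSources{
        inconsistencyErrorMessage: `both "caPath" and "caData" specified`,
        path:                      "/etc/pki/fulcio-ca.pem", // hypothetical
    })
    // On success, pems has one entry (the file contents); pems == nil means
    // no source was set; setting two sources returns the inconsistency error.
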
// prepareTrustRoot creates a fulcioTrustRoot from the input data.
// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
- caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath)
+ caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`,
+ path: f.CAPath,
+ data: f.CAData,
+ })
if err != nil {
return nil, err
}
- if caCertBytes == nil {
- return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`)
+ if len(caCertPEMs) != 1 {
+ return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" and "caData"`)
}
certs := x509.NewCertPool()
- if ok := certs.AppendCertsFromPEM(caCertBytes); !ok {
+ if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok {
return nil, errors.New("error loading Fulcio CA certificates")
}
fulcio := fulcioTrustRoot{
@@ -66,24 +99,35 @@ func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
type sigstoreSignedTrustRoot struct {
- publicKey crypto.PublicKey
- fulcio *fulcioTrustRoot
- rekorPublicKey *ecdsa.PublicKey
+ publicKeys []crypto.PublicKey
+ fulcio *fulcioTrustRoot
+ rekorPublicKeys []*ecdsa.PublicKey
}
func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
res := sigstoreSignedTrustRoot{}
- publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath)
+ publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`,
+ path: pr.KeyPath,
+ paths: pr.KeyPaths,
+ data: pr.KeyData,
+ datas: pr.KeyDatas, // codespell:ignore datas
+ })
if err != nil {
return nil, err
}
- if publicKeyPEM != nil {
- pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
- if err != nil {
- return nil, fmt.Errorf("parsing public key: %w", err)
+ if publicKeyPEMs != nil {
+ for index, keyData := range publicKeyPEMs {
+ pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData)
+ if err != nil {
+ return nil, fmt.Errorf("parsing public key %d: %w", index+1, err)
+ }
+ res.publicKeys = append(res.publicKeys, pk)
+ }
+ if len(res.publicKeys) == 0 {
+ return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`)
}
- res.publicKey = pk
}
if pr.Fulcio != nil {
@@ -94,21 +138,32 @@ func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error)
res.fulcio = f
}
- rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath)
+ rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+ inconsistencyErrorMessage: `Internal inconsistency: more than one of "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData", "rekorPublicKeyDatas" specified`,
+ path: pr.RekorPublicKeyPath,
+ paths: pr.RekorPublicKeyPaths,
+ data: pr.RekorPublicKeyData,
+ datas: pr.RekorPublicKeyDatas, // codespell:ignore datas
+ })
if err != nil {
return nil, err
}
- if rekorPublicKeyPEM != nil {
- pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM)
- if err != nil {
- return nil, fmt.Errorf("parsing Rekor public key: %w", err)
- }
- pkECDSA, ok := pk.(*ecdsa.PublicKey)
- if !ok {
- return nil, fmt.Errorf("Rekor public key is not using ECDSA")
+ if rekorPublicKeyPEMs != nil {
+ for index, pem := range rekorPublicKeyPEMs {
+ pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem)
+ if err != nil {
+ return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err)
+ }
+ pkECDSA, ok := pk.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1)
+ }
+ res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA)
+ }
+ if len(res.rekorPublicKeys) == 0 {
+ return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`)
}
- res.rekorPublicKey = pkECDSA
}
return &res, nil
@@ -134,37 +189,51 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
}
untrustedPayload := sig.UntrustedPayload()
- var publicKey crypto.PublicKey
+ var publicKeys []crypto.PublicKey
switch {
- case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
+ case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
- case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
+ case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")
- case trustRoot.publicKey != nil:
- if trustRoot.rekorPublicKey != nil {
+ case trustRoot.publicKeys != nil:
+ if trustRoot.rekorPublicKeys != nil {
untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work.
return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
}
- // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
- // FIXME: We could just generate DER instead of the full PEM text
- recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey)
- if err != nil {
- // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
- // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
- return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
+ var rekorFailures []string
+ for _, candidatePublicKey := range trustRoot.publicKeys {
+ // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
+ // FIXME: We could just generate DER instead of the full PEM text
+ recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey)
+ if err != nil {
+ // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
+ // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
+ return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
+ }
+ // We don’t care about the Rekor timestamp, just about log presence.
+ _, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload)
+ if err == nil {
+ publicKeys = append(publicKeys, candidatePublicKey)
+ break // The SET can only accept one public key entry, so if we found one, the rest either don’t match or are duplicates
+ }
+ rekorFailures = append(rekorFailures, err.Error())
}
- // We don’t care about the Rekor timestamp, just about log presence.
- if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil {
- return sarRejected, err
+ if len(publicKeys) == 0 {
+ if len(rekorFailures) == 0 {
+ // Coverage: We have ensured that len(trustRoot.publicKeys) != 0; if nothing succeeds, there must be at least one failure.
+ return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures`)
+ }
+ return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", ")))
}
+ } else {
+ publicKeys = trustRoot.publicKeys
}
- publicKey = trustRoot.publicKey
case trustRoot.fulcio != nil:
- if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations.
+ if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations.
return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key")
}
untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
@@ -179,19 +248,20 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
}
- pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio,
+ pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio,
[]byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload)
if err != nil {
return sarRejected, err
}
- publicKey = pk
+ publicKeys = []crypto.PublicKey{pk}
}
- if publicKey == nil {
- // Coverage: This should never happen, we have already excluded the possibility in the switch above.
+ if len(publicKeys) == 0 {
+ // Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set,
+ // and we have already excluded the possibility in the switch above.
return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload")
}
- signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
+ signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
ValidateSignedDockerReference: func(ref string) error {
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
index 48dbfbbde..390957b02 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
@@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
- return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err)
+ return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err)
}
return newParsedRef, nil
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
index 96e91a0a9..32aa1c0ad 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_types.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -74,7 +74,7 @@ type prSignedBy struct {
// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyPath string `json:"keyPath,omitempty"`
- // KeyPaths if a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+ // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyPaths []string `json:"keyPaths,omitempty"`
// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyData []byte `json:"keyData,omitempty"`
@@ -111,24 +111,35 @@ type prSignedBaseLayer struct {
type prSigstoreSigned struct {
prCommon
- // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+ // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
KeyPath string `json:"keyPath,omitempty"`
- // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+ // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+ KeyPaths []string `json:"keyPaths,omitempty"`
+ // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
KeyData []byte `json:"keyData,omitempty"`
- // FIXME: Multiple public keys?
+ // KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+ KeyDatas [][]byte `json:"keyDatas,omitempty"`
- // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+ // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
// RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures.
- // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
- // (and Rekor inclusion is not required if a Rekor public key is not specified).
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+ // RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"`
// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
- // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
- // (and Rekor inclusion is not required if a Rekor public key is not specified).
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
+ // RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+ // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"`
// SignedIdentity specifies what image identity the signature must be claiming about the image.
// Defaults to "matchRepoDigestOrExact" if not specified.
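
A sketch of what the new plural fields look like in a policy.json, parsed with the package's exported NewPolicyFromBytes (the registry and key paths are hypothetical; the key files are only read later, at verification time):

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/signature"
    )

    func main() {
        // Hypothetical policy exercising the new "keyPaths" field.
        policyJSON := []byte(`{
            "default": [{"type": "reject"}],
            "transports": {
                "docker": {
                    "registry.example.com": [{
                        "type": "sigstoreSigned",
                        "keyPaths": ["/etc/keys/a.pub", "/etc/keys/b.pub"],
                        "signedIdentity": {"type": "matchRepoDigestOrExact"}
                    }]
                }
            }
        }`)
        policy, err := signature.NewPolicyFromBytes(policyJSON)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(policy.Transports["docker"])) // 1
    }
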
diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go
index 30df997d8..94a846593 100644
--- a/vendor/github.com/containers/image/v5/signature/simple.go
+++ b/vendor/github.com/containers/image/v5/signature/simple.go
@@ -105,13 +105,7 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface
func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
- err := s.strictUnmarshalJSON(data)
- if err != nil {
- if formatErr, ok := err.(internal.JSONFormatError); ok {
- err = internal.NewInvalidSignatureError(formatErr.Error())
- }
- }
- return err
+ return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
}
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
@@ -149,7 +143,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
- return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
+ return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
}
s.untrustedTimestamp = &intTimestamp
}
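
The check whose message this hunk fixes works by round-tripping the float through int64; a standalone illustration:

    package main

    import "fmt"

    func main() {
        for _, ts := range []float64{1700000000, 1700000000.5} {
            i := int64(ts)
            // true only when ts is a whole number representable as int64
            fmt.Println(ts, float64(i) == ts)
        }
    }
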
diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go
index 842a3ab06..a7a2865fc 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_dest.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go
@@ -325,7 +325,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
return private.UploadedBlob{}, err
}
- out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+ out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ)
if err != nil {
return private.UploadedBlob{}, err
}
@@ -337,7 +337,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
}()
if out.TOCDigest == "" && out.UncompressedDigest == "" {
- return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
+ return private.UploadedBlob{}, errors.New("internal error: PrepareStagedLayer succeeded with neither TOCDigest nor UncompressedDigest set")
}
blobDigest := srcInfo.Digest
@@ -356,11 +356,11 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
// responsible for ensuring blobDigest has been validated.
if out.CompressedDigest != blobDigest {
- return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q",
+ return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
out.CompressedDigest, blobDigest)
}
// So, record also information about blobDigest, that might benefit reuse.
- // We trust ApplyDiffWithDiffer to validate or create both values correctly.
+ // We trust PrepareStagedLayer to validate or create both values correctly.
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
} else {
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 2a1099f67..acc4cb30e 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -37,7 +37,7 @@ func newReference(transport storageTransport, named reference.Named, id string)
}
if id != "" {
if err := validateImageID(id); err != nil {
- return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
+ return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference)
}
}
// We take a copy of the transport, which contains a pointer to the
diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go
index 4f501fc22..55788f887 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_src.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_src.go
@@ -11,6 +11,7 @@ import (
"fmt"
"io"
"os"
+ "slices"
"sync"
"github.com/containers/image/v5/docker/reference"
@@ -300,7 +301,7 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
}
- physicalBlobInfos := []types.BlobInfo{}
+ physicalBlobInfos := []types.BlobInfo{} // Built reversed
layerID := s.image.TopLayer
for layerID != "" {
layer, err := s.imageRef.transport.store.Layer(layerID)
@@ -340,9 +341,10 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
Size: size,
MediaType: uncompressedLayerType,
}
- physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
+ physicalBlobInfos = append(physicalBlobInfos, blobInfo)
layerID = layer.Parent
}
+ slices.Reverse(physicalBlobInfos)
res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
if err != nil {
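
The append-then-reverse pattern replaces repeated prepending, which re-copies the slice on every iteration; a standalone sketch of the same idea:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        // Walk child -> parent, appending (amortized O(1) per step)...
        visited := []string{"top", "mid", "base"} // order as seen via Parent links
        out := []string{}
        for _, id := range visited {
            out = append(out, id)
        }
        // ...then reverse once at the end instead of prepending each time.
        slices.Reverse(out)
        fmt.Println(out) // [base mid top]
    }
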
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index 18d4cc2d2..7d4a83bc9 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -103,7 +103,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
}
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
if _, err := io.Copy(io.Discard, reader); err != nil {
- return nil, fmt.Errorf("error reading %q: %v", filename, err)
+ return nil, fmt.Errorf("error reading %q: %w", filename, err)
}
if uncompressed != nil {
uncompressed.Close()
@@ -152,7 +152,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
// Encode and digest the image configuration blob.
configBytes, err := json.Marshal(&config)
if err != nil {
- return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
+ return nil, fmt.Errorf("error generating configuration blob for %q: %w", strings.Join(r.filenames, separator), err)
}
configID := digest.Canonical.FromBytes(configBytes)
blobs[configID] = tarballBlob{
@@ -177,7 +177,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
// Encode the manifest.
manifestBytes, err := json.Marshal(&manifest)
if err != nil {
- return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err)
+ return nil, fmt.Errorf("error generating manifest for %q: %w", strings.Join(r.filenames, separator), err)
}
// Return the image.
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
index 63d835530..b33208a51 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
@@ -38,13 +38,13 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
if filename == "-" {
stdin, err = io.ReadAll(os.Stdin)
if err != nil {
- return nil, fmt.Errorf("error buffering stdin: %v", err)
+ return nil, fmt.Errorf("error buffering stdin: %w", err)
}
continue
}
f, err := os.Open(filename)
if err != nil {
- return nil, fmt.Errorf("error opening %q: %v", filename, err)
+ return nil, fmt.Errorf("error opening %q: %w", filename, err)
}
f.Close()
}
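
The %v-to-%w conversions in these files matter because only %w preserves the error chain for errors.Is and errors.As; a standalone illustration:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        _, err := os.Open("/nonexistent")
        wrapped := fmt.Errorf("error opening %q: %w", "/nonexistent", err)
        // %w keeps the chain intact; with %v this would print false.
        fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
    }
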
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 887147040..1c93587dc 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -171,7 +171,7 @@ vendor_task:
cross_task:
alias: cross
container:
- image: golang:1.21
+ image: golang:1.22
build_script: make cross
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 6f20e059d..a619694fd 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
-GOLANGCI_LINT_VERSION := 1.60.3
+GOLANGCI_LINT_VERSION := 1.61.0
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go
index 7176ba361..396648e7f 100644
--- a/vendor/github.com/containers/storage/check.go
+++ b/vendor/github.com/containers/storage/check.go
@@ -8,6 +8,7 @@ import (
"os"
"path"
"path/filepath"
+ "slices"
"sort"
"strings"
"sync"
@@ -769,12 +770,9 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
return d
}
isUnaccounted := func(errs []error) bool {
- for _, err := range errs {
- if errors.Is(err, ErrLayerUnaccounted) {
- return true
- }
- }
- return false
+ return slices.ContainsFunc(errs, func(err error) bool {
+ return errors.Is(err, ErrLayerUnaccounted)
+ })
}
sort.Slice(layersToDelete, func(i, j int) bool {
// we've not heard of either of them, so remove them in the order the driver suggested
@@ -1005,12 +1003,12 @@ func (c *checkDirectory) remove(path string) {
func (c *checkDirectory) header(hdr *tar.Header) {
name := path.Clean(hdr.Name)
dir, base := path.Split(name)
- if strings.HasPrefix(base, archive.WhiteoutPrefix) {
+ if file, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
if base == archive.WhiteoutOpaqueDir {
c.remove(path.Clean(dir))
c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix())
} else {
- c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):]))
+ c.remove(path.Join(dir, file))
}
} else {
if hdr.Typeflag == tar.TypeLink {
@@ -1044,7 +1042,7 @@ func (c *checkDirectory) header(hdr *tar.Header) {
// headers updates a checkDirectory using information from the passed-in header slice
func (c *checkDirectory) headers(hdrs []*tar.Header) {
- hdrs = append([]*tar.Header{}, hdrs...)
+ hdrs = slices.Clone(hdrs)
// sort the headers from the diff to ensure that whiteouts appear
// before content when they both appear in the same directory, per
// https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
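
strings.CutPrefix combines the HasPrefix test and the slicing into one call; a standalone sketch using the same ".wh." whiteout prefix value:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        const prefix = ".wh." // the archive.WhiteoutPrefix value in c/storage
        if file, ok := strings.CutPrefix(".wh.deleted.txt", prefix); ok {
            fmt.Println(file) // deleted.txt
        }
    }
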
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index a7dfb405b..c669ce7b0 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -3,8 +3,10 @@ package storage
import (
"errors"
"fmt"
+ "maps"
"os"
"path/filepath"
+ "slices"
"sync"
"time"
@@ -162,17 +164,17 @@ type containerStore struct {
func copyContainer(c *Container) *Container {
return &Container{
ID: c.ID,
- Names: copyStringSlice(c.Names),
+ Names: slices.Clone(c.Names),
ImageID: c.ImageID,
LayerID: c.LayerID,
Metadata: c.Metadata,
- BigDataNames: copyStringSlice(c.BigDataNames),
- BigDataSizes: copyStringInt64Map(c.BigDataSizes),
- BigDataDigests: copyStringDigestMap(c.BigDataDigests),
+ BigDataNames: slices.Clone(c.BigDataNames),
+ BigDataSizes: maps.Clone(c.BigDataSizes),
+ BigDataDigests: maps.Clone(c.BigDataDigests),
Created: c.Created,
UIDMap: copyIDMap(c.UIDMap),
GIDMap: copyIDMap(c.GIDMap),
- Flags: copyStringInterfaceMap(c.Flags),
+ Flags: maps.Clone(c.Flags),
volatileStore: c.volatileStore,
}
}
@@ -696,7 +698,7 @@ func (r *containerStore) create(id string, names []string, image, layer string,
volatileStore: options.Volatile,
}
if options.MountOpts != nil {
- container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
+ container.Flags[mountOptsFlag] = slices.Clone(options.MountOpts)
}
if options.Volatile {
container.Flags[volatileFlag] = true
@@ -788,13 +790,6 @@ func (r *containerStore) Delete(id string) error {
return ErrContainerUnknown
}
id = container.ID
- toDeleteIndex := -1
- for i, candidate := range r.containers {
- if candidate.ID == id {
- toDeleteIndex = i
- break
- }
- }
delete(r.byid, id)
// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@@ -803,14 +798,9 @@ func (r *containerStore) Delete(id string) error {
for _, name := range container.Names {
delete(r.byname, name)
}
- if toDeleteIndex != -1 {
- // delete the container at toDeleteIndex
- if toDeleteIndex == len(r.containers)-1 {
- r.containers = r.containers[:len(r.containers)-1]
- } else {
- r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
- }
- }
+ r.containers = slices.DeleteFunc(r.containers, func(candidate *Container) bool {
+ return candidate.ID == id
+ })
if err := r.saveFor(container); err != nil {
return err
}
@@ -948,14 +938,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
- addName := true
- for _, name := range c.BigDataNames {
- if name == key {
- addName = false
- break
- }
- }
- if addName {
+ if !slices.Contains(c.BigDataNames, key) {
c.BigDataNames = append(c.BigDataNames, key)
save = true
}
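
The slices helpers used throughout this file replace hand-written loops; a standalone sketch of slices.Contains and slices.DeleteFunc:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        names := []string{"a", "b", "a"}
        if !slices.Contains(names, "c") {
            names = append(names, "c")
        }
        // Removes every element matching the predicate, in one pass.
        names = slices.DeleteFunc(names, func(s string) bool { return s == "a" })
        fmt.Println(names) // [b c]
    }
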
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index b62234e57..91b240c45 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -254,8 +254,8 @@ type Differ interface {
type DriverWithDiffer interface {
Driver
// ApplyDiffWithDiffer applies the changes using the callback function.
- // If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
- ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
+ // The staging directory created by this function is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+ ApplyDiffWithDiffer(options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go
index 8829e55e9..82c1a460b 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/mount.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go
@@ -103,20 +103,20 @@ func mountOverlayFromMain() {
// paths, but we don't want to mess with other options.
var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
for _, arg := range strings.Split(options.Label, ",") {
- kv := strings.SplitN(arg, "=", 2)
- switch kv[0] {
+ key, val, _ := strings.Cut(arg, "=")
+ switch key {
case "upperdir":
upperk = "upperdir="
- upperv = kv[1]
+ upperv = val
case "workdir":
workk = "workdir="
- workv = kv[1]
+ workv = val
case "lowerdir":
lowerk = "lowerdir="
- lowerv = kv[1]
+ lowerv = val
case "label":
labelk = "label="
- labelv = kv[1]
+ labelv = val
default:
if others == "" {
others = arg
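
strings.Cut also removes a latent panic: the old SplitN form indexed kv[1] unconditionally inside the matching cases, which would panic on an option such as a bare "upperdir" with no "=". A standalone sketch:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        key, val, _ := strings.Cut("lowerdir=/l1:/l2", "=")
        fmt.Println(key, val) // lowerdir /l1:/l2
        // With no separator, Cut returns the whole string, "" and false;
        // nothing to index out of range.
        key, val, found := strings.Cut("volatile", "=")
        fmt.Println(key, val, found) // volatile  false
    }
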
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 63777fe47..ee3703aff 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -14,6 +14,7 @@ import (
"os/exec"
"path"
"path/filepath"
+ "slices"
"strconv"
"strings"
"sync"
@@ -158,30 +159,7 @@ func init() {
}
func hasMetacopyOption(opts []string) bool {
- for _, s := range opts {
- if s == "metacopy=on" {
- return true
- }
- }
- return false
-}
-
-func stripOption(opts []string, option string) []string {
- for i, s := range opts {
- if s == option {
- return stripOption(append(opts[:i], opts[i+1:]...), option)
- }
- }
- return opts
-}
-
-func hasVolatileOption(opts []string) bool {
- for _, s := range opts {
- if s == "volatile" {
- return true
- }
- }
- return false
+ return slices.Contains(opts, "metacopy=on")
}
func getMountProgramFlagFile(path string) string {
@@ -1526,14 +1504,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
}
}
- optsList = stripOption(optsList, "metacopy=on")
+ optsList = slices.DeleteFunc(optsList, func(opt string) bool {
+ return opt == "metacopy=on"
+ })
}
- for _, o := range optsList {
- if o == "ro" {
- readWrite = false
- break
- }
+ if slices.Contains(optsList, "ro") {
+ readWrite = false
}
lowers, err := os.ReadFile(path.Join(dir, lowerFile))
@@ -1732,7 +1709,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
optsList = append(optsList, "userxattr")
}
- if options.Volatile && !hasVolatileOption(optsList) {
+ if options.Volatile && !slices.Contains(optsList, "volatile") {
supported, err := d.getSupportsVolatile()
if err != nil {
return "", err
@@ -1896,7 +1873,9 @@ func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string {
// and since the rundir cannot be shared for different stores, it is safe to assume the
// current process has exclusive access to it.
//
- // LOCKING BUG? the .DiffSize operation does not currently hold an exclusive lock on the primary store.
+ // TODO: LOCKING BUG: the .DiffSize operation does not currently hold an exclusive lock on the primary store.
+ // (_Some_ of the callers might be better ported to use a metadata-only size computation instead of DiffSize,
+ // but DiffSize probably needs to remain for computing sizes of containers’ RW layers.)
if inAdditionalStore {
return path.Join(d.runhome, id, "merged")
}
@@ -2187,7 +2166,7 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
}
// ApplyDiffWithDiffer applies the changes in the new layer using the specified function
-func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
+func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
var idMappings *idtools.IDMappings
var forceMask *os.FileMode
@@ -2205,44 +2184,36 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
var applyDir string
- if id == "" {
- stagingDir := d.getStagingDir(id)
- err := os.MkdirAll(stagingDir, 0o700)
- if err != nil && !os.IsExist(err) {
- return graphdriver.DriverWithDifferOutput{}, err
- }
- layerDir, err := os.MkdirTemp(stagingDir, "")
- if err != nil {
- return graphdriver.DriverWithDifferOutput{}, err
- }
- perms := defaultPerms
- if forceMask != nil {
- perms = *forceMask
- }
- applyDir = filepath.Join(layerDir, "dir")
- if err := os.Mkdir(applyDir, perms); err != nil {
- return graphdriver.DriverWithDifferOutput{}, err
- }
+ stagingDir := d.getStagingDir("")
+ err := os.MkdirAll(stagingDir, 0o700)
+ if err != nil && !os.IsExist(err) {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+ layerDir, err := os.MkdirTemp(stagingDir, "")
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+ perms := defaultPerms
+ if forceMask != nil {
+ perms = *forceMask
+ }
+ applyDir = filepath.Join(layerDir, "dir")
+ if err := os.Mkdir(applyDir, perms); err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
- lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
- if err != nil {
- return graphdriver.DriverWithDifferOutput{}, err
- }
- defer func() {
- if errRet != nil {
- delete(d.stagingDirsLocks, layerDir)
- lock.Unlock()
- }
- }()
- d.stagingDirsLocks[layerDir] = lock
- lock.Lock()
- } else {
- var err error
- applyDir, err = d.getDiffPath(id)
- if err != nil {
- return graphdriver.DriverWithDifferOutput{}, err
- }
+ lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
}
+ defer func() {
+ if errRet != nil {
+ delete(d.stagingDirsLocks, layerDir)
+ lock.Unlock()
+ }
+ }()
+ d.stagingDirsLocks[layerDir] = lock
+ lock.Lock()
logrus.Debugf("Applying differ in %s", applyDir)
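
With the id != "" branch gone, the staging path is always built the same way; a generic standalone sketch of the pattern (the root path is hypothetical, and the per-layerDir lockfile handling is elided):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // Hypothetical root; the driver derives it via d.getStagingDir("").
        stagingRoot := filepath.Join(os.TempDir(), "staging")
        if err := os.MkdirAll(stagingRoot, 0o700); err != nil {
            panic(err)
        }
        layerDir, err := os.MkdirTemp(stagingRoot, "") // unique dir per apply
        if err != nil {
            panic(err)
        }
        applyDir := filepath.Join(layerDir, "dir")
        if err := os.Mkdir(applyDir, 0o700); err != nil {
            panic(err)
        }
        // The differ unpacks into applyDir; a lockfile under layerDir
        // guards the staging directory against concurrent cleanup.
        fmt.Println(applyDir)
    }
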
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 18f90fdc5..d38e74534 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -764,8 +764,8 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64,
buf := bufio.NewWriter(nil)
for err == nil {
base := path.Base(hdr.Name)
- if strings.HasPrefix(base, archive.WhiteoutPrefix) {
- name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
+ if rm, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
+ name := path.Join(path.Dir(hdr.Name), rm)
err = w.Remove(filepath.FromSlash(name))
if err != nil {
return 0, err
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index d71eab08b..8593c03c8 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -2,8 +2,10 @@ package storage
import (
"fmt"
+ "maps"
"os"
"path/filepath"
+ "slices"
"strings"
"sync"
"time"
@@ -181,18 +183,18 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
- Digests: copyDigestSlice(i.Digests),
- Names: copyStringSlice(i.Names),
- NamesHistory: copyStringSlice(i.NamesHistory),
+ Digests: slices.Clone(i.Digests),
+ Names: slices.Clone(i.Names),
+ NamesHistory: slices.Clone(i.NamesHistory),
TopLayer: i.TopLayer,
- MappedTopLayers: copyStringSlice(i.MappedTopLayers),
+ MappedTopLayers: slices.Clone(i.MappedTopLayers),
Metadata: i.Metadata,
- BigDataNames: copyStringSlice(i.BigDataNames),
- BigDataSizes: copyStringInt64Map(i.BigDataSizes),
- BigDataDigests: copyStringDigestMap(i.BigDataDigests),
+ BigDataNames: slices.Clone(i.BigDataNames),
+ BigDataSizes: maps.Clone(i.BigDataSizes),
+ BigDataDigests: maps.Clone(i.BigDataDigests),
Created: i.Created,
ReadOnly: i.ReadOnly,
- Flags: copyStringInterfaceMap(i.Flags),
+ Flags: maps.Clone(i.Flags),
}
}
@@ -863,12 +865,6 @@ func (r *imageStore) Delete(id string) error {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
id = image.ID
- toDeleteIndex := -1
- for i, candidate := range r.images {
- if candidate.ID == id {
- toDeleteIndex = i
- }
- }
delete(r.byid, id)
// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@@ -877,21 +873,18 @@ func (r *imageStore) Delete(id string) error {
delete(r.byname, name)
}
for _, digest := range image.Digests {
- prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
+ prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool {
+ return i == image
+ })
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
- if toDeleteIndex != -1 {
- // delete the image at toDeleteIndex
- if toDeleteIndex == len(r.images)-1 {
- r.images = r.images[:len(r.images)-1]
- } else {
- r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
- }
- }
+ r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool {
+ return candidate.ID == id
+ })
if err := r.Save(); err != nil {
return err
}
@@ -977,17 +970,6 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
return copyStringSlice(image.BigDataNames), nil
}
-func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
- modified := make([]*Image, 0, len(slice))
- for _, v := range slice {
- if v == value {
- continue
- }
- modified = append(modified, v)
- }
- return modified
-}
-
// Requires startWriting.
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if !r.lockfile.IsReadWrite() {
@@ -1037,21 +1019,16 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
- addName := true
- for _, name := range image.BigDataNames {
- if name == key {
- addName = false
- break
- }
- }
- if addName {
+ if !slices.Contains(image.BigDataNames, key) {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
for _, oldDigest := range image.Digests {
// remove the image from the list of images in the digest-based index
if list, ok := r.bydigest[oldDigest]; ok {
- prunedList := imageSliceWithoutValue(list, image)
+ prunedList := slices.DeleteFunc(list, func(i *Image) bool {
+ return i == image
+ })
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
@@ -1066,9 +1043,7 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]
- if len(list) == len(imageSliceWithoutValue(list, image)) {
- // the list isn't shortened by trying to prune this image from it,
- // so it's not in there yet
+ if !slices.Contains(list, image) {
r.bydigest[newDigest] = append(list, image)
}
}
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 8ae969894..c65be5f44 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -5,10 +5,12 @@ import (
"errors"
"fmt"
"io"
+ "maps"
"os"
"path"
"path/filepath"
"reflect"
+ "slices"
"sort"
"strings"
"sync"
@@ -312,9 +314,8 @@ type rwLayerStore interface {
// applies its changes to a specified layer.
ApplyDiff(to string, diff io.Reader) (int64, error)
- // ApplyDiffWithDiffer applies the changes through the differ callback function.
- // If to is the empty string, then a staging directory is created by the driver.
- ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+ // applyDiffWithDifferNoLock applies the changes through the differ callback function.
+ applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
@@ -435,7 +436,7 @@ func layerLocation(l *Layer) layerLocations {
func copyLayer(l *Layer) *Layer {
return &Layer{
ID: l.ID,
- Names: copyStringSlice(l.Names),
+ Names: slices.Clone(l.Names),
Parent: l.Parent,
Metadata: l.Metadata,
MountLabel: l.MountLabel,
@@ -450,8 +451,8 @@ func copyLayer(l *Layer) *Layer {
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
- BigDataNames: copyStringSlice(l.BigDataNames),
- Flags: copyStringInterfaceMap(l.Flags),
+ BigDataNames: slices.Clone(l.BigDataNames),
+ Flags: maps.Clone(l.Flags),
UIDMap: copyIDMap(l.UIDMap),
GIDMap: copyIDMap(l.GIDMap),
UIDs: copyUint32Slice(l.UIDs),
@@ -1372,7 +1373,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
- templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
+ templateUIDs, templateGIDs = slices.Clone(templateLayer.UIDs), slices.Clone(templateLayer.GIDs)
templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, -1, err
@@ -1564,19 +1565,9 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
// - r.bymount (via loadMounts / saveMounts)
- // check whether options include ro option
- hasReadOnlyOpt := func(opts []string) bool {
- for _, item := range opts {
- if item == "ro" {
- return true
- }
- }
- return false
- }
-
// You are not allowed to mount layers from readonly stores if they
// are not mounted read/only.
- if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
+ if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") {
return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
@@ -1836,14 +1827,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
return fmt.Errorf("closing bigdata file for the layer: %w", err)
}
- addName := true
- for _, name := range layer.BigDataNames {
- if name == key {
- addName = false
- break
- }
- }
- if addName {
+ if !slices.Contains(layer.BigDataNames, key) {
layer.BigDataNames = append(layer.BigDataNames, key)
return r.saveFor(layer)
}
@@ -1938,32 +1922,13 @@ func (r *layerStore) deleteInternal(id string) error {
delete(r.bymount, layer.MountPoint)
}
r.deleteInDigestMap(id)
- toDeleteIndex := -1
- for i, candidate := range r.layers {
- if candidate.ID == id {
- toDeleteIndex = i
- break
- }
- }
- if toDeleteIndex != -1 {
- // delete the layer at toDeleteIndex
- if toDeleteIndex == len(r.layers)-1 {
- r.layers = r.layers[:len(r.layers)-1]
- } else {
- r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
- }
- }
- if mountLabel != "" {
- var found bool
- for _, candidate := range r.layers {
- if candidate.MountLabel == mountLabel {
- found = true
- break
- }
- }
- if !found {
- selinux.ReleaseLabel(mountLabel)
- }
+ r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool {
+ return candidate.ID == id
+ })
+ if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool {
+ return candidate.MountLabel == mountLabel
+ }) {
+ selinux.ReleaseLabel(mountLabel)
}
return nil
}
@@ -1971,21 +1936,15 @@ func (r *layerStore) deleteInternal(id string) error {
// Requires startWriting.
func (r *layerStore) deleteInDigestMap(id string) {
for digest, layers := range r.bycompressedsum {
- for i, layerID := range layers {
- if layerID == id {
- layers = append(layers[:i], layers[i+1:]...)
- r.bycompressedsum[digest] = layers
- break
- }
+ if i := slices.Index(layers, id); i != -1 {
+ layers = slices.Delete(layers, i, i+1)
+ r.bycompressedsum[digest] = layers
}
}
for digest, layers := range r.byuncompressedsum {
- for i, layerID := range layers {
- if layerID == id {
- layers = append(layers[:i], layers[i+1:]...)
- r.byuncompressedsum[digest] = layers
- break
- }
+ if i := slices.Index(layers, id); i != -1 {
+ layers = slices.Delete(layers, i, i+1)
+ r.byuncompressedsum[digest] = layers
}
}
}
@@ -2545,9 +2504,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
- for k, v := range options.Flags {
- layer.Flags[k] = v
- }
+ maps.Copy(layer.Flags, options.Flags)
}
if err = r.saveFor(layer); err != nil {
return err
@@ -2585,37 +2542,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
return err
}
-// Requires startWriting.
-func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+// It must be called without any c/storage locks held to allow differ to make c/storage calls.
+func (r *layerStore) applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return nil, ErrNotSupported
}
- if to == "" {
- output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ)
- return &output, err
- }
-
- layer, ok := r.lookup(to)
- if !ok {
- return nil, ErrLayerUnknown
- }
- if options == nil {
- options = &drivers.ApplyDiffWithDifferOpts{
- ApplyDiffOpts: drivers.ApplyDiffOpts{
- Mappings: r.layerMappings(layer),
- MountLabel: layer.MountLabel,
- },
- }
- }
- output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
- if err != nil {
- return nil, err
- }
- layer.UIDs = output.UIDs
- layer.GIDs = output.GIDs
- err = r.saveFor(layer)
+ output, err := ddriver.ApplyDiffWithDiffer(options, differ)
return &output, err
}
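Beyond the mechanical `slices.Clone`/`maps.Clone` swaps, the layers.go hunks collapse several hand-rolled loops: `deleteInternal` now uses `slices.DeleteFunc` to drop the matching layer and `slices.ContainsFunc` to decide whether the SELinux label is still in use, and `deleteInDigestMap` uses `slices.Index` plus `slices.Delete`. A small sketch of the delete-then-check shape, with simplified stand-in types:

```go
package main

import (
	"fmt"
	"slices"
)

type Layer struct{ ID, MountLabel string }

func main() {
	layers := []*Layer{
		{ID: "a", MountLabel: "l1"},
		{ID: "b", MountLabel: "l1"},
		{ID: "c", MountLabel: "l2"},
	}
	id := "b"
	// Remove every layer whose ID matches, as deleteInternal now does.
	layers = slices.DeleteFunc(layers, func(l *Layer) bool { return l.ID == id })

	// Release the mount label only if no surviving layer still uses it.
	label := "l1"
	if !slices.ContainsFunc(layers, func(l *Layer) bool { return l.MountLabel == label }) {
		fmt.Println("would release", label)
	}
	fmt.Println(len(layers)) // 2
}
```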
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index eae60a305..b9d718b60 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -124,8 +124,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
}
// if a file was deleted and we are using overlay, we need to create a character device
- if strings.HasPrefix(base, WhiteoutPrefix) {
- originalBase := base[len(WhiteoutPrefix):]
+ if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok {
originalPath := filepath.Join(dir, originalBase)
if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
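`strings.CutPrefix` (Go 1.20) fuses the `HasPrefix` test and the `base[len(prefix):]` slice into one call; the same pattern recurs below in changes.go and fflags_bsd.go, and its sibling `strings.CutSuffix` appears later in types/options.go. A minimal example:

```go
package main

import (
	"fmt"
	"strings"
)

const WhiteoutPrefix = ".wh."

func main() {
	base := ".wh.deleted-file"
	// ok reports whether the prefix was present; originalBase is the remainder.
	if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok {
		fmt.Println(originalBase) // deleted-file
	}
}
```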
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index 448784549..3075c27bb 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -5,6 +5,7 @@ import (
"bytes"
"fmt"
"io"
+ "maps"
"os"
"path/filepath"
"reflect"
@@ -97,8 +98,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
- if strings.HasPrefix(f, WhiteoutPrefix) {
- originalFile := f[len(WhiteoutPrefix):]
+ if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok {
return filepath.Join(filepath.Dir(path), originalFile), nil
}
@@ -319,9 +319,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
- for k, v := range oldInfo.children {
- oldChildren[k] = v
- }
+ maps.Copy(oldChildren, oldInfo.children)
}
for name, newChild := range info.children {
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
index ca272e68f..a23bdf84b 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
@@ -31,7 +31,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
}()
// block until both routines have returned
- for i := 0; i < 2; i++ {
+ for range 2 {
if err := <-errs; err != nil {
return nil, nil, err
}
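`for range 2` is the Go 1.22 range-over-int form, equivalent to `for i := 0; i < 2; i++` when the index is unused; the same rewrite shows up again in storage_linux.go and unmount_unix.go below. A self-contained example:

```go
package main

import "fmt"

func main() {
	errs := make(chan error, 2)
	errs <- nil
	errs <- nil
	// Drain exactly two results without declaring an unused index.
	for range 2 {
		if err := <-errs; err != nil {
			fmt.Println(err)
		}
	}
}
```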
diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
index 92b8d05ed..5b8dc84e2 100644
--- a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
+++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
@@ -80,9 +80,9 @@ func parseFileFlags(fflags string) (uint32, uint32, error) {
var set, clear uint32 = 0, 0
for _, fflag := range strings.Split(fflags, ",") {
isClear := false
- if strings.HasPrefix(fflag, "no") {
+ if clean, ok := strings.CutPrefix(fflag, "no"); ok {
isClear = true
- fflag = strings.TrimPrefix(fflag, "no")
+ fflag = clean
}
if value, ok := flagNameToValue[fflag]; ok {
if isClear {
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
index 5b8acdaba..3ca99a2c2 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -8,7 +8,7 @@ import (
"path/filepath"
"github.com/containers/storage/pkg/mount"
- "github.com/syndtr/gocapability/capability"
+ "github.com/moby/sys/capability"
"golang.org/x/sys/unix"
)
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
index 1d823c8d4..a7dc18be4 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -65,11 +65,10 @@ type layer struct {
}
type layersCache struct {
- layers []*layer
- refs int
- store storage.Store
- mutex sync.RWMutex
- created time.Time
+ layers []*layer
+ refs int
+ store storage.Store
+ mutex sync.RWMutex
}
var (
@@ -83,6 +82,7 @@ func (c *layer) release() {
if err := unix.Munmap(c.mmapBuffer); err != nil {
logrus.Warnf("Error Munmap: layer %q: %v", c.id, err)
}
+ c.mmapBuffer = nil
}
}
@@ -107,14 +107,13 @@ func (c *layersCache) release() {
func getLayersCacheRef(store storage.Store) *layersCache {
cacheMutex.Lock()
defer cacheMutex.Unlock()
- if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+ if cache != nil && cache.store == store {
cache.refs++
return cache
}
- cache := &layersCache{
- store: store,
- refs: 1,
- created: time.Now(),
+ cache = &layersCache{
+ store: store,
+ refs: 1,
}
return cache
}
@@ -291,7 +290,7 @@ func (c *layersCache) load() error {
if r.ReadOnly {
// If the layer is coming from a read-only store, do not attempt
// to write to it.
- // Therefore,we won’t find any matches in read-only-store layers,
+ // Therefore, we won’t find any matches in read-only-store layers,
// unless the read-only store layer comes prepopulated with cacheKey data.
continue
}
@@ -781,14 +780,14 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
return "", "", -1, nil
}
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
binaryDigest, err := makeBinaryDigest(digest)
if err != nil {
return "", "", 0, err
}
- c.mutex.RLock()
- defer c.mutex.RUnlock()
-
for _, layer := range c.layers {
if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) {
continue
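The cache_linux.go hunks make three distinct fixes: `release` now nils the buffer after `Munmap` so a second release cannot double-unmap; `getLayersCacheRef` drops the 10-minute TTL and, crucially, assigns with `cache =` instead of `cache :=`, which previously shadowed the package-level singleton so it was never stored. A minimal sketch of the unmap-once guard, with the real `unix.Munmap` call replaced by a print for portability:

```go
package main

import "fmt"

type layer struct{ mmapBuffer []byte }

// release unmaps at most once; the nil assignment added above makes
// repeated calls safe (unmapping the same region twice is an error).
func (c *layer) release() {
	if c.mmapBuffer != nil {
		// In the real code: unix.Munmap(c.mmapBuffer)
		fmt.Println("unmapped", len(c.mmapBuffer), "bytes")
		c.mmapBuffer = nil
	}
}

func main() {
	l := &layer{mmapBuffer: make([]byte, 4096)}
	l.release()
	l.release() // no-op
}
```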
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 403d7d5aa..60cada2cc 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -1331,7 +1331,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
wg.Wait()
}()
- for i := 0; i < copyGoRoutines; i++ {
+ for range copyGoRoutines {
wg.Add(1)
jobs := copyFileJobs
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
index dd6c02a77..bed040e0c 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
@@ -4,8 +4,8 @@
package directory
import (
+ "errors"
"io/fs"
- "os"
"path/filepath"
"syscall"
)
@@ -27,7 +27,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
if err != nil {
// if dir does not exist, Usage() returns the error.
// if dir/x disappeared while walking, Usage() ignores dir/x.
- if os.IsNotExist(err) && d != dir {
+ if errors.Is(err, fs.ErrNotExist) && d != dir {
return nil
}
return err
@@ -35,6 +35,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {
fileInfo, err := entry.Info()
if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil
+ }
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
index 482bc51a2..3c92b9567 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
@@ -4,8 +4,8 @@
package directory
import (
+ "errors"
"io/fs"
- "os"
"path/filepath"
)
@@ -25,7 +25,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
if err != nil {
// if dir does not exist, Size() returns the error.
// if dir/x disappeared while walking, Size() ignores dir/x.
- if os.IsNotExist(err) && path != dir {
+ if errors.Is(err, fs.ErrNotExist) && path != dir {
return nil
}
return err
@@ -40,6 +40,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {
fileInfo, err := d.Info()
if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil
+ }
return err
}
usage.Size += fileInfo.Size()
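Both directory walkers switch from `os.IsNotExist` to `errors.Is(err, fs.ErrNotExist)`, which unwraps wrapped errors, and additionally tolerate entries that vanish between the walk and the `Info()` call. The distinction in one example:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Stat("/definitely/not/there")
	// errors.Is follows the error's wrap chain; os.IsNotExist only
	// recognizes a fixed set of concrete error types.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
```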
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go
index 5de3a671d..40a229932 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags.go
@@ -97,14 +97,14 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
}
continue
}
- opt := strings.SplitN(option, "=", 2)
- if len(opt) != 2 || !validFlags[opt[0]] {
+ opt, _, ok := strings.Cut(option, "=")
+ if !ok || !validFlags[opt] {
return nil, fmt.Errorf("invalid tmpfs option %q", opt)
}
- if !dataCollisions[opt[0]] {
+ if !dataCollisions[opt] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
- dataCollisions[opt[0]] = true
+ dataCollisions[opt] = true
}
}
@@ -140,8 +140,8 @@ func ParseOptions(options string) (int, string) {
func ParseTmpfsOptions(options string) (int, string, error) {
flags, data := ParseOptions(options)
for _, o := range strings.Split(data, ",") {
- opt := strings.SplitN(o, "=", 2)
- if !validFlags[opt[0]] {
+ opt, _, _ := strings.Cut(o, "=")
+ if !validFlags[opt] {
return 0, "", fmt.Errorf("invalid tmpfs option %q", opt)
}
}
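`strings.Cut` returns the text before and after the first separator plus a found flag, replacing the `SplitN(..., 2)` index juggling here and in mounter_freebsd.go and parsers.go below:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	option := "size=64m"
	key, val, ok := strings.Cut(option, "=")
	fmt.Println(key, val, ok) // size 64m true
}
```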
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
index c70b0bf99..afd321041 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -40,13 +40,9 @@ func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS = true
continue
}
- opt := strings.SplitN(x, "=", 2)
- options = append(options, opt[0])
- if len(opt) == 2 {
- options = append(options, opt[1])
- } else {
- options = append(options, "")
- }
+ name, val, _ := strings.Cut(x, "=")
+ options = append(options, name)
+ options = append(options, val)
}
}
diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
index a2a1d4072..a29e92090 100644
--- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
+++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
@@ -11,7 +11,7 @@ import (
func unmount(target string, flags int) error {
var err error
- for i := 0; i < 50; i++ {
+ for range 50 {
err = unix.Unmount(target, flags)
switch err {
case unix.EBUSY:
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
index 3fb0c36b8..7b20b0628 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -11,11 +11,11 @@ import (
// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
func ParseKeyValueOpt(opt string) (string, string, error) {
- parts := strings.SplitN(opt, "=", 2)
- if len(parts) != 2 {
+ k, v, ok := strings.Cut(opt, "=")
+ if !ok {
return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
}
- return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+ return strings.TrimSpace(k), strings.TrimSpace(v), nil
}
// ParseUintList parses and validates the specified string as the value
@@ -42,19 +42,19 @@ func ParseUintList(val string) (map[int]bool, error) {
errInvalidFormat := fmt.Errorf("invalid format: %s", val)
for _, r := range split {
- if !strings.Contains(r, "-") {
+ minS, maxS, ok := strings.Cut(r, "-")
+ if !ok {
v, err := strconv.Atoi(r)
if err != nil {
return nil, errInvalidFormat
}
availableInts[v] = true
} else {
- split := strings.SplitN(r, "-", 2)
- min, err := strconv.Atoi(split[0])
+ min, err := strconv.Atoi(minS)
if err != nil {
return nil, errInvalidFormat
}
- max, err := strconv.Atoi(split[1])
+ max, err := strconv.Atoi(maxS)
if err != nil {
return nil, errInvalidFormat
}
diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
index 66a59c85d..f63c3e444 100644
--- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
+++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
@@ -3,7 +3,7 @@ package stringutils
import (
"bytes"
- "math/rand"
+ "math/rand/v2"
"strings"
)
@@ -13,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string {
letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]byte, n)
for i := range b {
- b[i] = letters[rand.Intn(len(letters))]
+ b[i] = letters[rand.IntN(len(letters))]
}
return string(b)
}
@@ -25,7 +25,7 @@ func GenerateRandomASCIIString(n int) string {
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
res := make([]byte, n)
for i := 0; i < n; i++ {
- res[i] = chars[rand.Intn(len(chars))]
+ res[i] = chars[rand.IntN(len(chars))]
}
return string(res)
}
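The move to `math/rand/v2` renames `Intn` to `IntN`; the v2 package also seeds its top-level generator automatically, so there is no `rand.Seed` ritual. A minimal example:

```go
package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	letters := []byte("abcdefghijklmnopqrstuvwxyz")
	b := make([]byte, 8)
	for i := range b {
		b[i] = letters[rand.IntN(len(letters))]
	}
	fmt.Println(string(b))
}
```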
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index 32e8d7dca..98b810e9d 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -21,9 +21,9 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
+ "github.com/moby/sys/capability"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
- "github.com/syndtr/gocapability/capability"
)
// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index bd4da7a46..692bf3531 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -6,9 +6,11 @@ import (
"errors"
"fmt"
"io"
+ "maps"
"os"
"path/filepath"
"reflect"
+ "slices"
"strings"
"sync"
"syscall"
@@ -339,11 +341,17 @@ type Store interface {
// }
ApplyDiff(to string, diff io.Reader) (int64, error)
- // ApplyDiffer applies a diff to a layer.
+ // ApplyDiffWithDiffer applies a diff to a layer.
// It is the caller's responsibility to clean the staging directory if it is not
- // successfully applied with ApplyDiffFromStagingDirectory.
+ // successfully applied with ApplyStagedLayer.
+ // Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+ // PrepareStagedLayer applies a diff to a layer.
+	// It is the caller's responsibility to clean the staging directory if it is not
+ // successfully applied with ApplyStagedLayer.
+ PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+
// ApplyStagedLayer combines the functions of creating a layer and using the staging
// directory to populate it.
// It marks the layer for automatic removal if applying the diff fails for any reason.
@@ -939,9 +947,7 @@ func (s *store) GraphOptions() []string {
func (s *store) PullOptions() map[string]string {
cp := make(map[string]string, len(s.pullOptions))
- for k, v := range s.pullOptions {
- cp[k] = v
- }
+ maps.Copy(cp, s.pullOptions)
return cp
}
@@ -1464,7 +1470,7 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
if lOptions != nil {
options = *lOptions
options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData)
- options.Flags = copyStringInterfaceMap(lOptions.Flags)
+ options.Flags = maps.Clone(lOptions.Flags)
}
if options.HostUIDMapping {
options.UIDMap = nil
@@ -1605,7 +1611,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
CreationDate: i.Created,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
- NamesHistory: copyStringSlice(i.NamesHistory),
+ NamesHistory: slices.Clone(i.NamesHistory),
}
for _, key := range i.BigDataNames {
data, err := store.BigData(id, key)
@@ -1622,7 +1628,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
Digest: dataDigest,
})
}
- namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...))
+ namesToAddAfterCreating = dedupeStrings(slices.Concat(i.Names, names))
break
}
}
@@ -1636,18 +1642,16 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
if iOptions.Digest != "" {
options.Digest = iOptions.Digest
}
- options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...)
+ options.Digests = append(options.Digests, iOptions.Digests...)
if iOptions.Metadata != "" {
options.Metadata = iOptions.Metadata
}
options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
- options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...)
+ options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...)
if options.Flags == nil {
options.Flags = make(map[string]interface{})
}
- for k, v := range iOptions.Flags {
- options.Flags[k] = v
- }
+ maps.Copy(options.Flags, iOptions.Flags)
}
if options.CreationDate.IsZero() {
@@ -1782,7 +1786,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap)
options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap)
options.LabelOpts = copyStringSlice(cOptions.LabelOpts)
- options.Flags = copyStringInterfaceMap(cOptions.Flags)
+ options.Flags = maps.Clone(cOptions.Flags)
options.MountOpts = copyStringSlice(cOptions.MountOpts)
options.StorageOpt = copyStringStringMap(cOptions.StorageOpt)
options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
@@ -3105,13 +3109,19 @@ func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) e
return err
}
+func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+ rlstore, err := s.getLayerStore()
+ if err != nil {
+ return nil, err
+ }
+ return rlstore.applyDiffWithDifferNoLock(options, differ)
+}
+
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
- return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
- if to != "" && !rlstore.Exists(to) {
- return nil, ErrLayerUnknown
- }
- return rlstore.ApplyDiffWithDiffer(to, options, differ)
- })
+ if to != "" {
+ return nil, fmt.Errorf("ApplyDiffWithDiffer does not support non-empty 'layer' parameter")
+ }
+ return s.PrepareStagedLayer(options, differ)
}
func (s *store) DifferTarget(id string) (string, error) {
@@ -3683,22 +3693,6 @@ func copyStringSlice(slice []string) []string {
return ret
}
-func copyStringInt64Map(m map[string]int64) map[string]int64 {
- ret := make(map[string]int64, len(m))
- for k, v := range m {
- ret[k] = v
- }
- return ret
-}
-
-func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
- ret := make(map[string]digest.Digest, len(m))
- for k, v := range m {
- ret[k] = v
- }
- return ret
-}
-
func copyStringStringMap(m map[string]string) map[string]string {
ret := make(map[string]string, len(m))
for k, v := range m {
@@ -3736,7 +3730,7 @@ func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOptio
ret := make([]ImageBigDataOption, len(slice))
for i := range slice {
ret[i].Key = slice[i].Key
- ret[i].Data = append([]byte{}, slice[i].Data...)
+ ret[i].Data = slices.Clone(slice[i].Data)
ret[i].Digest = slice[i].Digest
}
return ret
@@ -3746,7 +3740,7 @@ func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []Container
ret := make([]ContainerBigDataOption, len(slice))
for i := range slice {
ret[i].Key = slice[i].Key
- ret[i].Data = append([]byte{}, slice[i].Data...)
+ ret[i].Data = slices.Clone(slice[i].Data)
}
return ret
}
@@ -3800,10 +3794,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
return nil, err
}
key = strings.ToLower(key)
- for _, m := range mountOpts {
- if m == key {
- return strings.Split(val, ","), nil
- }
+ if slices.Contains(mountOpts, key) {
+ return strings.Split(val, ","), nil
}
}
return nil, nil
@@ -3811,11 +3803,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
// Free removes the store from the list of stores
func (s *store) Free() {
- for i := 0; i < len(stores); i++ {
- if stores[i] == s {
- stores = append(stores[:i], stores[i+1:]...)
- return
- }
+ if i := slices.Index(stores, s); i != -1 {
+ stores = slices.Delete(stores, i, i+1)
}
}
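The store.go hunks deprecate `ApplyDiffWithDiffer` in favor of `PrepareStagedLayer`: the old entry point keeps its signature for compatibility, but now rejects the `to` argument the new API dropped and forwards everything else. A hypothetical sketch of that deprecation pattern with simplified stand-in types, not the real storage interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

type store struct{}

// PrepareStagedLayer is the replacement API; it never targets an
// existing layer.
func (s *store) PrepareStagedLayer(opts string) (string, error) {
	return "staged with " + opts, nil
}

// Deprecated: use PrepareStagedLayer instead.
func (s *store) ApplyDiffWithDiffer(to string, opts string) (string, error) {
	if to != "" {
		return "", errors.New("ApplyDiffWithDiffer does not support non-empty 'layer' parameter")
	}
	return s.PrepareStagedLayer(opts)
}

func main() {
	s := &store{}
	out, err := s.ApplyDiffWithDiffer("", "opts")
	fmt.Println(out, err)
}
```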
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index f1a900b8d..efc08c476 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -344,8 +344,8 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
dirEntries, err := os.ReadDir(opts.GraphRoot)
if err == nil {
for _, entry := range dirEntries {
- if strings.HasSuffix(entry.Name(), "-images") {
- opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images")
+ if name, ok := strings.CutSuffix(entry.Name(), "-images"); ok {
+ opts.GraphDriverName = name
break
}
}
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 57120731b..1b494ef12 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -89,7 +89,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
passwdFile = filepath.Join(containerMount, "etc/passwd")
}
if groupFile == "" {
- groupFile = filepath.Join(groupFile, "etc/group")
+ groupFile = filepath.Join(containerMount, "etc/group")
}
size := 0
@@ -99,14 +99,14 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
for _, u := range users {
// Skip the "nobody" user otherwise we end up with 65536
// ids with most images
- if u.Name == "nobody" {
+ if u.Name == "nobody" || u.Name == "nogroup" {
continue
}
if u.Uid > size && u.Uid != nobodyUser {
- size = u.Uid
+ size = u.Uid + 1
}
if u.Gid > size && u.Gid != nobodyUser {
- size = u.Gid
+ size = u.Gid + 1
}
}
}
@@ -114,11 +114,11 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
groups, err := libcontainerUser.ParseGroupFile(groupFile)
if err == nil {
for _, g := range groups {
- if g.Name == "nobody" {
+ if g.Name == "nobody" || g.Name == "nogroup" {
continue
}
if g.Gid > size && g.Gid != nobodyUser {
- size = g.Gid
+ size = g.Gid + 1
}
}
}
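Two real bug fixes hide in the userns.go hunk: `groupFile` was previously joined against itself rather than `containerMount`, so the default group path never resolved inside the container, and the computed namespace size was one short, since holding ID n requires n+1 IDs when IDs start at 0. A minimal sketch of the corrected size computation, mirroring the loop above with a hypothetical helper:

```go
package main

import "fmt"

// requiredSize returns how many IDs a namespace needs to contain every
// listed ID, skipping the overflow ("nobody") ID.
func requiredSize(ids []int, nobody int) int {
	size := 0
	for _, id := range ids {
		if id > size && id != nobody {
			size = id + 1 // was "size = id", one ID short
		}
	}
	return size
}

func main() {
	fmt.Println(requiredSize([]int{0, 100, 1000}, 65534)) // 1001
}
```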
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 5bade6ffe..c61d79837 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -2,6 +2,7 @@ package storage
import (
"fmt"
+ "slices"
"github.com/containers/storage/types"
)
@@ -41,22 +42,12 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
// remove given names from old names
result = make([]string, 0, len(oldNames))
for _, name := range oldNames {
- // only keep names in final result which do not intersect with input names
- // basically `result = oldNames - opParameters`
- nameShouldBeRemoved := false
- for _, opName := range opParameters {
- if name == opName {
- nameShouldBeRemoved = true
- }
- }
- if !nameShouldBeRemoved {
+ if !slices.Contains(opParameters, name) {
result = append(result, name)
}
}
case addNames:
- result = make([]string, 0, len(opParameters)+len(oldNames))
- result = append(result, opParameters...)
- result = append(result, oldNames...)
+ result = slices.Concat(opParameters, oldNames)
default:
return result, errInvalidUpdateNameOperation
}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
index 9a70c1432..6a846ece9 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
@@ -64,16 +64,28 @@ func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time)
if now == nil {
now = time.Now
}
- return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
+ return &RemoteKeySet{
+ jwksURL: jwksURL,
+ now: now,
+ // For historical reasons, this package uses contexts for configuration, not just
+ // cancellation. In hindsight, this was a bad idea.
+ //
+	// Attempts to reason about how cancels should work with background requests have
+	// largely led to confusion. Use the context here as a config bag-of-values and
+ // ignore the cancel function.
+ ctx: context.WithoutCancel(ctx),
+ }
}
// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
// a jwks_uri endpoint.
type RemoteKeySet struct {
jwksURL string
- ctx context.Context
now func() time.Time
+ // Used for configuration. Cancelation is ignored.
+ ctx context.Context
+
// guard all other fields
mu sync.RWMutex
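`context.WithoutCancel` (Go 1.21) keeps the parent's values but detaches its cancellation, which is exactly what `RemoteKeySet` wants from a context it only uses as a configuration bag:

```go
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	// bg inherits ctx's values but ignores its cancellation.
	bg := context.WithoutCancel(ctx)
	cancel()
	fmt.Println(ctx.Err()) // context canceled
	fmt.Println(bg.Err())  // <nil>
}
```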
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
index 0ac58d299..52b27b746 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
@@ -120,8 +120,8 @@ type Config struct {
}
// VerifierContext returns an IDTokenVerifier that uses the provider's key set to
-// verify JWTs. As opposed to Verifier, the context is used for all requests to
-// the upstream JWKs endpoint.
+// verify JWTs. As opposed to Verifier, the context is used to configure requests
+// to the upstream JWKs endpoint. The provided context's cancellation is ignored.
func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier {
return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config)
}
diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
index 28bdd2fc0..6f717dbd8 100644
--- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
+++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
@@ -1,3 +1,27 @@
+# v4.0.4
+
+## Fixed
+
+ - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a
+ breaking change. See #136 / #137.
+
+# v4.0.3
+
+## Changed
+
+ - Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
+ - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
+ - Dependency updates
+
+# v4.0.2
+
+## Changed
+
+ - Improved documentation of Verify() to note that JSONWebKeySet is a supported
+ argument type (#104)
+ - Defined exported error values for missing x5c header and unsupported elliptic
+ curves error cases (#117)
+
# v4.0.1
## Fixed
diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go
index aba08424c..d81b03b44 100644
--- a/vendor/github.com/go-jose/go-jose/v4/crypter.go
+++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go
@@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}
- key := tryJWKS(decryptionKey, obj.Header)
+ key, err := tryJWKS(decryptionKey, obj.Header)
+ if err != nil {
+ return nil, err
+ }
decrypter, err := newDecrypter(key)
if err != nil {
return nil, err
@@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}
- key := tryJWKS(decryptionKey, obj.Header)
+ key, err := tryJWKS(decryptionKey, obj.Header)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
decrypter, err := newDecrypter(key)
if err != nil {
return -1, Header{}, nil, err
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go
index a565aaab2..8a5284210 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jwk.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go
@@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
return key.K.bytes(), nil
}
-func tryJWKS(key interface{}, headers ...Header) interface{} {
+var (
+ // ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a
+ // key ID which matches one in the provided tokens headers.
+ ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set")
+)
+
+func tryJWKS(key interface{}, headers ...Header) (interface{}, error) {
var jwks JSONWebKeySet
switch jwksType := key.(type) {
@@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
case JSONWebKeySet:
jwks = jwksType
default:
- return key
+ // If the specified key is not a JWKS, return as is.
+ return key, nil
}
+ // Determine the KID to search for from the headers.
var kid string
for _, header := range headers {
if header.KeyID != "" {
@@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
}
}
+ // If no KID is specified in the headers, reject.
if kid == "" {
- return key
+ return nil, ErrJWKSKidNotFound
}
+ // Find the JWK with the matching KID. If no JWK with the specified KID is
+ // found, reject.
keys := jwks.Key(kid)
if len(keys) == 0 {
- return key
+ return nil, ErrJWKSKidNotFound
}
- return keys[0].Key
+ return keys[0].Key, nil
}
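This is a behavior change, not just plumbing: `tryJWKS` now returns `(key, error)`, and when the caller passes a JWKS but the token's headers carry no `kid`, or no key in the set matches it, verification fails with `ErrJWKSKidNotFound` instead of silently falling back to the raw key set. A hedged sketch of the strict lookup with simplified stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrKidNotFound = errors.New("JWK with matching kid not found in JWK Set")

// resolveKey rejects when the kid is absent or unmatched, rather than
// returning the whole set as the pre-change code effectively did.
func resolveKey(keys map[string]string, kid string) (string, error) {
	if kid == "" {
		return "", ErrKidNotFound
	}
	key, ok := keys[kid]
	if !ok {
		return "", ErrKidNotFound
	}
	return key, nil
}

func main() {
	keys := map[string]string{"key-1": "material"}
	_, err := resolveKey(keys, "key-2")
	fmt.Println(errors.Is(err, ErrKidNotFound)) // true
}
```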
diff --git a/vendor/github.com/go-jose/go-jose/v4/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go
index 68db085ef..429427232 100644
--- a/vendor/github.com/go-jose/go-jose/v4/opaque.go
+++ b/vendor/github.com/go-jose/go-jose/v4/opaque.go
@@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig
}
// OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
+//
+// Note: this cannot currently be implemented outside this package because of its
+// unexported method.
type OpaqueKeyEncrypter interface {
// KeyID returns the kid
KeyID() string
diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go
index 46c9a4d96..3dec0112b 100644
--- a/vendor/github.com/go-jose/go-jose/v4/signing.go
+++ b/vendor/github.com/go-jose/go-jose/v4/signing.go
@@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
// The verificationKey argument must have one of the types allowed for the
// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
- key := tryJWKS(verificationKey, obj.headers()...)
+ key, err := tryJWKS(verificationKey, obj.headers()...)
+ if err != nil {
+ return err
+ }
verifier, err := newVerifier(key)
if err != nil {
return err
@@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
// The verificationKey argument must have one of the types allowed for the
// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
- key := tryJWKS(verificationKey, obj.headers()...)
+ key, err := tryJWKS(verificationKey, obj.headers()...)
+ if err != nil {
+ return -1, Signature{}, err
+ }
verifier, err := newVerifier(key)
if err != nil {
return -1, Signature{}, err
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 05c7359e4..684a30853 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,20 @@ This package provides various compression algorithms.
# changelog
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+ * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+ * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+ * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+ * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+ * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+ * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+ * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+ * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
+
* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@@ -81,7 +95,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
- * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -136,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
- * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+ * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -339,7 +353,7 @@ While the release has been extensively tested, it is recommended to testing when
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
- * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -518,7 +532,7 @@ While the release has been extensively tested, it is recommended to testing when
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 66d1657d2..af53fb860 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) {
}
switch d.compressionLevel.chain {
case 0:
- // level was NoCompression or ConstantCompresssion.
+ // level was NoCompression or ConstantCompression.
d.windowEnd = 0
default:
s := d.state
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index 2f410d64f..0d7b437f1 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -298,6 +298,14 @@ const (
huffmanGenericReader
)
+// flushMode tells decompressor when to return data
+type flushMode uint8
+
+const (
+ syncFlush flushMode = iota // return data after sync flush block
+ partialFlush // return data after each block
+)
+
// Decompress state.
type decompressor struct {
// Input source.
@@ -332,6 +340,8 @@ type decompressor struct {
nb uint
final bool
+
+ flushMode flushMode
}
func (f *decompressor) nextBlock() {
@@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() {
}
if n == 0 {
- f.toRead = f.dict.readFlush()
+ if f.flushMode == syncFlush {
+ f.toRead = f.dict.readFlush()
+ }
+
f.finishBlock()
return
}
@@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() {
if f.dict.availRead() > 0 {
f.toRead = f.dict.readFlush()
}
+
f.err = io.EOF
+ } else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
}
+
f.step = nextBlock
}
@@ -789,15 +806,25 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
return nil
}
-// NewReader returns a new ReadCloser that can be used
-// to read the uncompressed version of r.
-// If r does not also implement io.ByteReader,
-// the decompressor may read more data than necessary from r.
-// It is the caller's responsibility to call Close on the ReadCloser
-// when finished reading.
-//
-// The ReadCloser returned by NewReader also implements Resetter.
-func NewReader(r io.Reader) io.ReadCloser {
+type ReaderOpt func(*decompressor)
+
+// WithPartialBlock tells decompressor to return after each block,
+// so it can read data written with partial flush
+func WithPartialBlock() ReaderOpt {
+ return func(f *decompressor) {
+ f.flushMode = partialFlush
+ }
+}
+
+// WithDict initializes the reader with a preset dictionary
+func WithDict(dict []byte) ReaderOpt {
+ return func(f *decompressor) {
+ f.dict.init(maxMatchOffset, dict)
+ }
+}
+
+// NewReaderOpts returns new reader with provided options
+func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
@@ -806,9 +833,26 @@ func NewReader(r io.Reader) io.ReadCloser {
f.codebits = new([numCodes]int)
f.step = nextBlock
f.dict.init(maxMatchOffset, nil)
+
+ for _, opt := range opts {
+ opt(&f)
+ }
+
return &f
}
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+ return NewReaderOpts(r)
+}
+
// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
@@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser {
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
- fixedHuffmanDecoderInit()
-
- var f decompressor
- f.r = makeReader(r)
- f.bits = new([maxNumLit + maxNumDist]int)
- f.codebits = new([numCodes]int)
- f.step = nextBlock
- f.dict.init(maxMatchOffset, dict)
- return &f
+ return NewReaderOpts(r, WithDict(dict))
}
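The inflate.go hunk rebuilds `NewReader` and `NewReaderDict` on top of a new functional-options constructor, `NewReaderOpts`; `WithPartialBlock` makes the decompressor surface data after every block instead of only at sync-flush points, and `WithDict` preloads the window. A usage sketch, assuming the post-update version of the klauspost/compress module:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w, _ := flate.NewWriter(&buf, flate.DefaultCompression)
	io.WriteString(w, "hello, partial flush")
	w.Close()

	// WithPartialBlock returns data per block rather than per sync flush,
	// useful for protocols that flush mid-stream.
	r := flate.NewReaderOpts(&buf, flate.WithPartialBlock())
	out, _ := io.ReadAll(r)
	r.Close()
	fmt.Println(string(out))
}
```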
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index cc05d0f7e..0c7dd4ffe 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -15,7 +15,7 @@ const (
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 54bd08b25..0f56b02d7 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 0 {
- fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 20 {
- fmt.Fprintf(w, "%d errros, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 03744fbc7..9c28840c3 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
- println("Reading table for", tableIndex(i))
+ if debugDecoder {
+ println("Reading table for", tableIndex(i))
+ }
if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index a4f5bf91f..84a79fde7 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -179,9 +179,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -210,12 +210,12 @@ encodeLoop:
// Index match start+1 (long) -> s - 1
index0 := s + repOff
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -241,9 +241,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -270,11 +270,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -708,9 +708,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -738,12 +738,12 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -772,9 +772,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -801,11 +801,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index a154c18f7..d36be7bd8 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -138,9 +138,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -166,11 +166,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -798,9 +798,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -826,11 +826,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 72af7ef0f..a79c4a527 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -202,7 +202,7 @@ func (e *Encoder) nextBlock(final bool) error {
return nil
}
if final && len(s.filling) > 0 {
- s.current = e.EncodeAll(s.filling, s.current[:0])
+ s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
var n2 int
n2, s.err = s.w.Write(s.current)
if s.err != nil {
@@ -469,6 +469,15 @@ func (e *Encoder) Close() error {
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+ e.init.Do(e.initialize)
+ enc := <-e.encoders
+ defer func() {
+ e.encoders <- enc
+ }()
+ return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
if len(src) == 0 {
if e.o.fullZero {
// Add frame header.
@@ -491,13 +500,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
- e.init.Do(e.initialize)
- enc := <-e.encoders
- defer func() {
- // Release encoder reference to last block.
- // If a non-single block is needed the encoder will reset again.
- e.encoders <- enc
- }()
+
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
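The encoder.go refactor fixes a pool hazard: `nextBlock` used to call the exported `EncodeAll` for a final short write, pulling a second encoder from the pool while one was already checked out. Splitting acquisition (exported `EncodeAll`) from the work (unexported `encodeAll`) lets internal callers reuse the encoder they already hold. A simplified sketch of the pattern, not the real zstd types:

```go
package main

import "fmt"

type Encoder struct{ encoders chan int }

// EncodeAll acquires a pooled encoder, does the work, and returns it.
func (e *Encoder) EncodeAll(src []byte) []byte {
	enc := <-e.encoders
	defer func() { e.encoders <- enc }()
	return e.encodeAll(enc, src)
}

// encodeAll works with an encoder the caller already holds, so internal
// call sites cannot deadlock by double-acquiring from the pool.
func (e *Encoder) encodeAll(enc int, src []byte) []byte {
	return append([]byte(fmt.Sprintf("enc%d:", enc)), src...)
}

func main() {
	e := &Encoder{encoders: make(chan int, 1)}
	e.encoders <- 1
	fmt.Println(string(e.EncodeAll([]byte("data"))))
}
```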
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 53e160f7e..e47af66e7 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return err
}
- printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ if debugDecoder {
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ }
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 8adabd828..c59f17e07 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
default:
- return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+ return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}
s.seqSize += ctx.litRemain
@@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
return io.ErrUnexpectedEOF
}
- return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
}
if ctx.litRemain < 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 5b06174b8..f5591fa1e 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go
index 64df4a8d8..c01f551ab 100644
--- a/vendor/github.com/letsencrypt/boulder/core/objects.go
+++ b/vendor/github.com/letsencrypt/boulder/core/objects.go
@@ -157,58 +157,44 @@ type ValidationRecord struct {
UsedRSAKEX bool `json:"-"`
}
-func looksLikeKeyAuthorization(str string) error {
- parts := strings.Split(str, ".")
- if len(parts) != 2 {
- return fmt.Errorf("Invalid key authorization: does not look like a key authorization")
- } else if !LooksLikeAToken(parts[0]) {
- return fmt.Errorf("Invalid key authorization: malformed token")
- } else if !LooksLikeAToken(parts[1]) {
- // Thumbprints have the same syntax as tokens in boulder
- // Both are base64-encoded and 32 octets
- return fmt.Errorf("Invalid key authorization: malformed key thumbprint")
- }
- return nil
-}
-
// Challenge is an aggregate of all data needed for any challenges.
//
// Rather than define individual types for different types of
// challenge, we just throw all the elements into one bucket,
// together with the common metadata elements.
type Challenge struct {
- // The type of challenge
+ // Type is the type of challenge encoded in this object.
Type AcmeChallenge `json:"type"`
- // The status of this challenge
- Status AcmeStatus `json:"status,omitempty"`
+ // URL is the URL to which a response can be posted. Required for all types.
+ URL string `json:"url,omitempty"`
- // Contains the error that occurred during challenge validation, if any
- Error *probs.ProblemDetails `json:"error,omitempty"`
+ // Status is the status of this challenge. Required for all types.
+ Status AcmeStatus `json:"status,omitempty"`
- // A URI to which a response can be POSTed
- URI string `json:"uri,omitempty"`
+ // Validated is the time at which the server validated the challenge. Required
+ // if status is valid.
+ Validated *time.Time `json:"validated,omitempty"`
- // For the V2 API the "URI" field is deprecated in favour of URL.
- URL string `json:"url,omitempty"`
+ // Error contains the error that occurred during challenge validation, if any.
+ // If set, the Status must be "invalid".
+ Error *probs.ProblemDetails `json:"error,omitempty"`
- // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges
+ // Token is a random value that uniquely identifies the challenge. It is used
+ // by all current challenges (http-01, tls-alpn-01, and dns-01).
Token string `json:"token,omitempty"`
- // The expected KeyAuthorization for validation of the challenge. Populated by
- // the RA prior to passing the challenge to the VA. For legacy reasons this
- // field is called "ProvidedKeyAuthorization" because it was initially set by
- // the content of the challenge update POST from the client. It is no longer
- // set that way and should be renamed to "KeyAuthorization".
- // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`.
+ // ProvidedKeyAuthorization used to carry the expected key authorization from
+ // the RA to the VA. However, since this field is never presented to the user
+ // via the ACME API, it should not be on this type.
+ //
+ // Deprecated: use vapb.PerformValidationRequest.ExpectedKeyAuthorization instead.
+ // TODO(#7514): Remove this.
ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"`
// Contains information about URLs used or redirected to and IPs resolved and
// used
ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"`
- // The time at which the server validated the challenge. Required by
- // RFC8555 if status is valid.
- Validated *time.Time `json:"validated,omitempty"`
}
// ExpectedKeyAuthorization computes the expected KeyAuthorization value for
@@ -273,43 +259,18 @@ func (ch Challenge) RecordsSane() bool {
return true
}
-// CheckConsistencyForClientOffer checks the fields of a challenge object before it is
-// given to the client.
-func (ch Challenge) CheckConsistencyForClientOffer() error {
- err := ch.checkConsistency()
- if err != nil {
- return err
- }
-
- // Before completion, the key authorization field should be empty
- if ch.ProvidedKeyAuthorization != "" {
- return fmt.Errorf("A response to this challenge was already submitted.")
- }
- return nil
-}
-
-// CheckConsistencyForValidation checks the fields of a challenge object before it is
-// given to the VA.
-func (ch Challenge) CheckConsistencyForValidation() error {
- err := ch.checkConsistency()
- if err != nil {
- return err
- }
-
- // If the challenge is completed, then there should be a key authorization
- return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization)
-}
-
-// checkConsistency checks the sanity of a challenge object before issued to the client.
-func (ch Challenge) checkConsistency() error {
+// CheckPending ensures that a challenge object is pending and has a token.
+// This is used before offering the challenge to the client, and before actually
+// validating a challenge.
+func (ch Challenge) CheckPending() error {
if ch.Status != StatusPending {
- return fmt.Errorf("The challenge is not pending.")
+ return fmt.Errorf("challenge is not pending")
}
- // There always needs to be a token
- if !LooksLikeAToken(ch.Token) {
- return fmt.Errorf("The token is missing.")
+ if !looksLikeAToken(ch.Token) {
+ return fmt.Errorf("token is missing or malformed")
}
+
return nil
}
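A minimal sketch of how a caller might use the consolidated check above (hypothetical function; assumes only the patched core package):

package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/core"
)

// offerChallenge illustrates the new single call site: CheckPending
// replaces both CheckConsistencyForClientOffer and
// CheckConsistencyForValidation, now that the key-authorization check
// has moved off this type.
func offerChallenge(ch core.Challenge) error {
	if err := ch.CheckPending(); err != nil {
		return fmt.Errorf("refusing to offer challenge: %w", err)
	}
	return nil
}

func main() {
	err := offerChallenge(core.Challenge{Status: core.StatusPending})
	fmt.Println(err) // the token is empty, so this reports it as malformed
}
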
diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go
index 31f6d2fcf..641521f16 100644
--- a/vendor/github.com/letsencrypt/boulder/core/util.go
+++ b/vendor/github.com/letsencrypt/boulder/core/util.go
@@ -76,9 +76,9 @@ func NewToken() string {
var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`)
-// LooksLikeAToken checks whether a string represents a 32-octet value in
+// looksLikeAToken checks whether a string represents a 32-octet value in
// the URL-safe base64 alphabet.
-func LooksLikeAToken(token string) bool {
+func looksLikeAToken(token string) bool {
return tokenFormat.MatchString(token)
}
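Since looksLikeAToken is now unexported, the observable contract outside the package runs through NewToken; a small sketch (assumes only the patched core package):

package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/core"
)

func main() {
	// NewToken produces a 32-octet random value in unpadded URL-safe
	// base64, i.e. exactly the 43-character form that tokenFormat accepts.
	t := core.NewToken()
	fmt.Println(len(t) == 43, t)
}
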
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
index 087a01812..04a075d35 100644
--- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
@@ -39,6 +39,9 @@ var (
)
type Config struct {
+ // AllowedKeys enables or disables specific key algorithms and sizes. If
+ // nil, defaults to just those keys allowed by the Let's Encrypt CPS.
+ AllowedKeys *AllowedKeys
// WeakKeyFile is the path to a JSON file containing truncated modulus hashes
// of known weak RSA keys. If this config value is empty, then RSA modulus
// hash checking will be disabled.
@@ -54,6 +57,40 @@ type Config struct {
FermatRounds int
}
+// AllowedKeys holds six specific key algorithm and size combinations, each
+// with a boolean indicating whether keys of that type are considered good.
+type AllowedKeys struct {
+ // Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
+ // of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
+ // Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
+ // have a known method to easily compute their private key, such as Debian Weak
+ // Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
+ // common key sizes, so we restrict all issuance to those common key sizes.
+ RSA2048 bool
+ RSA3072 bool
+ RSA4096 bool
+ // Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid
+ // points on the NIST P-256, P-384, or P-521 elliptic curves.
+ ECDSAP256 bool
+ ECDSAP384 bool
+ ECDSAP521 bool
+}
+
+// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's
+// Encrypt CPS DV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3072, RSA
+// 4096, ECDSA P-256 and ECDSA P-384.
+// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate
+// If this is ever changed, the CP/CPS MUST be changed first.
+func LetsEncryptCPS() AllowedKeys {
+ return AllowedKeys{
+ RSA2048: true,
+ RSA3072: true,
+ RSA4096: true,
+ ECDSAP256: true,
+ ECDSAP384: true,
+ }
+}
+
// ErrBadKey represents an error with a key. It is distinct from the various
// ways in which an ACME request can have an erroneous key (BadPublicKeyError,
// BadCSRError) because this library is used to check both JWS signing keys and
@@ -74,28 +111,29 @@ type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
// KeyPolicy determines which types of key may be used with various boulder
// operations.
type KeyPolicy struct {
- AllowRSA bool // Whether RSA keys should be allowed.
- AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed.
- AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed.
- weakRSAList *WeakRSAKeys
- blockedList *blockedKeys
- fermatRounds int
- blockedCheck BlockedKeyCheckFunc
+ allowedKeys AllowedKeys
+ weakRSAList *WeakRSAKeys
+ blockedList *blockedKeys
+ fermatRounds int
+ blockedCheck BlockedKeyCheckFunc
}
-// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
-// weakKeyFile contains the path to a JSON file containing truncated modulus
-// hashes of known weak RSA keys. If this argument is empty RSA modulus hash
-// checking will be disabled. blockedKeyFile contains the path to a YAML file
-// containing Base64 encoded SHA256 hashes of pkix subject public keys that
-// should be blocked. If this argument is empty then no blocked key checking is
-// performed.
-func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
+// NewPolicy returns a key policy based on the given configuration, with sane
+// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys
+// is used. If the config's WeakKeyFile or BlockedKeyFile paths are empty, those
+// checks are disabled. If the config's FermatRounds is 0, Fermat Factorization
+// is disabled.
+func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
+ if config == nil {
+ config = &Config{}
+ }
kp := KeyPolicy{
- AllowRSA: true,
- AllowECDSANISTP256: true,
- AllowECDSANISTP384: true,
- blockedCheck: bkc,
+ blockedCheck: bkc,
+ }
+ if config.AllowedKeys == nil {
+ kp.allowedKeys = LetsEncryptCPS()
+ } else {
+ kp.allowedKeys = *config.AllowedKeys
}
if config.WeakKeyFile != "" {
keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
@@ -264,44 +302,30 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
// Simply use a whitelist for now.
params := c.Params()
switch {
- case policy.AllowECDSANISTP256 && params == elliptic.P256().Params():
+ case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params():
+ return nil
+ case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params():
return nil
- case policy.AllowECDSANISTP384 && params == elliptic.P384().Params():
+ case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params():
return nil
default:
return badKey("ECDSA curve %v not allowed", params.Name)
}
}
-// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
-// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
-// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
-// have a known method to easily compute their private key, such as Debian Weak
-// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
-// common key sizes, so we restrict all issuance to those common key sizes.
-var acceptableRSAKeySizes = map[int]bool{
- 2048: true,
- 3072: true,
- 4096: true,
-}
-
// GoodKeyRSA determines if a RSA pubkey meets our requirements
-func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
- if !policy.AllowRSA {
- return badKey("RSA keys are not allowed")
+func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error {
+ modulus := key.N
+
+ err := policy.goodRSABitLen(key)
+ if err != nil {
+ return err
}
+
if policy.weakRSAList != nil && policy.weakRSAList.Known(key) {
return badKey("key is on a known weak RSA key list")
}
- modulus := key.N
-
- // See comment on acceptableRSAKeySizes above.
- modulusBitLen := modulus.BitLen()
- if !acceptableRSAKeySizes[modulusBitLen] {
- return badKey("key size not supported: %d", modulusBitLen)
- }
-
// Rather than support arbitrary exponents, which significantly increases
// the size of the key space we allow, we restrict E to the defacto standard
// RSA exponent 65537. There is no specific standards document that specifies
@@ -341,6 +365,21 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
return nil
}
+func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error {
+ // See comment on AllowedKeys above.
+ modulusBitLen := key.N.BitLen()
+ switch {
+ case modulusBitLen == 2048 && policy.allowedKeys.RSA2048:
+ return nil
+ case modulusBitLen == 3072 && policy.allowedKeys.RSA3072:
+ return nil
+ case modulusBitLen == 4096 && policy.allowedKeys.RSA4096:
+ return nil
+ default:
+ return badKey("key size not supported: %d", modulusBitLen)
+ }
+}
+
// Returns true iff integer i is divisible by any of the primes in smallPrimes.
//
// Short circuits; execution time is dependent on i. Do not use this on secret
@@ -400,7 +439,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error {
b2 := new(big.Int)
b2.Mul(a, a).Sub(b2, n)
- for i := 0; i < rounds; i++ {
+ for range rounds {
// To see if b2 is a perfect square, we take its square root, square that,
// and check to see if we got the same result back.
bb.Sqrt(b2).Mul(bb, bb)
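A usage sketch of the reworked constructor (hypothetical caller; a nil BlockedKeyCheckFunc is assumed acceptable, since the other checks are config-gated):

package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/goodkey"
)

func main() {
	// With a nil config, NewPolicy falls back to LetsEncryptCPS():
	// RSA 2048/3072/4096 plus ECDSA P-256 and P-384 (P-521 stays off).
	defaultPolicy, err := goodkey.NewPolicy(nil, nil)
	fmt.Println(defaultPolicy, err)

	// The new AllowedKeys knob makes the formerly hard-coded size list
	// configurable, e.g. an ECDSA P-256-only profile:
	cfg := &goodkey.Config{AllowedKeys: &goodkey.AllowedKeys{ECDSAP256: true}}
	p256Only, err := goodkey.NewPolicy(cfg, nil)
	fmt.Println(p256Only, err)
}
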
diff --git a/vendor/github.com/moby/sys/capability/.codespellrc b/vendor/github.com/moby/sys/capability/.codespellrc
new file mode 100644
index 000000000..e874be563
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/.codespellrc
@@ -0,0 +1,3 @@
+[codespell]
+skip = ./.git
+ignore-words-list = nd
diff --git a/vendor/github.com/moby/sys/capability/.golangci.yml b/vendor/github.com/moby/sys/capability/.golangci.yml
new file mode 100644
index 000000000..d775aadd6
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/.golangci.yml
@@ -0,0 +1,6 @@
+linters:
+ enable:
+ - unconvert
+ - unparam
+ - gofumpt
+ - errorlint
diff --git a/vendor/github.com/moby/sys/capability/CHANGELOG.md b/vendor/github.com/moby/sys/capability/CHANGELOG.md
new file mode 100644
index 000000000..037ef010a
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/CHANGELOG.md
@@ -0,0 +1,90 @@
+# Changelog
+This file documents all notable changes made to this project since the initial fork
+from https://github.com/syndtr/gocapability/commit/42c35b4376354fd5.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.3.0] - 2024-09-25
+
+### Added
+* Added [ListKnown] and [ListSupported] functions. (#153)
+* [LastCap] is now available on non-Linux platforms (where it returns an error). (#152)
+
+### Changed
+* [List] is now deprecated in favor of [ListKnown] and [ListSupported]. (#153)
+
+### Fixed
+* Various documentation improvements. (#151)
+* Fix "generated code" comment. (#153)
+
+## [0.2.0] - 2024-09-16
+
+This is the first release after the move to a new home in
+github.com/moby/sys/capability.
+
+### Fixed
+ * Fixed URLs in documentation to reflect the new home.
+
+## [0.1.1] - 2024-08-01
+
+This is a maintenance release, fixing a few minor issues.
+
+### Fixed
+ * Fixed future kernel compatibility, for real this time. [#11]
+ * Fixed [LastCap] to be a function. [#12]
+
+## [0.1.0] - 2024-07-31
+
+This is an initial release since the fork.
+
+### Breaking changes
+
+ * The `CAP_LAST_CAP` variable is removed; users need to modify the code to
+ use [LastCap] to get the value. [#6]
+ * The code now requires Go >= 1.21.
+
+### Added
+ * `go.mod` and `go.sum` files. [#2]
+ * New [LastCap] function. [#6]
+ * Basic CI using GHA infra. [#8], [#9]
+ * README and CHANGELOG. [#10]
+
+### Fixed
+ * Fixed ambient capabilities error handling in [Apply]. [#3]
+ * Fixed future kernel compatibility. [#1]
+ * Fixed various linter warnings. [#4], [#7]
+
+### Changed
+ * Go build tags changed from old-style (`+build`) to new Go 1.17+ style (`go:build`). [#2]
+
+### Removed
+ * Removed support for capabilities v1 and v2. [#1]
+ * Removed init function so programs that use this package start faster. [#6]
+ * Removed `CAP_LAST_CAP` (use [LastCap] instead). [#6]
+
+<!-- Doc links. -->
+[Apply]: https://pkg.go.dev/github.com/moby/sys/capability#Capabilities.Apply
+[LastCap]: https://pkg.go.dev/github.com/moby/sys/capability#LastCap
+[List]: https://pkg.go.dev/github.com/moby/sys/capability#List
+[ListKnown]: https://pkg.go.dev/github.com/moby/sys/capability#ListKnown
+[ListSupported]: https://pkg.go.dev/github.com/moby/sys/capability#ListSupported
+
+<!-- Minor releases. -->
+[0.3.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.3.0
+[0.2.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.2.0
+[0.1.1]: https://github.com/kolyshkin/capability/compare/v0.1.0...v0.1.1
+[0.1.0]: https://github.com/kolyshkin/capability/compare/42c35b4376354fd5...v0.1.0
+
+<!-- PRs in 0.1.x releases. -->
+[#1]: https://github.com/kolyshkin/capability/pull/1
+[#2]: https://github.com/kolyshkin/capability/pull/2
+[#3]: https://github.com/kolyshkin/capability/pull/3
+[#4]: https://github.com/kolyshkin/capability/pull/4
+[#6]: https://github.com/kolyshkin/capability/pull/6
+[#7]: https://github.com/kolyshkin/capability/pull/7
+[#8]: https://github.com/kolyshkin/capability/pull/8
+[#9]: https://github.com/kolyshkin/capability/pull/9
+[#10]: https://github.com/kolyshkin/capability/pull/10
+[#11]: https://github.com/kolyshkin/capability/pull/11
+[#12]: https://github.com/kolyshkin/capability/pull/12
diff --git a/vendor/github.com/moby/sys/capability/LICENSE b/vendor/github.com/moby/sys/capability/LICENSE
new file mode 100644
index 000000000..08adcd6ec
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/LICENSE
@@ -0,0 +1,25 @@
+Copyright 2023 The Capability Authors.
+Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/moby/sys/capability/README.md b/vendor/github.com/moby/sys/capability/README.md
new file mode 100644
index 000000000..84b74871a
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/README.md
@@ -0,0 +1,13 @@
+This is a fork of the (apparently no longer maintained)
+https://github.com/syndtr/gocapability package. It provides basic primitives to
+work with [Linux capabilities][capabilities(7)].
+
+For changes, see [CHANGELOG.md](./CHANGELOG.md).
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/moby/sys/capability/capability.svg)](https://pkg.go.dev/github.com/moby/sys/capability)
+
+## Alternatives
+
+ * https://pkg.go.dev/kernel.org/pub/linux/libs/security/libcap/cap
+
+[capabilities(7)]: https://man7.org/linux/man-pages/man7/capabilities.7.html
diff --git a/vendor/github.com/moby/sys/capability/capability.go b/vendor/github.com/moby/sys/capability/capability.go
new file mode 100644
index 000000000..1b36f5f22
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/capability.go
@@ -0,0 +1,144 @@
+// Copyright 2023 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package capability provides utilities for manipulating POSIX capabilities.
+package capability
+
+type Capabilities interface {
+ // Get checks whether a capability is present in the given
+ // capabilities set. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Get(which CapType, what Cap) bool
+
+ // Empty checks whether all capability bits of the given capabilities
+ // set are zero. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Empty(which CapType) bool
+
+ // Full checks whether all capability bits of the given capabilities
+ // set are one. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Full(which CapType) bool
+
+ // Set sets capabilities of the given capabilities sets. The
+ // 'which' value should be one or a combination (OR'ed) of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Set(which CapType, caps ...Cap)
+
+ // Unset unsets capabilities of the given capabilities sets. The
+ // 'which' value should be one or a combination (OR'ed) of EFFECTIVE,
+ // PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ Unset(which CapType, caps ...Cap)
+
+ // Fill sets all bits of the given capabilities kind to one. The
+ // 'kind' value should be one or a combination (OR'ed) of CAPS,
+ // BOUNDS or AMBS.
+ Fill(kind CapType)
+
+ // Clear sets all bits of the given capabilities kind to zero. The
+ // 'kind' value should be one or a combination (OR'ed) of CAPS,
+ // BOUNDS or AMBS.
+ Clear(kind CapType)
+
+ // StringCap returns the current capabilities state of the given
+ // capabilities set as a string. The 'which' value should be one of
+ // EFFECTIVE, PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+ StringCap(which CapType) string
+
+ // String returns the current capabilities state as a string.
+ String() string
+
+ // Load loads the actual capabilities value. This will overwrite all
+ // outstanding changes.
+ Load() error
+
+ // Apply applies the capabilities settings, so all changes take
+ // effect.
+ Apply(kind CapType) error
+}
+
+// NewPid initializes a new [Capabilities] object for the given pid when
+// it is nonzero, or for the current process if pid is 0.
+//
+// Deprecated: Replace with [NewPid2] followed by [Capabilities.Load].
+// For example, replace:
+//
+// c, err := NewPid(0)
+// if err != nil {
+// return err
+// }
+//
+// with:
+//
+// c, err := NewPid2(0)
+// if err != nil {
+// return err
+// }
+// err = c.Load()
+// if err != nil {
+// return err
+// }
+func NewPid(pid int) (Capabilities, error) {
+ c, err := newPid(pid)
+ if err != nil {
+ return c, err
+ }
+ err = c.Load()
+ return c, err
+}
+
+// NewPid2 initializes a new [Capabilities] object for the given pid when
+// it is nonzero, or for the current process if pid is 0. This
+// does not load the process's current capabilities; to do that you
+// must call [Capabilities.Load] explicitly.
+func NewPid2(pid int) (Capabilities, error) {
+ return newPid(pid)
+}
+
+// NewFile initializes a new Capabilities object for the given file path.
+//
+// Deprecated: Replace with [NewFile2] followed by [Capabilities.Load].
+// For example, replace:
+//
+// c, err := NewFile(path)
+// if err != nil {
+// return err
+// }
+//
+// with:
+//
+// c, err := NewFile2(path)
+// if err != nil {
+// return err
+// }
+// err = c.Load()
+// if err != nil {
+// return err
+// }
+func NewFile(path string) (Capabilities, error) {
+ c, err := newFile(path)
+ if err != nil {
+ return c, err
+ }
+ err = c.Load()
+ return c, err
+}
+
+// NewFile2 creates a new [Capabilities] object for the given file
+// path. This does not load the file's current capabilities; to do
+// that you must call [Capabilities.Load] explicitly.
+func NewFile2(path string) (Capabilities, error) {
+ return newFile(path)
+}
+
+// LastCap returns the highest valid capability of the running kernel,
+// or an error if it cannot be obtained.
+//
+// See also: [ListSupported].
+func LastCap() (Cap, error) {
+ return lastCap()
+}
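A minimal sketch of the non-deprecated entry points defined above (assumes a Linux host, since the no-op build returns errors):

package main

import (
	"fmt"

	"github.com/moby/sys/capability"
)

func main() {
	// NewPid2 does not read anything by itself; an explicit Load is
	// required before querying, which is exactly what the deprecated
	// NewPid used to bundle together.
	caps, err := capability.NewPid2(0) // 0 means the current process
	if err != nil {
		panic(err)
	}
	if err := caps.Load(); err != nil {
		panic(err)
	}
	fmt.Println("effective CAP_NET_ADMIN:",
		caps.Get(capability.EFFECTIVE, capability.CAP_NET_ADMIN))

	// LastCap reports the highest capability the running kernel knows.
	last, err := capability.LastCap()
	fmt.Println("last cap:", last, err)
}
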
diff --git a/vendor/github.com/moby/sys/capability/capability_linux.go b/vendor/github.com/moby/sys/capability/capability_linux.go
new file mode 100644
index 000000000..aa600e1d9
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/capability_linux.go
@@ -0,0 +1,541 @@
+// Copyright 2023 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package capability
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+)
+
+const (
+ linuxCapVer1 = 0x19980330 // No longer supported.
+ linuxCapVer2 = 0x20071026 // No longer supported.
+ linuxCapVer3 = 0x20080522
+)
+
+var lastCap = sync.OnceValues(func() (Cap, error) {
+ f, err := os.Open("/proc/sys/kernel/cap_last_cap")
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, 11)
+ l, err := f.Read(buf)
+ f.Close()
+ if err != nil {
+ return 0, err
+ }
+ buf = buf[:l]
+
+ last, err := strconv.Atoi(strings.TrimSpace(string(buf)))
+ if err != nil {
+ return 0, err
+ }
+ return Cap(last), nil
+})
+
+func capUpperMask() uint32 {
+ last, err := lastCap()
+ if err != nil || last < 32 {
+ return 0
+ }
+ return (uint32(1) << (uint(last) - 31)) - 1
+}
+
+func mkStringCap(c Capabilities, which CapType) (ret string) {
+ last, err := lastCap()
+ if err != nil {
+ return ""
+ }
+ for i, first := Cap(0), true; i <= last; i++ {
+ if !c.Get(which, i) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ ret += ", "
+ }
+ ret += i.String()
+ }
+ return
+}
+
+func mkString(c Capabilities, max CapType) (ret string) {
+ ret = "{"
+ for i := CapType(1); i <= max; i <<= 1 {
+ ret += " " + i.String() + "=\""
+ if c.Empty(i) {
+ ret += "empty"
+ } else if c.Full(i) {
+ ret += "full"
+ } else {
+ ret += c.StringCap(i)
+ }
+ ret += "\""
+ }
+ ret += " }"
+ return
+}
+
+var capVersion = sync.OnceValues(func() (uint32, error) {
+ var hdr capHeader
+ err := capget(&hdr, nil)
+ return hdr.version, err
+})
+
+func newPid(pid int) (c Capabilities, retErr error) {
+ ver, err := capVersion()
+ if err != nil {
+ retErr = fmt.Errorf("unable to get capability version from the kernel: %w", err)
+ return
+ }
+ switch ver {
+ case linuxCapVer1, linuxCapVer2:
+ retErr = errors.New("old/unsupported capability version (kernel older than 2.6.26?)")
+ default:
+ // Either linuxCapVer3, or an unknown/future version (such as v4).
+ // In the latter case, we fall back to v3 as the latest version known
+ // to this package, as the kernel should be backward-compatible with v3.
+ p := new(capsV3)
+ p.hdr.version = linuxCapVer3
+ p.hdr.pid = int32(pid)
+ c = p
+ }
+ return
+}
+
+type capsV3 struct {
+ hdr capHeader
+ data [2]capData
+ bounds [2]uint32
+ ambient [2]uint32
+}
+
+func (c *capsV3) Get(which CapType, what Cap) bool {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data[i].effective != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data[i].permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data[i].inheritable != 0
+ case BOUNDING:
+ return (1<<uint(what))&c.bounds[i] != 0
+ case AMBIENT:
+ return (1<<uint(what))&c.ambient[i] != 0
+ }
+
+ return false
+}
+
+func (c *capsV3) getData(which CapType, dest []uint32) {
+ switch which {
+ case EFFECTIVE:
+ dest[0] = c.data[0].effective
+ dest[1] = c.data[1].effective
+ case PERMITTED:
+ dest[0] = c.data[0].permitted
+ dest[1] = c.data[1].permitted
+ case INHERITABLE:
+ dest[0] = c.data[0].inheritable
+ dest[1] = c.data[1].inheritable
+ case BOUNDING:
+ dest[0] = c.bounds[0]
+ dest[1] = c.bounds[1]
+ case AMBIENT:
+ dest[0] = c.ambient[0]
+ dest[1] = c.ambient[1]
+ }
+}
+
+func (c *capsV3) Empty(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ return data[0] == 0 && data[1] == 0
+}
+
+func (c *capsV3) Full(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ if (data[0] & 0xffffffff) != 0xffffffff {
+ return false
+ }
+ mask := capUpperMask()
+ return (data[1] & mask) == mask
+}
+
+func (c *capsV3) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data[i].effective |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data[i].permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data[i].inheritable |= 1 << uint(what)
+ }
+ if which&BOUNDING != 0 {
+ c.bounds[i] |= 1 << uint(what)
+ }
+ if which&AMBIENT != 0 {
+ c.ambient[i] |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsV3) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data[i].effective &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data[i].permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data[i].inheritable &= ^(1 << uint(what))
+ }
+ if which&BOUNDING != 0 {
+ c.bounds[i] &= ^(1 << uint(what))
+ }
+ if which&AMBIENT != 0 {
+ c.ambient[i] &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsV3) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data[0].effective = 0xffffffff
+ c.data[0].permitted = 0xffffffff
+ c.data[0].inheritable = 0
+ c.data[1].effective = 0xffffffff
+ c.data[1].permitted = 0xffffffff
+ c.data[1].inheritable = 0
+ }
+
+ if kind&BOUNDS == BOUNDS {
+ c.bounds[0] = 0xffffffff
+ c.bounds[1] = 0xffffffff
+ }
+ if kind&AMBS == AMBS {
+ c.ambient[0] = 0xffffffff
+ c.ambient[1] = 0xffffffff
+ }
+}
+
+func (c *capsV3) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data[0].effective = 0
+ c.data[0].permitted = 0
+ c.data[0].inheritable = 0
+ c.data[1].effective = 0
+ c.data[1].permitted = 0
+ c.data[1].inheritable = 0
+ }
+
+ if kind&BOUNDS == BOUNDS {
+ c.bounds[0] = 0
+ c.bounds[1] = 0
+ }
+ if kind&AMBS == AMBS {
+ c.ambient[0] = 0
+ c.ambient[1] = 0
+ }
+}
+
+func (c *capsV3) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsV3) String() (ret string) {
+ return mkString(c, BOUNDING)
+}
+
+func (c *capsV3) Load() (err error) {
+ err = capget(&c.hdr, &c.data[0])
+ if err != nil {
+ return
+ }
+
+ path := "/proc/self/status"
+ if c.hdr.pid != 0 {
+ path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
+ }
+
+ f, err := os.Open(path)
+ if err != nil {
+ return
+ }
+ b := bufio.NewReader(f)
+ for {
+ line, e := b.ReadString('\n')
+ if e != nil {
+ if e != io.EOF {
+ err = e
+ }
+ break
+ }
+ if strings.HasPrefix(line, "CapB") {
+ _, err = fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
+ if err != nil {
+ break
+ }
+ continue
+ }
+ if strings.HasPrefix(line, "CapA") {
+ _, err = fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0])
+ if err != nil {
+ break
+ }
+ continue
+ }
+ }
+ f.Close()
+
+ return
+}
+
+func (c *capsV3) Apply(kind CapType) (err error) {
+ last, err := LastCap()
+ if err != nil {
+ return err
+ }
+ if kind&BOUNDS == BOUNDS {
+ var data [2]capData
+ err = capget(&c.hdr, &data[0])
+ if err != nil {
+ return
+ }
+ if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
+ for i := Cap(0); i <= last; i++ {
+ if c.Get(BOUNDING, i) {
+ continue
+ }
+ err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
+ if err != nil {
+ // Ignore EINVAL since the capability may not be supported in this system.
+ if err == syscall.EINVAL { //nolint:errorlint // Errors from syscall are bare.
+ err = nil
+ continue
+ }
+ return
+ }
+ }
+ }
+ }
+
+ if kind&CAPS == CAPS {
+ err = capset(&c.hdr, &c.data[0])
+ if err != nil {
+ return
+ }
+ }
+
+ if kind&AMBS == AMBS {
+ for i := Cap(0); i <= last; i++ {
+ action := pr_CAP_AMBIENT_LOWER
+ if c.Get(AMBIENT, i) {
+ action = pr_CAP_AMBIENT_RAISE
+ }
+ err = prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0)
+ if err != nil {
+ // Ignore EINVAL, as ambient capabilities are not supported on kernels before 4.3.
+ if err == syscall.EINVAL { //nolint:errorlint // Errors from syscall are bare.
+ err = nil
+ continue
+ }
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func newFile(path string) (c Capabilities, err error) {
+ c = &capsFile{path: path}
+ return
+}
+
+type capsFile struct {
+ path string
+ data vfscapData
+}
+
+func (c *capsFile) Get(which CapType, what Cap) bool {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ return false
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data.effective[i] != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data.data[i].permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data.data[i].inheritable != 0
+ }
+
+ return false
+}
+
+func (c *capsFile) getData(which CapType, dest []uint32) {
+ switch which {
+ case EFFECTIVE:
+ dest[0] = c.data.effective[0]
+ dest[1] = c.data.effective[1]
+ case PERMITTED:
+ dest[0] = c.data.data[0].permitted
+ dest[1] = c.data.data[1].permitted
+ case INHERITABLE:
+ dest[0] = c.data.data[0].inheritable
+ dest[1] = c.data.data[1].inheritable
+ }
+}
+
+func (c *capsFile) Empty(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ return data[0] == 0 && data[1] == 0
+}
+
+func (c *capsFile) Full(which CapType) bool {
+ var data [2]uint32
+ c.getData(which, data[:])
+ if c.data.version == 0 {
+ return (data[0] & 0x7fffffff) == 0x7fffffff
+ }
+ if (data[0] & 0xffffffff) != 0xffffffff {
+ return false
+ }
+ mask := capUpperMask()
+ return (data[1] & mask) == mask
+}
+
+func (c *capsFile) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ continue
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective[i] |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data.data[i].permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data.data[i].inheritable |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsFile) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ continue
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective[i] &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data.data[i].permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data.data[i].inheritable &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsFile) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective[0] = 0xffffffff
+ c.data.data[0].permitted = 0xffffffff
+ c.data.data[0].inheritable = 0
+ if c.data.version == 2 {
+ c.data.effective[1] = 0xffffffff
+ c.data.data[1].permitted = 0xffffffff
+ c.data.data[1].inheritable = 0
+ }
+ }
+}
+
+func (c *capsFile) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective[0] = 0
+ c.data.data[0].permitted = 0
+ c.data.data[0].inheritable = 0
+ if c.data.version == 2 {
+ c.data.effective[1] = 0
+ c.data.data[1].permitted = 0
+ c.data.data[1].inheritable = 0
+ }
+ }
+}
+
+func (c *capsFile) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsFile) String() (ret string) {
+ return mkString(c, INHERITABLE)
+}
+
+func (c *capsFile) Load() (err error) {
+ return getVfsCap(c.path, &c.data)
+}
+
+func (c *capsFile) Apply(kind CapType) (err error) {
+ if kind&CAPS == CAPS {
+ return setVfsCap(c.path, &c.data)
+ }
+ return
+}
diff --git a/vendor/github.com/moby/sys/capability/capability_noop.go b/vendor/github.com/moby/sys/capability/capability_noop.go
new file mode 100644
index 000000000..ba819ff05
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/capability_noop.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package capability
+
+import "errors"
+
+var errNotSup = errors.New("not supported")
+
+func newPid(_ int) (Capabilities, error) {
+ return nil, errNotSup
+}
+
+func newFile(_ string) (Capabilities, error) {
+ return nil, errNotSup
+}
+
+func lastCap() (Cap, error) {
+ return -1, errNotSup
+}
diff --git a/vendor/github.com/moby/sys/capability/enum.go b/vendor/github.com/moby/sys/capability/enum.go
new file mode 100644
index 000000000..f89f0273a
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/enum.go
@@ -0,0 +1,330 @@
+// Copyright 2024 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package capability
+
+import "slices"
+
+type CapType uint
+
+func (c CapType) String() string {
+ switch c {
+ case EFFECTIVE:
+ return "effective"
+ case PERMITTED:
+ return "permitted"
+ case INHERITABLE:
+ return "inheritable"
+ case BOUNDING:
+ return "bounding"
+ case CAPS:
+ return "caps"
+ case AMBIENT:
+ return "ambient"
+ }
+ return "unknown"
+}
+
+const (
+ EFFECTIVE CapType = 1 << iota
+ PERMITTED
+ INHERITABLE
+ BOUNDING
+ AMBIENT
+
+ CAPS = EFFECTIVE | PERMITTED | INHERITABLE
+ BOUNDS = BOUNDING
+ AMBS = AMBIENT
+)
+
+//go:generate go run enumgen/gen.go
+type Cap int
+
+// POSIX-draft defined capabilities and Linux extensions.
+//
+// Defined in https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h
+const (
+ // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
+ // overrides the restriction of changing file ownership and group
+ // ownership.
+ CAP_CHOWN = Cap(0)
+
+ // Override all DAC access, including ACL execute access if
+ // [_POSIX_ACL] is defined. Excluding DAC access covered by
+ // CAP_LINUX_IMMUTABLE.
+ CAP_DAC_OVERRIDE = Cap(1)
+
+ // Overrides all DAC restrictions regarding read and search on files
+ // and directories, including ACL restrictions if [_POSIX_ACL] is
+ // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE.
+ CAP_DAC_READ_SEARCH = Cap(2)
+
+ // Overrides all restrictions about allowed operations on files, where
+ // file owner ID must be equal to the user ID, except where CAP_FSETID
+ // is applicable. It doesn't override MAC and DAC restrictions.
+ CAP_FOWNER = Cap(3)
+
+ // Overrides the following restrictions that the effective user ID
+ // shall match the file owner ID when setting the S_ISUID and S_ISGID
+ // bits on that file; that the effective group ID (or one of the
+ // supplementary group IDs) shall match the file owner ID when setting
+ // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are
+ // cleared on successful return from chown(2) (not implemented).
+ CAP_FSETID = Cap(4)
+
+ // Overrides the restriction that the real or effective user ID of a
+ // process sending a signal must match the real or effective user ID
+ // of the process receiving the signal.
+ CAP_KILL = Cap(5)
+
+ // Allows setgid(2) manipulation
+ // Allows setgroups(2)
+ // Allows forged gids on socket credentials passing.
+ CAP_SETGID = Cap(6)
+
+ // Allows set*uid(2) manipulation (including fsuid).
+ // Allows forged pids on socket credentials passing.
+ CAP_SETUID = Cap(7)
+
+ // Linux-specific capabilities
+
+ // Without VFS support for capabilities:
+ // Transfer any capability in your permitted set to any pid,
+ // remove any capability in your permitted set from any pid
+ // With VFS support for capabilities (neither of above, but)
+ // Add any capability from current's capability bounding set
+ // to the current process' inheritable set
+ // Allow taking bits out of capability bounding set
+ // Allow modification of the securebits for a process
+ CAP_SETPCAP = Cap(8)
+
+ // Allow modification of S_IMMUTABLE and S_APPEND file attributes
+ CAP_LINUX_IMMUTABLE = Cap(9)
+
+ // Allows binding to TCP/UDP sockets below 1024
+ // Allows binding to ATM VCIs below 32
+ CAP_NET_BIND_SERVICE = Cap(10)
+
+ // Allow broadcasting, listen to multicast
+ CAP_NET_BROADCAST = Cap(11)
+
+ // Allow interface configuration
+ // Allow administration of IP firewall, masquerading and accounting
+ // Allow setting debug option on sockets
+ // Allow modification of routing tables
+ // Allow setting arbitrary process / process group ownership on
+ // sockets
+ // Allow binding to any address for transparent proxying (also via NET_RAW)
+ // Allow setting TOS (type of service)
+ // Allow setting promiscuous mode
+ // Allow clearing driver statistics
+ // Allow multicasting
+ // Allow read/write of device-specific registers
+ // Allow activation of ATM control sockets
+ CAP_NET_ADMIN = Cap(12)
+
+ // Allow use of RAW sockets
+ // Allow use of PACKET sockets
+ // Allow binding to any address for transparent proxying (also via NET_ADMIN)
+ CAP_NET_RAW = Cap(13)
+
+ // Allow locking of shared memory segments
+ // Allow mlock and mlockall (which doesn't really have anything to do
+ // with IPC)
+ CAP_IPC_LOCK = Cap(14)
+
+ // Override IPC ownership checks
+ CAP_IPC_OWNER = Cap(15)
+
+ // Insert and remove kernel modules - modify kernel without limit
+ CAP_SYS_MODULE = Cap(16)
+
+ // Allow ioperm/iopl access
+ // Allow sending USB messages to any device via /proc/bus/usb
+ CAP_SYS_RAWIO = Cap(17)
+
+ // Allow use of chroot()
+ CAP_SYS_CHROOT = Cap(18)
+
+ // Allow ptrace() of any process
+ CAP_SYS_PTRACE = Cap(19)
+
+ // Allow configuration of process accounting
+ CAP_SYS_PACCT = Cap(20)
+
+ // Allow configuration of the secure attention key
+ // Allow administration of the random device
+ // Allow examination and configuration of disk quotas
+ // Allow setting the domainname
+ // Allow setting the hostname
+ // Allow calling bdflush()
+ // Allow mount() and umount(), setting up new smb connection
+ // Allow some autofs root ioctls
+ // Allow nfsservctl
+ // Allow VM86_REQUEST_IRQ
+ // Allow to read/write pci config on alpha
+ // Allow irix_prctl on mips (setstacksize)
+ // Allow flushing all cache on m68k (sys_cacheflush)
+ // Allow removing semaphores
+ // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
+ // and shared memory
+ // Allow locking/unlocking of shared memory segment
+ // Allow turning swap on/off
+ // Allow forged pids on socket credentials passing
+ // Allow setting readahead and flushing buffers on block devices
+ // Allow setting geometry in floppy driver
+ // Allow turning DMA on/off in xd driver
+ // Allow administration of md devices (mostly the above, but some
+ // extra ioctls)
+ // Allow tuning the ide driver
+ // Allow access to the nvram device
+ // Allow administration of apm_bios, serial and bttv (TV) device
+ // Allow manufacturer commands in isdn CAPI support driver
+ // Allow reading non-standardized portions of pci configuration space
+ // Allow DDI debug ioctl on sbpcd driver
+ // Allow setting up serial ports
+ // Allow sending raw qic-117 commands
+ // Allow enabling/disabling tagged queuing on SCSI controllers and sending
+ // arbitrary SCSI commands
+ // Allow setting encryption key on loopback filesystem
+ // Allow setting zone reclaim policy
+ // Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility
+ CAP_SYS_ADMIN = Cap(21)
+
+ // Allow use of reboot()
+ CAP_SYS_BOOT = Cap(22)
+
+ // Allow raising priority and setting priority on other (different
+ // UID) processes
+ // Allow use of FIFO and round-robin (realtime) scheduling on own
+ // processes and setting the scheduling algorithm used by another
+ // process.
+ // Allow setting cpu affinity on other processes
+ CAP_SYS_NICE = Cap(23)
+
+ // Override resource limits. Set resource limits.
+ // Override quota limits.
+ // Override reserved space on ext2 filesystem
+ // Modify data journaling mode on ext3 filesystem (uses journaling
+ // resources)
+ // NOTE: ext2 honors fsuid when checking for resource overrides, so
+ // you can override using fsuid too
+ // Override size restrictions on IPC message queues
+ // Allow more than 64hz interrupts from the real-time clock
+ // Override max number of consoles on console allocation
+ // Override max number of keymaps
+ // Control memory reclaim behavior
+ CAP_SYS_RESOURCE = Cap(24)
+
+ // Allow manipulation of system clock
+ // Allow irix_stime on mips
+ // Allow setting the real-time clock
+ CAP_SYS_TIME = Cap(25)
+
+ // Allow configuration of tty devices
+ // Allow vhangup() of tty
+ CAP_SYS_TTY_CONFIG = Cap(26)
+
+ // Allow the privileged aspects of mknod()
+ CAP_MKNOD = Cap(27)
+
+ // Allow taking of leases on files
+ CAP_LEASE = Cap(28)
+
+ CAP_AUDIT_WRITE = Cap(29)
+ CAP_AUDIT_CONTROL = Cap(30)
+ CAP_SETFCAP = Cap(31)
+
+ // Override MAC access.
+ // The base kernel enforces no MAC policy.
+ // An LSM may enforce a MAC policy, and if it does and it chooses
+ // to implement capability based overrides of that policy, this is
+ // the capability it should use to do so.
+ CAP_MAC_OVERRIDE = Cap(32)
+
+ // Allow MAC configuration or state changes.
+ // The base kernel requires no MAC configuration.
+ // An LSM may enforce a MAC policy, and if it does and it chooses
+ // to implement capability based checks on modifications to that
+ // policy or the data required to maintain it, this is the
+ // capability it should use to do so.
+ CAP_MAC_ADMIN = Cap(33)
+
+ // Allow configuring the kernel's syslog (printk behaviour)
+ CAP_SYSLOG = Cap(34)
+
+ // Allow triggering something that will wake the system
+ CAP_WAKE_ALARM = Cap(35)
+
+ // Allow preventing system suspends
+ CAP_BLOCK_SUSPEND = Cap(36)
+
+ // Allow reading the audit log via multicast netlink socket
+ CAP_AUDIT_READ = Cap(37)
+
+ // Allow system performance and observability privileged operations
+ // using perf_events, i915_perf and other kernel subsystems
+ CAP_PERFMON = Cap(38)
+
+ // CAP_BPF allows the following BPF operations:
+ // - Creating all types of BPF maps
+ // - Advanced verifier features
+ // - Indirect variable access
+ // - Bounded loops
+ // - BPF to BPF function calls
+ // - Scalar precision tracking
+ // - Larger complexity limits
+ // - Dead code elimination
+ // - And potentially other features
+ // - Loading BPF Type Format (BTF) data
+ // - Retrieve xlated and JITed code of BPF programs
+ // - Use bpf_spin_lock() helper
+ //
+ // CAP_PERFMON relaxes the verifier checks further:
+ // - BPF progs can use of pointer-to-integer conversions
+ // - speculation attack hardening measures are bypassed
+ // - bpf_probe_read to read arbitrary kernel memory is allowed
+ // - bpf_trace_printk to print kernel memory is allowed
+ //
+ // CAP_SYS_ADMIN is required to use bpf_probe_write_user.
+ //
+ // CAP_SYS_ADMIN is required to iterate system wide loaded
+ // programs, maps, links, BTFs and convert their IDs to file descriptors.
+ //
+ // CAP_PERFMON and CAP_BPF are required to load tracing programs.
+ // CAP_NET_ADMIN and CAP_BPF are required to load networking programs.
+ CAP_BPF = Cap(39)
+
+ // Allow checkpoint/restore related operations.
+ // Introduced in kernel 5.9
+ CAP_CHECKPOINT_RESTORE = Cap(40)
+)
+
+// List returns the list of all capabilities known to the package.
+//
+// Deprecated: use [ListKnown] or [ListSupported] instead.
+func List() []Cap {
+ return ListKnown()
+}
+
+// ListKnown returns the list of all capabilities known to the package.
+func ListKnown() []Cap {
+ return list()
+}
+
+// ListSupported returns the list of all capabilities known to the package,
+// except those that are not supported by the currently running Linux kernel.
+func ListSupported() ([]Cap, error) {
+ last, err := LastCap()
+ if err != nil {
+ return nil, err
+ }
+ return slices.DeleteFunc(list(), func(c Cap) bool {
+ // Remove caps not supported by the kernel.
+ return c > last
+ }), nil
+}
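A sketch contrasting the two new list functions (assumes a Linux host, where ListSupported can query the kernel):

package main

import (
	"fmt"

	"github.com/moby/sys/capability"
)

func main() {
	// ListKnown is a compile-time table; ListSupported additionally
	// drops entries above the running kernel's last supported cap.
	known := capability.ListKnown()
	supported, err := capability.ListSupported()
	if err != nil {
		panic(err)
	}
	fmt.Printf("known=%d supported=%d\n", len(known), len(supported))
}
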
diff --git a/vendor/github.com/moby/sys/capability/enum_gen.go b/vendor/github.com/moby/sys/capability/enum_gen.go
new file mode 100644
index 000000000..f72cd43a6
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/enum_gen.go
@@ -0,0 +1,137 @@
+// Code generated by go generate; DO NOT EDIT.
+
+package capability
+
+func (c Cap) String() string {
+ switch c {
+ case CAP_CHOWN:
+ return "chown"
+ case CAP_DAC_OVERRIDE:
+ return "dac_override"
+ case CAP_DAC_READ_SEARCH:
+ return "dac_read_search"
+ case CAP_FOWNER:
+ return "fowner"
+ case CAP_FSETID:
+ return "fsetid"
+ case CAP_KILL:
+ return "kill"
+ case CAP_SETGID:
+ return "setgid"
+ case CAP_SETUID:
+ return "setuid"
+ case CAP_SETPCAP:
+ return "setpcap"
+ case CAP_LINUX_IMMUTABLE:
+ return "linux_immutable"
+ case CAP_NET_BIND_SERVICE:
+ return "net_bind_service"
+ case CAP_NET_BROADCAST:
+ return "net_broadcast"
+ case CAP_NET_ADMIN:
+ return "net_admin"
+ case CAP_NET_RAW:
+ return "net_raw"
+ case CAP_IPC_LOCK:
+ return "ipc_lock"
+ case CAP_IPC_OWNER:
+ return "ipc_owner"
+ case CAP_SYS_MODULE:
+ return "sys_module"
+ case CAP_SYS_RAWIO:
+ return "sys_rawio"
+ case CAP_SYS_CHROOT:
+ return "sys_chroot"
+ case CAP_SYS_PTRACE:
+ return "sys_ptrace"
+ case CAP_SYS_PACCT:
+ return "sys_pacct"
+ case CAP_SYS_ADMIN:
+ return "sys_admin"
+ case CAP_SYS_BOOT:
+ return "sys_boot"
+ case CAP_SYS_NICE:
+ return "sys_nice"
+ case CAP_SYS_RESOURCE:
+ return "sys_resource"
+ case CAP_SYS_TIME:
+ return "sys_time"
+ case CAP_SYS_TTY_CONFIG:
+ return "sys_tty_config"
+ case CAP_MKNOD:
+ return "mknod"
+ case CAP_LEASE:
+ return "lease"
+ case CAP_AUDIT_WRITE:
+ return "audit_write"
+ case CAP_AUDIT_CONTROL:
+ return "audit_control"
+ case CAP_SETFCAP:
+ return "setfcap"
+ case CAP_MAC_OVERRIDE:
+ return "mac_override"
+ case CAP_MAC_ADMIN:
+ return "mac_admin"
+ case CAP_SYSLOG:
+ return "syslog"
+ case CAP_WAKE_ALARM:
+ return "wake_alarm"
+ case CAP_BLOCK_SUSPEND:
+ return "block_suspend"
+ case CAP_AUDIT_READ:
+ return "audit_read"
+ case CAP_PERFMON:
+ return "perfmon"
+ case CAP_BPF:
+ return "bpf"
+ case CAP_CHECKPOINT_RESTORE:
+ return "checkpoint_restore"
+ }
+ return "unknown"
+}
+
+func list() []Cap {
+ return []Cap{
+ CAP_CHOWN,
+ CAP_DAC_OVERRIDE,
+ CAP_DAC_READ_SEARCH,
+ CAP_FOWNER,
+ CAP_FSETID,
+ CAP_KILL,
+ CAP_SETGID,
+ CAP_SETUID,
+ CAP_SETPCAP,
+ CAP_LINUX_IMMUTABLE,
+ CAP_NET_BIND_SERVICE,
+ CAP_NET_BROADCAST,
+ CAP_NET_ADMIN,
+ CAP_NET_RAW,
+ CAP_IPC_LOCK,
+ CAP_IPC_OWNER,
+ CAP_SYS_MODULE,
+ CAP_SYS_RAWIO,
+ CAP_SYS_CHROOT,
+ CAP_SYS_PTRACE,
+ CAP_SYS_PACCT,
+ CAP_SYS_ADMIN,
+ CAP_SYS_BOOT,
+ CAP_SYS_NICE,
+ CAP_SYS_RESOURCE,
+ CAP_SYS_TIME,
+ CAP_SYS_TTY_CONFIG,
+ CAP_MKNOD,
+ CAP_LEASE,
+ CAP_AUDIT_WRITE,
+ CAP_AUDIT_CONTROL,
+ CAP_SETFCAP,
+ CAP_MAC_OVERRIDE,
+ CAP_MAC_ADMIN,
+ CAP_SYSLOG,
+ CAP_WAKE_ALARM,
+ CAP_BLOCK_SUSPEND,
+ CAP_AUDIT_READ,
+ CAP_PERFMON,
+ CAP_BPF,
+ CAP_CHECKPOINT_RESTORE,
+ }
+}
diff --git a/vendor/github.com/moby/sys/capability/syscall_linux.go b/vendor/github.com/moby/sys/capability/syscall_linux.go
new file mode 100644
index 000000000..d6b6932a9
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/syscall_linux.go
@@ -0,0 +1,153 @@
+// Copyright 2024 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package capability
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type capHeader struct {
+ version uint32
+ pid int32
+}
+
+type capData struct {
+ effective uint32
+ permitted uint32
+ inheritable uint32
+}
+
+func capget(hdr *capHeader, data *capData) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func capset(hdr *capHeader, data *capData) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// not yet in syscall
+const (
+ pr_CAP_AMBIENT = 47
+ pr_CAP_AMBIENT_IS_SET = uintptr(1)
+ pr_CAP_AMBIENT_RAISE = uintptr(2)
+ pr_CAP_AMBIENT_LOWER = uintptr(3)
+ pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4)
+)
+
+func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
+ _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+const (
+ vfsXattrName = "security.capability"
+
+ vfsCapVerMask = 0xff000000
+ vfsCapVer1 = 0x01000000
+ vfsCapVer2 = 0x02000000
+
+ vfsCapFlagMask = ^vfsCapVerMask
+ vfsCapFlageffective = 0x000001
+
+ vfscapDataSizeV1 = 4 * (1 + 2*1)
+ vfscapDataSizeV2 = 4 * (1 + 2*2)
+)
+
+type vfscapData struct {
+ magic uint32
+ data [2]struct {
+ permitted uint32
+ inheritable uint32
+ }
+ effective [2]uint32
+ version int8
+}
+
+var _vfsXattrName *byte
+
+func init() {
+ _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName)
+}
+
+func getVfsCap(path string, dest *vfscapData) (err error) {
+ var _p0 *byte
+ _p0, err = syscall.BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
+ if e1 != 0 {
+ if e1 == syscall.ENODATA {
+ dest.version = 2
+ return
+ }
+ err = e1
+ }
+ switch dest.magic & vfsCapVerMask {
+ case vfsCapVer1:
+ dest.version = 1
+ if r0 != vfscapDataSizeV1 {
+ return syscall.EINVAL
+ }
+ dest.data[1].permitted = 0
+ dest.data[1].inheritable = 0
+ case vfsCapVer2:
+ dest.version = 2
+ if r0 != vfscapDataSizeV2 {
+ return syscall.EINVAL
+ }
+ default:
+ return syscall.EINVAL
+ }
+ if dest.magic&vfsCapFlageffective != 0 {
+ dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable
+ dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable
+ } else {
+ dest.effective[0] = 0
+ dest.effective[1] = 0
+ }
+ return
+}
+
+func setVfsCap(path string, data *vfscapData) (err error) {
+ var _p0 *byte
+ _p0, err = syscall.BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var size uintptr
+ if data.version == 1 {
+ data.magic = vfsCapVer1
+ size = vfscapDataSizeV1
+ } else if data.version == 2 {
+ data.magic = vfsCapVer2
+ if data.effective[0] != 0 || data.effective[1] != 0 {
+ data.magic |= vfsCapFlageffective
+ }
+ size = vfscapDataSizeV2
+ } else {
+ return syscall.EINVAL
+ }
+ _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
index 38f80d5ae..584aac971 100644
--- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
+++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
@@ -69,69 +69,69 @@ type Extensions struct {
// Deprecated
// Triggering event of the Github Workflow. Matches the `event_name` claim of ID
// tokens from Github Actions
- GithubWorkflowTrigger string // OID 1.3.6.1.4.1.57264.1.2
+ GithubWorkflowTrigger string `json:"GithubWorkflowTrigger,omitempty" yaml:"github-workflow-trigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2
// Deprecated
// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
// tokens from Github Actions
- GithubWorkflowSHA string // OID 1.3.6.1.4.1.57264.1.3
+ GithubWorkflowSHA string `json:"GithubWorkflowSHA,omitempty" yaml:"github-workflow-sha,omitempty"` // OID 1.3.6.1.4.1.57264.1.3
// Deprecated
// Name of Github Actions Workflow. Matches the `workflow` claim of the ID
// tokens from Github Actions
- GithubWorkflowName string // OID 1.3.6.1.4.1.57264.1.4
+ GithubWorkflowName string `json:"GithubWorkflowName,omitempty" yaml:"github-workflow-name,omitempty"` // OID 1.3.6.1.4.1.57264.1.4
// Deprecated
// Repository of the Github Actions Workflow. Matches the `repository` claim of the ID
// tokens from Github Actions
- GithubWorkflowRepository string // OID 1.3.6.1.4.1.57264.1.5
+ GithubWorkflowRepository string `json:"GithubWorkflowRepository,omitempty" yaml:"github-workflow-repository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5
// Deprecated
// Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens
// from Github Actions
- GithubWorkflowRef string // 1.3.6.1.4.1.57264.1.6
+ GithubWorkflowRef string `json:"GithubWorkflowRef,omitempty" yaml:"github-workflow-ref,omitempty"` // 1.3.6.1.4.1.57264.1.6
// Reference to specific build instructions that are responsible for signing.
- BuildSignerURI string // 1.3.6.1.4.1.57264.1.9
+ BuildSignerURI string `json:"BuildSignerURI,omitempty" yaml:"build-signer-uri,omitempty"` // 1.3.6.1.4.1.57264.1.9
// Immutable reference to the specific version of the build instructions that is responsible for signing.
- BuildSignerDigest string // 1.3.6.1.4.1.57264.1.10
+ BuildSignerDigest string `json:"BuildSignerDigest,omitempty" yaml:"build-signer-digest,omitempty"` // 1.3.6.1.4.1.57264.1.10
// Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure.
- RunnerEnvironment string // 1.3.6.1.4.1.57264.1.11
+ RunnerEnvironment string `json:"RunnerEnvironment,omitempty" yaml:"runner-environment,omitempty"` // 1.3.6.1.4.1.57264.1.11
// Source repository URL that the build was based on.
- SourceRepositoryURI string // 1.3.6.1.4.1.57264.1.12
+ SourceRepositoryURI string `json:"SourceRepositoryURI,omitempty" yaml:"source-repository-uri,omitempty"` // 1.3.6.1.4.1.57264.1.12
// Immutable reference to a specific version of the source code that the build was based upon.
- SourceRepositoryDigest string // 1.3.6.1.4.1.57264.1.13
+ SourceRepositoryDigest string `json:"SourceRepositoryDigest,omitempty" yaml:"source-repository-digest,omitempty"` // 1.3.6.1.4.1.57264.1.13
// Source Repository Ref that the build run was based upon.
- SourceRepositoryRef string // 1.3.6.1.4.1.57264.1.14
+ SourceRepositoryRef string `json:"SourceRepositoryRef,omitempty" yaml:"source-repository-ref,omitempty"` // 1.3.6.1.4.1.57264.1.14
// Immutable identifier for the source repository the workflow was based upon.
- SourceRepositoryIdentifier string // 1.3.6.1.4.1.57264.1.15
+ SourceRepositoryIdentifier string `json:"SourceRepositoryIdentifier,omitempty" yaml:"source-repository-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.15
// Source repository owner URL of the owner of the source repository that the build was based on.
- SourceRepositoryOwnerURI string // 1.3.6.1.4.1.57264.1.16
+ SourceRepositoryOwnerURI string `json:"SourceRepositoryOwnerURI,omitempty" yaml:"source-repository-owner-uri,omitempty"` // 1.3.6.1.4.1.57264.1.16
// Immutable identifier for the owner of the source repository that the workflow was based upon.
- SourceRepositoryOwnerIdentifier string // 1.3.6.1.4.1.57264.1.17
+ SourceRepositoryOwnerIdentifier string `json:"SourceRepositoryOwnerIdentifier,omitempty" yaml:"source-repository-owner-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.17
// Build Config URL to the top-level/initiating build instructions.
- BuildConfigURI string // 1.3.6.1.4.1.57264.1.18
+ BuildConfigURI string `json:"BuildConfigURI,omitempty" yaml:"build-config-uri,omitempty"` // 1.3.6.1.4.1.57264.1.18
// Immutable reference to the specific version of the top-level/initiating build instructions.
- BuildConfigDigest string // 1.3.6.1.4.1.57264.1.19
+ BuildConfigDigest string `json:"BuildConfigDigest,omitempty" yaml:"build-config-digest,omitempty"` // 1.3.6.1.4.1.57264.1.19
// Event or action that initiated the build.
- BuildTrigger string // 1.3.6.1.4.1.57264.1.20
+ BuildTrigger string `json:"BuildTrigger,omitempty" yaml:"build-trigger,omitempty"` // 1.3.6.1.4.1.57264.1.20
// Run Invocation URL to uniquely identify the build execution.
- RunInvocationURI string // 1.3.6.1.4.1.57264.1.21
+ RunInvocationURI string `json:"RunInvocationURI,omitempty" yaml:"run-invocation-uri,omitempty"` // 1.3.6.1.4.1.57264.1.21
// Source repository visibility at the time of signing the certificate.
- SourceRepositoryVisibilityAtSigning string // 1.3.6.1.4.1.57264.1.22
+ SourceRepositoryVisibilityAtSigning string `json:"SourceRepositoryVisibilityAtSigning,omitempty" yaml:"source-repository-visibility-at-signing,omitempty"` // 1.3.6.1.4.1.57264.1.22
}

func (e Extensions) Render() ([]pkix.Extension, error) {
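
A minimal sketch of what the added tags change: with omitempty, unset extension fields disappear from serialized output instead of showing up as empty strings. The field names below are a subset of the struct above:

package main

import (
	"encoding/json"
	"fmt"
)

type extensions struct {
	BuildTrigger     string `json:"BuildTrigger,omitempty"`
	RunInvocationURI string `json:"RunInvocationURI,omitempty"`
}

func main() {
	b, err := json.Marshal(extensions{BuildTrigger: "push"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"BuildTrigger":"push"} -- RunInvocationURI omitted entirely
}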
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
index a8b2805e6..1e2fa031b 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
@@ -20,7 +20,6 @@ import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
- "crypto/elliptic"
"crypto/rsa"
"crypto/sha1" // nolint:gosec
"crypto/x509"
@@ -104,15 +103,15 @@ func EqualKeys(first, second crypto.PublicKey) error {
switch pub := first.(type) {
case *rsa.PublicKey:
if !pub.Equal(second) {
- return fmt.Errorf(genErrMsg(first, second, "rsa"))
+ return errors.New(genErrMsg(first, second, "rsa"))
}
case *ecdsa.PublicKey:
if !pub.Equal(second) {
- return fmt.Errorf(genErrMsg(first, second, "ecdsa"))
+ return errors.New(genErrMsg(first, second, "ecdsa"))
}
case ed25519.PublicKey:
if !pub.Equal(second) {
- return fmt.Errorf(genErrMsg(first, second, "ed25519"))
+ return errors.New(genErrMsg(first, second, "ed25519"))
}
default:
return errors.New("unsupported key type")
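
The fmt.Errorf-to-errors.New swap matters because genErrMsg returns a non-constant string: fmt would reinterpret any '%' in it as a format verb, and recent go vet releases flag non-constant format strings. A small illustration with a made-up message:

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "public keys are not equal (checked 100% of bytes)"
	fmt.Println(fmt.Errorf(msg)) // mangled: '%' is parsed as a format directive
	fmt.Println(errors.New(msg)) // printed verbatim
}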
@@ -137,47 +136,50 @@ func genErrMsg(first, second crypto.PublicKey, keyType string) string {
// ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key.
func ValidatePubKey(pub crypto.PublicKey) error {
+ // goodkey policy enforces:
+ // * RSA
+ // * Size of key: 2048 <= size <= 4096, size % 8 = 0
+ // * Exponent E = 65537 (Default exponent for OpenSSL and Golang)
+ // * Small primes check for modulus
+ // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
+ // * Key is easily factored with Fermat's factorization method
+ // * EC
+ // * Public key Q is not the identity element (Ø)
+ // * Public key Q's x and y are within [0, p-1]
+ // * Public key Q is on the curve
+ // * Public key Q's order matches the subgroups (nQ = Ø)
+ allowedKeys := &goodkey.AllowedKeys{
+ RSA2048: true,
+ RSA3072: true,
+ RSA4096: true,
+ ECDSAP256: true,
+ ECDSAP384: true,
+ ECDSAP521: true,
+ }
+ cfg := &goodkey.Config{
+ FermatRounds: 100,
+ AllowedKeys: allowedKeys,
+ }
+ p, err := goodkey.NewPolicy(cfg, nil)
+ if err != nil {
+ // Should not occur, only chances to return errors are if fermat rounds
+ // are <0 or when loading blocked/weak keys from disk (not used here)
+ return errors.New("unable to initialize key policy")
+ }
+
switch pk := pub.(type) {
case *rsa.PublicKey:
- // goodkey policy enforces:
- // * Size of key: 2048 <= size <= 4096, size % 8 = 0
- // * Exponent E = 65537 (Default exponent for OpenSSL and Golang)
- // * Small primes check for modulus
- // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
- // * Key is easily factored with Fermat's factorization method
- p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil)
- if err != nil {
- // Should not occur, only chances to return errors are if fermat rounds
- // are <0 or when loading blocked/weak keys from disk (not used here)
- return errors.New("unable to initialize key policy")
- }
// ctx is unused
return p.GoodKey(context.Background(), pub)
case *ecdsa.PublicKey:
- // Unable to use goodkey policy because P-521 curve is not supported
- return validateEcdsaKey(pk)
+ // ctx is unused
+ return p.GoodKey(context.Background(), pub)
case ed25519.PublicKey:
return validateEd25519Key(pk)
}
return errors.New("unsupported public key type")
}

-// Enforce that the ECDSA key curve is one of:
-// * NIST P-256 (secp256r1, prime256v1)
-// * NIST P-384
-// * NIST P-521.
-// Other EC curves, like secp256k1, are not supported by Go.
-func validateEcdsaKey(pub *ecdsa.PublicKey) error {
- switch pub.Curve {
- case elliptic.P224():
- return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521")
- case elliptic.P256(), elliptic.P384(), elliptic.P521():
- return nil
- default:
- return fmt.Errorf("unexpected ec curve")
- }
-}
-
// No validations currently, ED25519 supports only one key size.
func validateEd25519Key(_ ed25519.PublicKey) error {
return nil
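
Since the change routes ECDSA through the shared goodkey policy, P-521 keys (previously handled by the removed hand-rolled curve check) now validate the same way as the other curves. A minimal sketch exercising the updated entry point:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// nil means the goodkey policy accepted the key: allowed curve,
	// point on the curve, correct subgroup order, and so on.
	fmt.Println(cryptoutils.ValidatePubKey(priv.Public()))
}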
diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go
index de56f8e21..3dad8c34f 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go
@@ -135,8 +135,9 @@ func (d *DeviceFlowTokenGetter) deviceFlow(p *oidc.Provider, clientID, redirectU
// Some providers use a secret here, we don't need for sigstore oauth one so leave it off.
data := url.Values{
"grant_type": []string{"urn:ietf:params:oauth:grant-type:device_code"},
+ "client_id": []string{clientID},
"device_code": []string{parsed.DeviceCode},
- "scope": []string{"openid", "email"},
+ "scope": []string{"openid email"},
"code_verifier": []string{pkce.Value},
}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go
index 28abcac50..c1b6ef6b7 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go
@@ -114,10 +114,24 @@ func OIDConnect(issuer, id, secret, redirectURL string, tg TokenGetter) (*OIDCID
return tg.GetIDToken(provider, config)
}

+type stringAsBool bool
+
+func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
+ switch string(b) {
+ case "true", `"true"`, "True", `"True"`:
+ *sb = true
+ case "false", `"false"`, "False", `"False"`:
+ *sb = false
+ default:
+ return errors.New("invalid value for boolean")
+ }
+ return nil
+}
+
type claims struct {
- Email string `json:"email"`
- Verified bool `json:"email_verified"`
- Subject string `json:"sub"`
+ Email string `json:"email"`
+ Verified stringAsBool `json:"email_verified"`
+ Subject string `json:"sub"`
}

// SubjectFromToken extracts the subject claim from an OIDC Identity Token
@@ -129,6 +143,16 @@ func SubjectFromToken(tok *oidc.IDToken) (string, error) {
return subjectFromClaims(claims)
}

+// SubjectFromUnverifiedToken extracts the subject claim from the raw bytes of
+// an OIDC identity token.
+func SubjectFromUnverifiedToken(tok []byte) (string, error) {
+ claims := claims{}
+ if err := json.Unmarshal(tok, &claims); err != nil {
+ return "", err
+ }
+ return subjectFromClaims(claims)
+}
+
func subjectFromClaims(c claims) (string, error) {
if c.Email != "" {
if !c.Verified {
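
A self-contained demonstration of the stringAsBool shim above: some OIDC providers emit email_verified as a JSON string rather than a boolean, and both encodings should decode identically:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type stringAsBool bool

func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "true", `"true"`, "True", `"True"`:
		*sb = true
	case "false", `"false"`, "False", `"False"`:
		*sb = false
	default:
		return errors.New("invalid value for boolean")
	}
	return nil
}

type claims struct {
	Verified stringAsBool `json:"email_verified"`
}

func main() {
	for _, raw := range []string{`{"email_verified": true}`, `{"email_verified": "true"}`} {
		var c claims
		if err := json.Unmarshal([]byte(raw), &c); err != nil {
			panic(err)
		}
		fmt.Println(bool(c.Verified)) // true, both times
	}
}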
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
index 0eb1e1d16..91dd430c1 100644
--- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
@@ -12,33 +12,47 @@ import (
"errors"
"fmt"
"io"
+ "math"
"os"
"time"
"github.com/google/uuid"
)
+var errAlignmentOverflow = errors.New("integer overflow when calculating alignment")
+
// nextAligned finds the next offset that satisfies alignment.
-func nextAligned(offset int64, alignment int) int64 {
+func nextAligned(offset int64, alignment int) (int64, error) {
align64 := uint64(alignment)
offset64 := uint64(offset)
- if align64 != 0 && offset64%align64 != 0 {
- offset64 = (offset64 & ^(align64 - 1)) + align64
+ if align64 <= 0 || offset64%align64 == 0 {
+ return offset, nil
+ }
+
+ offset64 += (align64 - offset64%align64)
+
+ if offset64 > math.MaxInt64 {
+ return 0, errAlignmentOverflow
}
- return int64(offset64)
+ //nolint:gosec // Overflow handled above.
+ return int64(offset64), nil
}

// writeDataObjectAt writes the data object described by di to ws, using time t, recording details
// in d. The object is written at the first position that satisfies the alignment requirements
// described by di following offsetUnaligned.
func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll
- offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart)
+ offset, err := nextAligned(offsetUnaligned, di.opts.alignment)
if err != nil {
return err
}
+ if _, err := ws.Seek(offset, io.SeekStart); err != nil {
+ return err
+ }
+
n, err := io.Copy(ws, di.r)
if err != nil {
return err
@@ -72,6 +86,7 @@ func (f *FileImage) calculatedDataSize() int64 {
var (
errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image")
errPrimaryPartition = errors.New("image already contains a primary partition")
+ errObjectIDOverflow = errors.New("object ID would overflow")
)

// writeDataObject writes the data object described by di to f, using time t, recording details in
@@ -81,6 +96,11 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro
return errInsufficientCapacity
}
+ // We derive the ID from i, so make sure the ID will not overflow.
+ if int64(i) >= math.MaxUint32 {
+ return errObjectIDOverflow
+ }
+
// If this is a primary partition, verify there isn't another primary partition, and update the
// architecture in the global header.
if p, ok := di.opts.md.(partition); ok && p.Parttype == PartPrimSys {
@@ -92,7 +112,7 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro
}

d := &f.rds[i]
- d.ID = uint32(i) + 1
+ d.ID = uint32(i) + 1 //nolint:gosec // Overflow handled above.

f.h.DataSize = f.calculatedDataSize()
@@ -213,8 +233,16 @@ func OptCreateWithCloseOnUnload(b bool) CreateOpt {
}
}

+var errDescriptorCapacityNotSupported = errors.New("descriptor capacity not supported")
+
// createContainer creates a new SIF container file in rw, according to opts.
func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
+ // The supported number of descriptors is limited by the unsigned 32-bit ID field in each
+ // rawDescriptor.
+ if co.descriptorCapacity >= math.MaxUint32 {
+ return nil, errDescriptorCapacityNotSupported
+ }
+
rds := make([]rawDescriptor, co.descriptorCapacity)
rdsSize := int64(binary.Size(rds))
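
A standalone sketch of the overflow-safe rounding nextAligned now performs: round offset up to the next multiple of alignment, and fail rather than silently wrap when the result would exceed the int64 range:

package main

import (
	"errors"
	"fmt"
	"math"
)

func nextAligned(offset int64, alignment int) (int64, error) {
	align64 := uint64(alignment)
	offset64 := uint64(offset)

	if align64 == 0 || offset64%align64 == 0 {
		return offset, nil // already aligned (or no alignment requested)
	}
	offset64 += align64 - offset64%align64
	if offset64 > math.MaxInt64 {
		return 0, errors.New("integer overflow when calculating alignment")
	}
	return int64(offset64), nil
}

func main() {
	fmt.Println(nextAligned(10, 8))                 // 16 <nil>
	fmt.Println(nextAligned(math.MaxInt64-1, 4096)) // 0 integer overflow ...
}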
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 5bbb33217..109997d77 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -49,6 +49,13 @@ type Token struct {
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
+ // ExpiresIn is the OAuth2 wire format "expires_in" field,
+ // which specifies how many seconds later the token expires,
+ // relative to an unknown time base approximately around "now".
+ // It is the application's responsibility to populate
+ // `Expiry` from `ExpiresIn` when required.
+ ExpiresIn int64 `json:"expires_in,omitempty"`
+
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
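
As the new comment says, the library leaves ExpiresIn as raw wire data; a caller sketch deriving Expiry from it, relative to the application's own notion of "now":

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Placeholder token, as if just decoded from a token-endpoint response.
	tok := &oauth2.Token{AccessToken: "example", ExpiresIn: 3600}
	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
	fmt.Println(tok.Valid(), tok.Expiry)
}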
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/time/LICENSE
+++ b/vendor/golang.org/x/time/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b0961ceba..a57610d65 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# dario.cat/mergo v1.0.0
+# dario.cat/mergo v1.0.1
## explicit; go 1.13
dario.cat/mergo
# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
@@ -171,7 +171,7 @@ github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/pkg/volumes
github.com/containers/buildah/util
-# github.com/containers/common v0.60.1-0.20240918122915-db8145750e1d
+# github.com/containers/common v0.60.1-0.20240920125326-ff6611ae40ad
## explicit; go 1.22.0
github.com/containers/common/internal
github.com/containers/common/internal/attributedstring
@@ -244,8 +244,8 @@ github.com/containers/conmon/runner/config
# github.com/containers/gvisor-tap-vsock v0.7.5
## explicit; go 1.21
github.com/containers/gvisor-tap-vsock/pkg/types
-# github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6
-## explicit; go 1.21.0
+# github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46
+## explicit; go 1.22.6
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@@ -355,8 +355,8 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483
-## explicit; go 1.21
+# github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0
+## explicit; go 1.22.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -406,7 +406,7 @@ github.com/containers/storage/types
## explicit; go 1.19
github.com/containers/winquit/pkg/winquit
github.com/containers/winquit/pkg/winquit/win32
-# github.com/coreos/go-oidc/v3 v3.10.0
+# github.com/coreos/go-oidc/v3 v3.11.0
## explicit; go 1.21
github.com/coreos/go-oidc/v3/oidc
# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
@@ -468,7 +468,7 @@ github.com/distribution/reference
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v27.3.0+incompatible
+# github.com/docker/docker v27.3.1+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -551,7 +551,7 @@ github.com/gin-gonic/gin/render
github.com/go-jose/go-jose/v3
github.com/go-jose/go-jose/v3/cipher
github.com/go-jose/go-jose/v3/json
-# github.com/go-jose/go-jose/v4 v4.0.2
+# github.com/go-jose/go-jose/v4 v4.0.4
## explicit; go 1.21
github.com/go-jose/go-jose/v4
github.com/go-jose/go-jose/v4/cipher
@@ -655,7 +655,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.20.1
+# github.com/google/go-containerregistry v0.20.2
## explicit; go 1.18
github.com/google/go-containerregistry/pkg/name
github.com/google/go-containerregistry/pkg/v1
@@ -718,8 +718,8 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.9
-## explicit; go 1.20
+# github.com/klauspost/compress v1.17.10
+## explicit; go 1.21
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -740,8 +740,8 @@ github.com/kr/fs
# github.com/leodido/go-urn v1.2.4
## explicit; go 1.16
github.com/leodido/go-urn
-# github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0
-## explicit; go 1.21
+# github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec
+## explicit; go 1.22.0
github.com/letsencrypt/boulder/core
github.com/letsencrypt/boulder/goodkey
github.com/letsencrypt/boulder/identifier
@@ -806,6 +806,9 @@ github.com/moby/docker-image-spec/specs-go/v1
# github.com/moby/patternmatcher v0.6.0
## explicit; go 1.19
github.com/moby/patternmatcher
+# github.com/moby/sys/capability v0.3.0
+## explicit; go 1.21
+github.com/moby/sys/capability
# github.com/moby/sys/mountinfo v0.7.2
## explicit; go 1.17
github.com/moby/sys/mountinfo
@@ -989,8 +992,8 @@ github.com/shirou/gopsutil/v4/process
# github.com/shoenig/go-m1cpu v0.1.6
## explicit; go 1.20
github.com/shoenig/go-m1cpu
-# github.com/sigstore/fulcio v1.4.5
-## explicit; go 1.21
+# github.com/sigstore/fulcio v1.6.4
+## explicit; go 1.22.6
github.com/sigstore/fulcio/pkg/api
github.com/sigstore/fulcio/pkg/certificate
# github.com/sigstore/rekor v1.3.6
@@ -1003,8 +1006,8 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
github.com/sigstore/rekor/pkg/util
-# github.com/sigstore/sigstore v1.8.4
-## explicit; go 1.21
+# github.com/sigstore/sigstore v1.8.9
+## explicit; go 1.22.5
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/oauth
github.com/sigstore/sigstore/pkg/oauthflow
@@ -1031,8 +1034,8 @@ github.com/stefanberger/go-pkcs11uri
## explicit; go 1.17
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.18.0
-## explicit; go 1.21.0
+# github.com/sylabs/sif/v2 v2.19.1
+## explicit; go 1.22.5
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
@@ -1209,7 +1212,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.22.0
+# golang.org/x/oauth2 v0.23.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal
@@ -1250,15 +1253,15 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.5.0
+# golang.org/x/time v0.6.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.24.0
## explicit; go 1.19
golang.org/x/tools/cover
golang.org/x/tools/go/ast/inspector
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c
+## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.65.0
## explicit; go 1.21