summaryrefslogtreecommitdiff
path: root/vendor/github.com/containers/storage/pkg
diff options
context:
space:
mode:
authorPaul Holzinger <pholzing@redhat.com>2023-06-26 16:11:36 +0200
committerPaul Holzinger <pholzing@redhat.com>2023-06-27 18:04:42 +0200
commitb80fd54a56ec5a1538e25549cf8b4d638b022fd7 (patch)
tree8dd201d8af2aa14166046b038ccefcc444313fc5 /vendor/github.com/containers/storage/pkg
parent4445a5040affed76aafaac88838a38f29639da63 (diff)
update c/image and c/storage to latest
Signed-off-by: Paul Holzinger <pholzing@redhat.com>
Diffstat (limited to 'vendor/github.com/containers/storage/pkg')
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/cache_linux.go34
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/compression_linux.go123
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go72
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/internal/compression.go30
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/storage_linux.go76
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go9
6 files changed, 267 insertions, 77 deletions
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
index 14064717a..cd13212e6 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -516,14 +516,14 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
- if field != "entries" {
+ if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
- switch field {
- case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
+ switch strings.ToLower(field) {
+ case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime":
count += len(iter.ReadStringAsSlice())
case "xattrs":
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
@@ -548,33 +548,33 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
- if field == "version" {
+ if strings.ToLower(field) == "version" {
toc.Version = iter.ReadInt()
continue
}
- if field != "entries" {
+ if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
var m internal.FileMetadata
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
- switch field {
+ switch strings.ToLower(field) {
case "type":
m.Type = getString(iter.ReadStringAsSlice())
case "name":
m.Name = getString(iter.ReadStringAsSlice())
- case "linkName":
+ case "linkname":
m.Linkname = getString(iter.ReadStringAsSlice())
case "mode":
m.Mode = iter.ReadInt64()
case "size":
m.Size = iter.ReadInt64()
- case "UID":
+ case "uid":
m.UID = iter.ReadInt()
- case "GID":
+ case "gid":
m.GID = iter.ReadInt()
- case "ModTime":
+ case "modtime":
time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
if err != nil {
return nil, err
@@ -592,23 +592,23 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
return nil, err
}
m.ChangeTime = &time
- case "devMajor":
+ case "devmajor":
m.Devmajor = iter.ReadInt64()
- case "devMinor":
+ case "devminor":
m.Devminor = iter.ReadInt64()
case "digest":
m.Digest = getString(iter.ReadStringAsSlice())
case "offset":
m.Offset = iter.ReadInt64()
- case "endOffset":
+ case "endoffset":
m.EndOffset = iter.ReadInt64()
- case "chunkSize":
+ case "chunksize":
m.ChunkSize = iter.ReadInt64()
- case "chunkOffset":
+ case "chunkoffset":
m.ChunkOffset = iter.ReadInt64()
- case "chunkDigest":
+ case "chunkdigest":
m.ChunkDigest = getString(iter.ReadStringAsSlice())
- case "chunkType":
+ case "chunktype":
m.ChunkType = getString(iter.ReadStringAsSlice())
case "xattrs":
m.Xattrs = make(map[string]string)
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
index 9333ed65c..2ee79dd23 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
@@ -150,22 +150,32 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.github.containers.zstd-chunked. annotations when specified.
-func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
+func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) {
footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
- return nil, 0, errors.New("blob too small")
+ return nil, nil, 0, errors.New("blob too small")
}
manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
- return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
+ return nil, nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}
var offset, length, lengthUncompressed, manifestType uint64
+ var offsetTarSplit, lengthTarSplit, lengthUncompressedTarSplit uint64
+ tarSplitChecksumAnnotation := ""
+
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
- return nil, 0, err
+ return nil, nil, 0, err
+ }
+
+ if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
+ if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &offsetTarSplit, &lengthTarSplit, &lengthUncompressedTarSplit); err != nil {
+ return nil, nil, 0, err
+ }
+ tarSplitChecksumAnnotation = annotations[internal.TarSplitChecksumKey]
}
} else {
chunk := ImageSourceChunk{
@@ -174,39 +184,39 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
- return nil, 0, err
+ return nil, nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
- return nil, 0, err
+ return nil, nil, 0, err
}
footer := make([]byte, footerSize)
if _, err := io.ReadFull(reader, footer); err != nil {
- return nil, 0, err
+ return nil, nil, 0, err
}
offset = binary.LittleEndian.Uint64(footer[0:8])
length = binary.LittleEndian.Uint64(footer[8:16])
lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
manifestType = binary.LittleEndian.Uint64(footer[24:32])
- if !isZstdChunkedFrameMagic(footer[32:40]) {
- return nil, 0, errors.New("invalid magic number")
+ if !isZstdChunkedFrameMagic(footer[48:56]) {
+ return nil, nil, 0, errors.New("invalid magic number")
}
}
if manifestType != internal.ManifestTypeCRFS {
- return nil, 0, errors.New("invalid manifest type")
+ return nil, nil, 0, errors.New("invalid manifest type")
}
// set a reasonable limit
if length > (1<<20)*50 {
- return nil, 0, errors.New("manifest too big")
+ return nil, nil, 0, errors.New("manifest too big")
}
if lengthUncompressed > (1<<20)*50 {
- return nil, 0, errors.New("manifest too big")
+ return nil, nil, 0, errors.New("manifest too big")
}
chunk := ImageSourceChunk{
@@ -214,47 +224,86 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
Length: length,
}
- parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+ chunks := []ImageSourceChunk{chunk}
+
+ if offsetTarSplit > 0 {
+ chunkTarSplit := ImageSourceChunk{
+ Offset: offsetTarSplit,
+ Length: lengthTarSplit,
+ }
+ chunks = append(chunks, chunkTarSplit)
+ }
+
+ parts, errs, err := blobStream.GetBlobAt(chunks)
if err != nil {
- return nil, 0, err
+ return nil, nil, 0, err
}
- var reader io.ReadCloser
- select {
- case r := <-parts:
- reader = r
- case err := <-errs:
- return nil, 0, err
+
+ readBlob := func(len uint64) ([]byte, error) {
+ var reader io.ReadCloser
+ select {
+ case r := <-parts:
+ reader = r
+ case err := <-errs:
+ return nil, err
+ }
+
+ blob := make([]byte, len)
+ if _, err := io.ReadFull(reader, blob); err != nil {
+ reader.Close()
+ return nil, err
+ }
+ if err := reader.Close(); err != nil {
+ return nil, err
+ }
+ return blob, nil
}
- manifest := make([]byte, length)
- if _, err := io.ReadFull(reader, manifest); err != nil {
- return nil, 0, err
+ manifest, err := readBlob(length)
+ if err != nil {
+ return nil, nil, 0, err
}
- manifestDigester := digest.Canonical.Digester()
- manifestChecksum := manifestDigester.Hash()
- if _, err := manifestChecksum.Write(manifest); err != nil {
- return nil, 0, err
+ decodedBlob, err := decodeAndValidateBlob(manifest, lengthUncompressed, manifestChecksumAnnotation)
+ if err != nil {
+ return nil, nil, 0, err
+ }
+ decodedTarSplit := []byte{}
+ if offsetTarSplit > 0 {
+ tarSplit, err := readBlob(lengthTarSplit)
+ if err != nil {
+ return nil, nil, 0, err
+ }
+
+ decodedTarSplit, err = decodeAndValidateBlob(tarSplit, lengthUncompressedTarSplit, tarSplitChecksumAnnotation)
+ if err != nil {
+ return nil, nil, 0, err
+ }
}
+ return decodedBlob, decodedTarSplit, int64(offset), err
+}
- d, err := digest.Parse(manifestChecksumAnnotation)
+func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
+ d, err := digest.Parse(expectedUncompressedChecksum)
if err != nil {
- return nil, 0, err
+ return nil, err
}
- if manifestDigester.Digest() != d {
- return nil, 0, errors.New("invalid manifest checksum")
+
+ blobDigester := d.Algorithm().Digester()
+ blobChecksum := blobDigester.Hash()
+ if _, err := blobChecksum.Write(blob); err != nil {
+ return nil, err
+ }
+ if blobDigester.Digest() != d {
+ return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
}
decoder, err := zstd.NewReader(nil) //nolint:contextcheck
if err != nil {
- return nil, 0, err
+ return nil, err
}
defer decoder.Close()
b := make([]byte, 0, lengthUncompressed)
- if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
- return decoded, int64(offset), nil
- }
-
- return manifest, int64(offset), nil
+ return decoder.DecodeAll(blob, b)
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 0d1acafec..ca7ce30f7 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -6,13 +6,17 @@ package compressor
import (
"bufio"
+ "bytes"
"encoding/base64"
"io"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
)
const (
@@ -198,11 +202,55 @@ type chunk struct {
ChunkType string
}
+type tarSplitData struct {
+ compressed *bytes.Buffer
+ digester digest.Digester
+ uncompressedCounter *ioutils.WriteCounter
+ zstd *zstd.Encoder
+ packer storage.Packer
+}
+
+func newTarSplitData(level int) (*tarSplitData, error) {
+ compressed := bytes.NewBuffer(nil)
+ digester := digest.Canonical.Digester()
+
+ zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
+ if err != nil {
+ return nil, err
+ }
+
+ uncompressedCounter := ioutils.NewWriteCounter(zstdWriter)
+ metaPacker := storage.NewJSONPacker(uncompressedCounter)
+
+ return &tarSplitData{
+ compressed: compressed,
+ digester: digester,
+ uncompressedCounter: uncompressedCounter,
+ zstd: zstdWriter,
+ packer: metaPacker,
+ }, nil
+}
+
func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
// total written so far. Used to retrieve partial offsets in the file
dest := ioutils.NewWriteCounter(destFile)
- tr := tar.NewReader(reader)
+ tarSplitData, err := newTarSplitData(level)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if tarSplitData.zstd != nil {
+ tarSplitData.zstd.Close()
+ }
+ }()
+
+ its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
+ if err != nil {
+ return err
+ }
+
+ tr := tar.NewReader(its)
tr.RawAccounting = true
buf := make([]byte, 4096)
@@ -214,7 +262,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
defer func() {
if zstdWriter != nil {
zstdWriter.Close()
- zstdWriter.Flush()
}
}()
@@ -224,9 +271,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
if err := zstdWriter.Close(); err != nil {
return 0, err
}
- if err := zstdWriter.Flush(); err != nil {
- return 0, err
- }
offset = dest.Count
zstdWriter.Reset(dest)
}
@@ -373,9 +417,11 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
rawBytes := tr.RawBytes()
if _, err := zstdWriter.Write(rawBytes); err != nil {
+ zstdWriter.Close()
return err
}
if err := zstdWriter.Flush(); err != nil {
+ zstdWriter.Close()
return err
}
if err := zstdWriter.Close(); err != nil {
@@ -383,7 +429,21 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
}
zstdWriter = nil
- return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
+ if err := tarSplitData.zstd.Flush(); err != nil {
+ return err
+ }
+ if err := tarSplitData.zstd.Close(); err != nil {
+ return err
+ }
+ tarSplitData.zstd = nil
+
+ ts := internal.TarSplitData{
+ Data: tarSplitData.compressed.Bytes(),
+ Digest: tarSplitData.digester.Digest(),
+ UncompressedSize: tarSplitData.uncompressedCounter.Count,
+ }
+
+ return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
}
type zstdChunkedWriter struct {
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
index 5eb9edb38..49074eadf 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -90,6 +90,8 @@ func GetType(t byte) (string, error) {
const (
ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
+ TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
+ TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
ManifestTypeCRFS = 1
@@ -97,7 +99,7 @@ const (
// FooterSizeSupported is the footer size supported by this implementation.
// Newer versions of the image format might increase this value, so reject
// any version that is not supported.
- FooterSizeSupported = 40
+ FooterSizeSupported = 56
)
var (
@@ -125,9 +127,16 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
return nil
}
-func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error {
+type TarSplitData struct {
+ Data []byte
+ Digest digest.Digest
+ UncompressedSize int64
+}
+
+func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error {
// 8 is the size of the zstd skippable frame header + the frame size
- manifestOffset := offset + 8
+ const zstdSkippableFrameHeader = 8
+ manifestOffset := offset + zstdSkippableFrameHeader
toc := TOC{
Version: 1,
@@ -167,13 +176,20 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
return err
}
+ outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
+ tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
+ outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
+ if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
+ return err
+ }
+
// Store the offset to the manifest and its size in LE order
manifestDataLE := make([]byte, FooterSizeSupported)
binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
- binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
- copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS))
+ copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic)
return appendZstdSkippableFrame(dest, manifestDataLE)
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 711962298..a80b28fb5 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -55,6 +55,7 @@ type compressedFileType int
type chunkedDiffer struct {
stream ImageSourceSeekable
manifest []byte
+ tarSplit []byte
layersCache *layersCache
tocOffset int64
fileType compressedFileType
@@ -64,6 +65,8 @@ type chunkedDiffer struct {
gzipReader *pgzip.Reader
zstdReader *zstd.Decoder
rawReader io.Reader
+
+ tocDigest digest.Digest
}
var xattrsToIgnore = map[string]interface{}{
@@ -135,6 +138,26 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
return dstFile, st.Size(), nil
}
+// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
+// This is an experimental feature and may be changed/removed in the future.
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
+ if tocDigest, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok {
+ d, err := digest.Parse(tocDigest)
+ if err != nil {
+ return nil, err
+ }
+ return &d, nil
+ }
+ if tocDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
+ d, err := digest.Parse(tocDigest)
+ if err != nil {
+ return nil, err
+ }
+ return &d, nil
+ }
+ return nil, nil
+}
+
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
if _, ok := annotations[internal.ManifestChecksumKey]; ok {
@@ -147,7 +170,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
}
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
- manifest, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
+ manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
@@ -156,13 +179,20 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
return nil, err
}
+ tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
+ if err != nil {
+ return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
+ }
+
return &chunkedDiffer{
copyBuffer: makeCopyBuffer(),
- stream: iss,
- manifest: manifest,
+ fileType: fileTypeZstdChunked,
layersCache: layersCache,
+ manifest: manifest,
+ stream: iss,
+ tarSplit: tarSplit,
tocOffset: tocOffset,
- fileType: fileTypeZstdChunked,
+ tocDigest: tocDigest,
}, nil
}
@@ -176,6 +206,11 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
return nil, err
}
+ tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
+ if err != nil {
+ return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
+ }
+
return &chunkedDiffer{
copyBuffer: makeCopyBuffer(),
stream: iss,
@@ -183,6 +218,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
layersCache: layersCache,
tocOffset: tocOffset,
fileType: fileTypeEstargz,
+ tocDigest: tocDigest,
}, nil
}
@@ -363,6 +399,24 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption
return nil
}
+func mapToSlice(inputMap map[uint32]struct{}) []uint32 {
+ var out []uint32
+ for value := range inputMap {
+ out = append(out, value)
+ }
+ return out
+}
+
+func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) {
+ uids := make(map[uint32]struct{})
+ gids := make(map[uint32]struct{})
+ for _, entry := range entries {
+ uids[uint32(entry.UID)] = struct{}{}
+ gids[uint32(entry.GID)] = struct{}{}
+ }
+ return mapToSlice(uids), mapToSlice(gids)
+}
+
type originFile struct {
Root string
Path string
@@ -1271,12 +1325,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
}()
- bigData := map[string][]byte{
- bigDataKey: c.manifest,
- }
output := graphdriver.DriverWithDifferOutput{
- Differ: c,
- BigData: bigData,
+ Differ: c,
+ TarSplit: c.tarSplit,
+ BigData: map[string][]byte{
+ bigDataKey: c.manifest,
+ },
+ TOCDigest: c.tocDigest,
}
storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
@@ -1305,6 +1360,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
var missingParts []missingPart
+ output.UIDs, output.GIDs = collectIDs(toc.Entries)
+
mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil {
return output, err
@@ -1579,6 +1636,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
if totalChunksSize > 0 {
logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
+
return output, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
index 4d952aba3..cc37ab1d8 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -9,9 +9,16 @@ import (
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
+ digest "github.com/opencontainers/go-digest"
)
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
- return nil, errors.New("format not supported on this architecture")
+ return nil, errors.New("format not supported on this system")
+}
+
+// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
+// This is an experimental feature and may be changed/removed in the future.
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
+ return nil, errors.New("format not supported on this system")
}