Upgrade k8s package version (#5358)

* upgrade k8s package version

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

* Script upgrade and code formatting.

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
This commit is contained in:
hongzhouzi
2022-11-15 14:56:38 +08:00
committed by GitHub
parent 5f91c1663a
commit 44167aa47a
3106 changed files with 321340 additions and 172080 deletions

View File

@@ -0,0 +1,57 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
	// DefaultBlobMediaType specifies the default blob media type
	DefaultBlobMediaType = ocispec.MediaTypeImageLayer
	// DefaultBlobDirMediaType specifies the default blob directory media type
	DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip
)

const (
	// TempFilePattern specifies the pattern used to name temporary files
	TempFilePattern = "oras"
)

const (
	// AnnotationDigest is the annotation key for the digest of the uncompressed content
	AnnotationDigest = "io.deis.oras.content.digest"
	// AnnotationUnpack is the annotation key for indication of unpacking
	AnnotationUnpack = "io.deis.oras.content.unpack"
)

const (
	// OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification
	// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file
	OCIImageIndexFile = "index.json"
)

const (
	// DefaultBlocksize default size of each slice of bytes read in each write through in gunzipand untar.
	// Simply uses the same size as io.Copy()
	DefaultBlocksize = 32768
)

const (
	// BlankHash is the digest of the empty (zero-byte) blob, i.e. sha256 of "".
	BlankHash = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
)

View File

@@ -0,0 +1,151 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"errors"
"strings"
ctrcontent "github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Decompress store to decompress content and extract from tar, if needed, wrapping
// another store. By default, a FileStore will simply take each artifact and write it to
// a file, as a MemoryStore will do into memory. If the artifact is gzipped or tarred,
// you might want to store the actual object inside tar or gzip. Wrap your Store
// with Decompress, and it will check the media-type and, if relevant,
// gunzip and/or untar.
//
// For example:
//
//	fileStore := NewFileStore(rootPath)
//	Decompress := store.NewDecompress(fileStore, WithBlocksize(blocksize))
//
// The above example works if there is no tar, i.e. each artifact is just a single file, perhaps gzipped,
// or if there is only one file in each tar archive. In other words, when each content.Writer has only one target output stream.
// However, if you have multiple files in each tar archive, each archive of which is an artifact layer, then
// you need a way to select how to handle each file in the tar archive. In other words, when each content.Writer has more than one
// target output stream. In that case, use the following example:
//
//	multiStore := NewMultiStore(rootPath) // some store that can handle different filenames
//	Decompress := store.NewDecompress(multiStore, WithBlocksize(blocksize), WithMultiWriterIngester())
type Decompress struct {
	pusher              remotes.Pusher // wrapped pusher that receives the decompressed/untarred stream
	blocksize           int            // copy chunk size; 0 means "use the package default"
	multiWriterIngester bool           // when true, pusher must also implement MultiWriterPusher
}
// NewDecompress builds a Decompress wrapper around pusher, extracting the
// blocksize and multi-writer-ingester settings from the supplied options.
func NewDecompress(pusher remotes.Pusher, opts ...WriterOpt) Decompress {
	// reprocess the options only to discover the settings we care about
	var wOpts WriterOpts
	for _, apply := range opts {
		// TODO: option errors are currently ignored
		_ = apply(&wOpts)
	}
	return Decompress{
		pusher:              pusher,
		blocksize:           wOpts.Blocksize,
		multiWriterIngester: wOpts.MultiWriterIngester,
	}
}
// Push get a content.Writer for desc, wrapping the underlying pusher's writer
// with untar and/or gunzip stages when the media type carries the
// corresponding suffixes. The stripped media type is what is passed down.
func (d Decompress) Push(ctx context.Context, desc ocispec.Descriptor) (ctrcontent.Writer, error) {
	// the logic is straightforward:
	// - if there is a desc in the opts, and the mediatype is tar or tar+gzip, then pass the correct decompress writer
	// - else, pass the regular writer
	var (
		writer        ctrcontent.Writer
		err           error
		multiIngester MultiWriterPusher
		ok            bool
	)
	// check to see if we are supposed to use a MultiWriterIngester
	if d.multiWriterIngester {
		multiIngester, ok = d.pusher.(MultiWriterPusher)
		if !ok {
			return nil, errors.New("configured to use multiwriter ingester, but ingester does not implement multiwriter")
		}
	}
	// figure out if compression and/or archive exists
	// before we pass it down, we need to strip anything we are removing here
	// and possibly update the digest, since the store indexes things by digest
	hasGzip, hasTar, modifiedMediaType := checkCompression(desc.MediaType)
	desc.MediaType = modifiedMediaType
	// determine if we pass it blocksize, only if positive
	writerOpts := []WriterOpt{}
	if d.blocksize > 0 {
		writerOpts = append(writerOpts, WithBlocksize(d.blocksize))
	}
	writer, err = d.pusher.Push(ctx, desc)
	if err != nil {
		return nil, err
	}
	// do we need to wrap with an untar writer?
	if hasTar {
		// if not multiingester, get a regular writer
		if multiIngester == nil {
			writer = NewUntarWriter(writer, writerOpts...)
		} else {
			writers, err := multiIngester.Pushers(ctx, desc)
			if err != nil {
				return nil, err
			}
			writer = NewUntarWriterByName(writers, writerOpts...)
		}
	}
	if hasGzip {
		// re-acquire a writer in case the untar wrapping above yielded nil
		if writer == nil {
			writer, err = d.pusher.Push(ctx, desc)
			if err != nil {
				return nil, err
			}
		}
		writer = NewGunzipWriter(writer, writerOpts...)
	}
	return writer, nil
}
// checkCompression inspects mediaType for compression/archive suffixes.
// It reports whether gzip compression ("+gzip" or ".gzip") and/or tar
// archiving (".tar") are present, and returns the media type with those
// suffixes stripped (gzip first, then tar).
func checkCompression(mediaType string) (gzip, tar bool, mt string) {
	const (
		gzipSuffix    = "+gzip"
		gzipAltSuffix = ".gzip"
		tarSuffix     = ".tar"
	)
	mt = mediaType
	if strings.HasSuffix(mt, gzipSuffix) {
		mt = strings.TrimSuffix(mt, gzipSuffix)
		gzip = true
	} else if strings.HasSuffix(mt, gzipAltSuffix) {
		mt = strings.TrimSuffix(mt, gzipAltSuffix)
		gzip = true
	}
	if strings.HasSuffix(mt, tarSuffix) {
		mt = strings.TrimSuffix(mt, tarSuffix)
		tar = true
	}
	return gzip, tar, mt
}

View File

@@ -0,0 +1,33 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import "errors"
// Common errors returned by the store implementations in this package.
var (
	// ErrNotFound indicates the requested content is not in the store.
	ErrNotFound = errors.New("not_found")
	// ErrNoName indicates a descriptor carries no name annotation.
	ErrNoName = errors.New("no_name")
	// ErrUnsupportedSize indicates a truncate to a non-zero size was requested.
	ErrUnsupportedSize = errors.New("unsupported_size")
	// ErrUnsupportedVersion indicates an unsupported schema/spec version.
	ErrUnsupportedVersion = errors.New("unsupported_version")
	// ErrInvalidReference indicates a malformed reference string.
	ErrInvalidReference = errors.New("invalid_reference")
)

// FileStore errors
var (
	// ErrPathTraversalDisallowed indicates a write target escapes the store root.
	ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed")
	// ErrOverwriteDisallowed indicates the target exists and overwrite is disabled.
	ErrOverwriteDisallowed = errors.New("overwrite_disallowed")
)

View File

@@ -0,0 +1,534 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"bytes"
"compress/gzip"
"context"
_ "crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// File provides content via files from the file system
type File struct {
	DisableOverwrite          bool // refuse to write to an already-existing path
	AllowPathTraversalOnWrite bool // skip the "../" escape check in resolveWritePath
	// Reproducible enables stripping times from added files
	Reproducible bool
	root         string    // base directory that relative paths resolve against
	descriptor   *sync.Map // map[digest.Digest]ocispec.Descriptor
	pathMap      *sync.Map // map[name string](file string)
	memoryMap    *sync.Map // map[digest.Digest]([]byte)
	refMap       *sync.Map // map[string]ocispec.Descriptor
	tmpFiles     *sync.Map // map[file name string]*os.File, removed on Close
	ignoreNoName bool      // discard (not error on) descriptors without a name annotation
}
// NewFile creates a new file target. It represents a single root reference and all of its components.
func NewFile(rootPath string, opts ...WriterOpt) *File {
	// we have to process the opts to find if they told us to change defaults
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			// option errors are deliberately ignored; defaults are kept
			continue
		}
	}
	return &File{
		root:         rootPath,
		descriptor:   &sync.Map{},
		pathMap:      &sync.Map{},
		memoryMap:    &sync.Map{},
		refMap:       &sync.Map{},
		tmpFiles:     &sync.Map{},
		ignoreNoName: wOpts.IgnoreNoName,
	}
}
// Resolver returns the store itself, which implements remotes.Resolver.
func (s *File) Resolver() remotes.Resolver {
	return s
}
// Resolve maps a reference to the descriptor previously recorded for it
// (via StoreManifest); unknown references produce an error.
func (s *File) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	if found, ok := s.getRef(ref); ok {
		return ref, found, nil
	}
	return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref)
}
// Fetcher returns the store itself as a remotes.Fetcher, provided ref is
// known to the store.
func (s *File) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
	if _, known := s.refMap.Load(ref); known {
		return s, nil
	}
	return nil, fmt.Errorf("unknown reference: %s", ref)
}
// Fetch get an io.ReadCloser for the specific content. Content loaded into
// memory (via Load/StoreManifest) is served from there; otherwise the
// descriptor must have been registered via Add and its backing file is opened.
func (s *File) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	// in-memory manifests take priority
	if blob, found := s.getMemory(desc); found {
		return ioutil.NopCloser(bytes.NewReader(blob)), nil
	}
	stored, found := s.get(desc)
	if !found {
		return nil, ErrNotFound
	}
	name, found := ResolveName(stored)
	if !found {
		return nil, ErrNoName
	}
	return os.Open(s.ResolvePath(name))
}
// Pusher returns a remotes.Pusher scoped to ref, which may carry an optional
// "@<digest>" suffix identifying the root descriptor.
func (s *File) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	pieces := strings.SplitN(ref, "@", 2)
	p := &filePusher{store: s}
	if len(pieces) > 0 {
		p.ref = pieces[0]
	}
	if len(pieces) > 1 {
		p.hash = pieces[1]
	}
	return p, nil
}
// filePusher implements remotes.Pusher backed by a *File store.
type filePusher struct {
	store *File  // backing file store
	ref   string // tag portion of the reference (before "@")
	hash  string // digest portion of the reference (after "@"); may be empty
}
// Push returns a content.Writer that streams desc's content into the store.
// Descriptors without a name annotation are either rejected (ErrNoName) or,
// when the store was configured to ignore missing names, silently discarded.
func (s *filePusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	name, ok := ResolveName(desc)
	now := time.Now()
	if !ok {
		// if we were not told to ignore NoName, then return an error
		if !s.store.ignoreNoName {
			return nil, ErrNoName
		}
		// just return a nil writer - we do not want to calculate the hash, so just use
		// whatever was passed in the descriptor
		return NewIoContentWriter(ioutil.Discard, WithOutputHash(desc.Digest)), nil
	}
	path, err := s.store.resolveWritePath(name)
	if err != nil {
		return nil, err
	}
	file, afterCommit, err := s.store.createWritePath(path, desc, name)
	if err != nil {
		return nil, err
	}
	return &fileWriter{
		store:    s.store,
		file:     file,
		desc:     desc,
		digester: digest.Canonical.Digester(),
		status: content.Status{
			Ref:       name,
			Total:     desc.Size,
			StartedAt: now,
			UpdatedAt: now,
		},
		afterCommit: afterCommit,
	}, nil
}
// Add registers a file or directory at path under the given name and media
// type, returning the descriptor that references it. An empty path defaults
// to the name itself.
func (s *File) Add(name, mediaType, path string) (ocispec.Descriptor, error) {
	if path == "" {
		path = name
	}
	path = s.MapPath(name, path)
	info, err := os.Stat(path)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	// directories are tarred+gzipped, plain files are hashed directly
	var desc ocispec.Descriptor
	if info.IsDir() {
		desc, err = s.descFromDir(name, mediaType, path)
	} else {
		desc, err = s.descFromFile(info, mediaType, path)
	}
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if desc.Annotations == nil {
		desc.Annotations = map[string]string{}
	}
	desc.Annotations[ocispec.AnnotationTitle] = name
	s.set(desc)
	return desc, nil
}
// Load is a lower-level memory-only version of Add. Rather than taking a path,
// generating a descriptor and creating a reference, it takes raw data and a descriptor
// that describes that data and stores it in memory. It will disappear at process
// termination.
//
// It is especially useful for adding ephemeral data, such as config, that must
// exist in order to walk a manifest.
//
// The error return is always nil; it exists for interface symmetry.
func (s *File) Load(desc ocispec.Descriptor, data []byte) error {
	s.memoryMap.Store(desc.Digest, data)
	return nil
}
// Ref gets a reference's descriptor and its raw (in-memory) content.
// Both the reference and its manifest bytes must already be present,
// otherwise ErrNotFound is returned.
func (s *File) Ref(ref string) (ocispec.Descriptor, []byte, error) {
	desc, found := s.getRef(ref)
	if !found {
		return ocispec.Descriptor{}, nil, ErrNotFound
	}
	// the manifest bytes live in the in-memory map
	manifest, found := s.getMemory(desc)
	if !found {
		return ocispec.Descriptor{}, nil, ErrNotFound
	}
	return desc, manifest, nil
}
// descFromFile builds a descriptor for a regular file: the digest is computed
// by streaming the file, the size taken from info, and the media type
// defaults to DefaultBlobMediaType when unspecified.
func (s *File) descFromFile(info os.FileInfo, mediaType, path string) (ocispec.Descriptor, error) {
	file, err := os.Open(path)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	defer file.Close()
	// named dgst so the imported digest package is not shadowed
	dgst, err := digest.FromReader(file)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if mediaType == "" {
		mediaType = DefaultBlobMediaType
	}
	return ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    dgst,
		Size:      info.Size(),
	}, nil
}
// descFromDir builds a descriptor for a directory: the tree is tarred and
// gzipped into a temporary file (cleaned up by Close), the gzip stream is
// hashed for the descriptor digest, and the inner (uncompressed) tar digest
// is recorded in the AnnotationDigest annotation alongside AnnotationUnpack.
func (s *File) descFromDir(name, mediaType, root string) (ocispec.Descriptor, error) {
	// generate temp file
	file, err := s.tempFile()
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	defer file.Close()
	s.MapPath(name, file.Name())
	// compress directory; digester hashes the gzipped bytes as they are written
	digester := digest.Canonical.Digester()
	zw := gzip.NewWriter(io.MultiWriter(file, digester.Hash()))
	defer zw.Close()
	// tarDigester hashes the raw tar stream, before compression
	tarDigester := digest.Canonical.Digester()
	if err := tarDirectory(root, name, io.MultiWriter(zw, tarDigester.Hash()), s.Reproducible); err != nil {
		return ocispec.Descriptor{}, err
	}
	// flush all; the explicit Close makes the deferred one a harmless no-op
	if err := zw.Close(); err != nil {
		return ocispec.Descriptor{}, err
	}
	if err := file.Sync(); err != nil {
		return ocispec.Descriptor{}, err
	}
	// generate descriptor
	if mediaType == "" {
		mediaType = DefaultBlobDirMediaType
	}
	info, err := file.Stat()
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	return ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    digester.Digest(),
		Size:      info.Size(),
		Annotations: map[string]string{
			AnnotationDigest: tarDigester.Digest().String(),
			AnnotationUnpack: "true",
		},
	}, nil
}
// tempFile creates a temporary file and records it so Close can remove it.
func (s *File) tempFile() (*os.File, error) {
	f, err := ioutil.TempFile("", TempFilePattern)
	if err != nil {
		return nil, err
	}
	s.tmpFiles.Store(f.Name(), f)
	return f, nil
}
// Close frees up resources used by the file store by removing every
// temporary file it created; removal failures are joined into one error.
func (s *File) Close() error {
	var errs []string
	s.tmpFiles.Range(func(name, _ interface{}) bool {
		if err := os.Remove(name.(string)); err != nil {
			errs = append(errs, err.Error())
		}
		// always continue; collect every failure rather than stopping early
		return true
	})
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "; "))
	}
	return nil
}
// resolveWritePath maps name to an on-disk path and validates it is safe to
// write there: unless AllowPathTraversalOnWrite is set, the target must stay
// inside the store root; when DisableOverwrite is set, the target must not
// already exist.
func (s *File) resolveWritePath(name string) (string, error) {
	path := s.ResolvePath(name)
	if !s.AllowPathTraversalOnWrite {
		base, err := filepath.Abs(s.root)
		if err != nil {
			return "", err
		}
		target, err := filepath.Abs(path)
		if err != nil {
			return "", err
		}
		rel, err := filepath.Rel(base, target)
		if err != nil {
			return "", ErrPathTraversalDisallowed
		}
		// normalize separators so the "../" prefix check also works on Windows
		rel = filepath.ToSlash(rel)
		if strings.HasPrefix(rel, "../") || rel == ".." {
			return "", ErrPathTraversalDisallowed
		}
	}
	if s.DisableOverwrite {
		if _, err := os.Stat(path); err == nil {
			return "", ErrOverwriteDisallowed
		} else if !os.IsNotExist(err) {
			return "", err
		}
	}
	return path, nil
}
// createWritePath prepares the filesystem target for a write. Plain blobs get
// a file created directly at path; blobs annotated for unpacking
// (AnnotationUnpack == "true") are written to a temporary file instead, and an
// afterCommit callback is returned that extracts the tar.gz into path,
// checking it against the digest recorded in AnnotationDigest.
func (s *File) createWritePath(path string, desc ocispec.Descriptor, prefix string) (*os.File, func() error, error) {
	if value, ok := desc.Annotations[AnnotationUnpack]; !ok || value != "true" {
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return nil, nil, err
		}
		file, err := os.Create(path)
		return file, nil, err
	}
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, nil, err
	}
	file, err := s.tempFile()
	// NOTE(review): if tempFile failed, file is nil and the closure below would
	// dereference it — callers must check err before invoking afterCommit.
	checksum := desc.Annotations[AnnotationDigest]
	afterCommit := func() error {
		return extractTarGzip(path, prefix, file.Name(), checksum)
	}
	return file, afterCommit, err
}
// MapPath maps name to path, resolving relative paths against the store
// root, and returns the resolved path.
func (s *File) MapPath(name, path string) string {
	path = s.resolvePath(path)
	s.pathMap.Store(name, path)
	return path
}
// ResolvePath returns the path registered for name, falling back to
// resolving the name itself against the store root when no mapping exists.
func (s *File) ResolvePath(name string) string {
	if v, ok := s.pathMap.Load(name); ok {
		if p, isString := v.(string); isString {
			return p
		}
	}
	// using the name as a fallback solution
	return s.resolvePath(name)
}
// resolvePath makes path absolute by joining it to the store root, unless it
// already is absolute.
func (s *File) resolvePath(path string) string {
	if !filepath.IsAbs(path) {
		return filepath.Join(s.root, path)
	}
	return path
}
// set indexes desc by its digest for later retrieval via get.
func (s *File) set(desc ocispec.Descriptor) {
	s.descriptor.Store(desc.Digest, desc)
}
// get looks up the full descriptor previously stored for desc's digest.
func (s *File) get(desc ocispec.Descriptor) (ocispec.Descriptor, bool) {
	if v, ok := s.descriptor.Load(desc.Digest); ok {
		stored, isDesc := v.(ocispec.Descriptor)
		return stored, isDesc
	}
	return ocispec.Descriptor{}, false
}
// getMemory returns the in-memory bytes stored for desc's digest, if any.
func (s *File) getMemory(desc ocispec.Descriptor) ([]byte, bool) {
	if v, ok := s.memoryMap.Load(desc.Digest); ok {
		blob, isBytes := v.([]byte)
		return blob, isBytes
	}
	return nil, false
}
// getRef returns the root descriptor recorded for ref, if any.
func (s *File) getRef(ref string) (ocispec.Descriptor, bool) {
	if v, ok := s.refMap.Load(ref); ok {
		d, isDesc := v.(ocispec.Descriptor)
		return d, isDesc
	}
	return ocispec.Descriptor{}, false
}
// StoreManifest stores a manifest linked to by the provided ref. The children of the
// manifest, such as layers and config, should already exist in the file store, either
// as files linked via Add(), or via Load(). If they do not exist, then a typical
// Fetcher that walks the manifest will hit an unresolved hash.
//
// StoreManifest does *not* validate their presence. The error return is
// always nil; it exists for interface symmetry.
func (s *File) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error {
	s.refMap.Store(ref, desc)
	s.memoryMap.Store(desc.Digest, manifest)
	return nil
}
// fileWriter is a content.Writer that streams a blob into a single file,
// hashing the bytes as they pass through.
type fileWriter struct {
	store       *File              // owning store; updated on Commit
	file        *os.File           // destination file; nil once closed/committed
	desc        ocispec.Descriptor // descriptor being written
	digester    digest.Digester    // running hash of the written bytes
	status      content.Status     // progress surfaced via Status
	afterCommit func() error       // optional post-commit hook (e.g. tar.gz extraction)
}
// Status returns the current progress of the write.
func (w *fileWriter) Status() (content.Status, error) {
	return w.status, nil
}
// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *fileWriter) Digest() digest.Digest {
	return w.digester.Digest()
}
// Write p to the transaction: the bytes go to the backing file, the portion
// actually written is folded into the digester, and the status is updated.
func (w *fileWriter) Write(p []byte) (n int, err error) {
	n, err = w.file.Write(p)
	w.digester.Hash().Write(p[:n])
	// advance by the bytes actually written, not len(p): on a short write the
	// offset would otherwise overstate progress
	w.status.Offset += int64(n)
	w.status.UpdatedAt = time.Now()
	return n, err
}
// Commit finalizes the write: it applies any content.Opts, syncs and closes
// the backing file, validates size and digest against the expected values,
// records the descriptor in the store, and finally runs the afterCommit hook
// (if any).
func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	var base content.Info
	for _, opt := range opts {
		if err := opt(&base); err != nil {
			return err
		}
	}
	if w.file == nil {
		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
	}
	// take ownership of the file and mark the writer closed up front
	file := w.file
	w.file = nil
	if err := file.Sync(); err != nil {
		file.Close()
		return errors.Wrap(err, "sync failed")
	}
	fileInfo, err := file.Stat()
	if err != nil {
		file.Close()
		return errors.Wrap(err, "stat failed")
	}
	if err := file.Close(); err != nil {
		return errors.Wrap(err, "failed to close file")
	}
	// size 0 means "unknown", so only validate when a size was given
	if size > 0 && size != fileInfo.Size() {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size)
	}
	// likewise, an empty expected digest skips digest validation
	if dgst := w.digester.Digest(); expected != "" && expected != dgst {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
	}
	w.store.set(w.desc)
	if w.afterCommit != nil {
		return w.afterCommit()
	}
	return nil
}
// Close the writer, flushing any unwritten data and leaving the progress in
// tact. Safe to call repeatedly; calls after the first are no-ops.
func (w *fileWriter) Close() error {
	if w.file == nil {
		return nil
	}
	w.file.Sync() // best-effort flush; the Close error below is what gets reported
	err := w.file.Close()
	w.file = nil
	return err
}
// Truncate resets the writer to the beginning of the file; only size 0 is
// supported and anything else returns ErrUnsupportedSize.
func (w *fileWriter) Truncate(size int64) error {
	if size != 0 {
		return ErrUnsupportedSize
	}
	// rewind progress, hash state, file position, and file length together
	w.status.Offset = 0
	w.digester.Hash().Reset()
	_, err := w.file.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	return w.file.Truncate(0)
}

View File

@@ -0,0 +1,72 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"compress/gzip"
"fmt"
"io"
"github.com/containerd/containerd/content"
)
// NewGunzipWriter wrap a writer with a gunzip, so that the stream is gunzipped
//
// By default, it calculates the hash when writing. If the option `skipHash` is true,
// it will skip doing the hash. Skipping the hash is intended to be used only
// if you are confident about the validity of the data being passed to the writer,
// and wish to save on the hashing time.
func NewGunzipWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			// NOTE(review): option errors yield a nil writer here — callers
			// should nil-check; confirm this is the intended contract.
			return nil
		}
	}
	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
		gr, err := gzip.NewReader(r)
		if err != nil {
			done <- fmt.Errorf("error creating gzip reader: %v", err)
			return
		}
		// write out the uncompressed data in Blocksize chunks
		b := make([]byte, wOpts.Blocksize, wOpts.Blocksize)
		for {
			var n int
			n, err = gr.Read(b)
			if err != nil && err != io.EOF {
				err = fmt.Errorf("GunzipWriter data read error: %v\n", err)
				break
			}
			// clamp n to the buffer length; defensive, since Read never
			// reports more bytes than the buffer holds
			l := n
			if n > len(b) {
				l = len(b)
			}
			if _, err2 := w.Write(b[:l]); err2 != nil {
				err = fmt.Errorf("GunzipWriter: error writing to underlying writer: %v", err2)
				break
			}
			// EOF may accompany the final chunk, so flush first, then stop
			if err == io.EOF {
				// clear the error
				err = nil
				break
			}
		}
		gr.Close()
		done <- err
	}, opts...)
}

View File

@@ -0,0 +1,26 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"github.com/containerd/containerd/remotes"
)
// Store is the interface that groups a remotes.Pusher and a remotes.Fetcher:
// a target that content can be both pushed to and fetched from.
type Store interface {
	remotes.Pusher
	remotes.Fetcher
}

View File

@@ -0,0 +1,112 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"io"
"io/ioutil"
"github.com/containerd/containerd/content"
"github.com/opencontainers/go-digest"
)
// IoContentWriter writer that wraps an io.Writer, so the results can be streamed to
// an open io.Writer. For example, can be used to pull a layer and write it to a file, or device.
type IoContentWriter struct {
	writer   io.Writer       // destination for all written bytes
	digester digest.Digester // running hash; only updated when hash is nil
	size     int64           // total bytes successfully written
	hash     *digest.Digest  // pre-supplied output hash; when set, hashing is skipped
}
// NewIoContentWriter create a new IoContentWriter.
//
// By default, it calculates the hash when writing. If the option `skipHash` is true,
// it will skip doing the hash. Skipping the hash is intended to be used only
// if you are confident about the validity of the data being passed to the writer,
// and wish to save on the hashing time.
func NewIoContentWriter(writer io.Writer, opts ...WriterOpt) content.Writer {
	w := writer
	// a nil writer degrades gracefully to discarding the data
	if w == nil {
		w = ioutil.Discard
	}
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			// NOTE(review): option errors yield a nil writer — confirm callers
			// nil-check the result.
			return nil
		}
	}
	ioc := &IoContentWriter{
		writer:   w,
		digester: digest.Canonical.Digester(),
		// we take the OutputHash, since the InputHash goes to the passthrough writer,
		// which then passes the processed output to us
		hash: wOpts.OutputHash,
	}
	return NewPassthroughWriter(ioc, func(r io.Reader, w io.Writer, done chan<- error) {
		// write out the data to the io writer
		var (
			err error
		)
		// we could use io.Copy, but calling it with the default blocksize is identical to
		// io.CopyBuffer. Otherwise, we would need some way to let the user flag "I want to use
		// io.Copy", when it should not matter to them
		b := make([]byte, wOpts.Blocksize, wOpts.Blocksize)
		_, err = io.CopyBuffer(w, r, b)
		done <- err
	}, opts...)
}
// Write forwards p to the wrapped writer, tracks the byte count, and folds
// the written bytes into the digester unless an output hash was pre-supplied.
func (w *IoContentWriter) Write(p []byte) (n int, err error) {
	n, err = w.writer.Write(p)
	if err != nil {
		return 0, err
	}
	w.size += int64(n)
	if w.hash == nil {
		w.digester.Hash().Write(p[:n])
	}
	return n, nil
}
// Close is a no-op; the wrapped io.Writer is not closed.
func (w *IoContentWriter) Close() error {
	return nil
}
// Digest returns the digest of the bytes hashed so far by the internal
// digester (empty while nothing has been hashed).
func (w *IoContentWriter) Digest() digest.Digest {
	return w.digester.Digest()
}
// Commit is a no-op for IoContentWriter and always returns nil; size and
// expected are ignored.
func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	return nil
}
// Status returns a zero-value Status; this writer does not track progress.
func (w *IoContentWriter) Status() (content.Status, error) {
	return content.Status{}, nil
}
// Truncate is a no-op for IoContentWriter and always returns nil; size is
// ignored.
func (w *IoContentWriter) Truncate(size int64) error {
	return nil
}

View File

@@ -0,0 +1,95 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"encoding/json"
"sort"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
artifact "oras.land/oras-go/pkg/artifact"
)
// GenerateManifest generates a manifest from the given config (a blank one is
// generated when nil), annotations, and layer descriptors. Raw bytes plus the
// manifest descriptor will be returned.
func GenerateManifest(config *ocispec.Descriptor, annotations map[string]string, descs ...ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) {
	cfg := config
	if cfg == nil {
		// no config supplied, so create the canonical empty one
		_, generated, err := GenerateConfig(nil)
		if err != nil {
			return nil, ocispec.Descriptor{}, err
		}
		cfg = &generated
	}
	return pack(*cfg, annotations, descs)
}
// GenerateConfig generates a blank config ("{}") with optional annotations,
// returning its raw bytes and descriptor.
func GenerateConfig(annotations map[string]string) ([]byte, ocispec.Descriptor, error) {
	raw := []byte("{}")
	desc := ocispec.Descriptor{
		MediaType:   artifact.UnknownConfigMediaType,
		Digest:      digest.FromBytes(raw),
		Size:        int64(len(raw)),
		Annotations: annotations,
	}
	return raw, desc, nil
}
// GenerateManifestAndConfig generates a blank config and a manifest that
// references it plus descs. Raw bytes and descriptors for both are returned.
func GenerateManifestAndConfig(manifestAnnotations map[string]string, configAnnotations map[string]string, descs ...ocispec.Descriptor) (manifest []byte, manifestDesc ocispec.Descriptor, config []byte, configDesc ocispec.Descriptor, err error) {
	if config, configDesc, err = GenerateConfig(configAnnotations); err != nil {
		return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err
	}
	if manifest, manifestDesc, err = GenerateManifest(&configDesc, manifestAnnotations, descs...); err != nil {
		return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err
	}
	return manifest, manifestDesc, config, configDesc, nil
}
// pack assembles an OCI manifest referencing config and all descriptors
// (sorted by digest for deterministic output), and returns its JSON encoding
// together with a descriptor for the manifest itself.
func pack(config ocispec.Descriptor, annotations map[string]string, descriptors []ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) {
	layers := descriptors
	if layers == nil {
		// use an empty array, not null, to prevent potential server-side bugs
		layers = []ocispec.Descriptor{}
	}
	// sort descriptors alphanumerically by sha hash so it always is consistent
	sort.Slice(layers, func(a, b int) bool {
		return layers[a].Digest < layers[b].Digest
	})
	m := ocispec.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2, // historical value. does not pertain to OCI or docker version
		},
		Config:      config,
		Layers:      layers,
		Annotations: annotations,
	}
	raw, err := json.Marshal(m)
	if err != nil {
		return nil, ocispec.Descriptor{}, err
	}
	desc := ocispec.Descriptor{
		MediaType: ocispec.MediaTypeImageManifest,
		Digest:    digest.FromBytes(raw),
		Size:      int64(len(raw)),
	}
	return raw, desc, nil
}

View File

@@ -0,0 +1,284 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Memory provides content from the memory
type Memory struct {
	descriptor map[digest.Digest]ocispec.Descriptor // digest -> descriptor index
	content    map[digest.Digest][]byte             // digest -> raw blob bytes
	nameMap    map[string]ocispec.Descriptor        // name annotation -> descriptor
	refMap     map[string]ocispec.Descriptor        // reference -> root descriptor
	// lock guards the maps above. NOTE(review): not every accessor in this
	// file takes it (e.g. Resolve/Fetcher read refMap directly) — verify
	// concurrent use is safe or route all access through the lock.
	lock *sync.Mutex
}
// NewMemory creates a new memory store with all internal maps initialized.
func NewMemory() *Memory {
	return &Memory{
		descriptor: make(map[digest.Digest]ocispec.Descriptor),
		content:    make(map[digest.Digest][]byte),
		nameMap:    make(map[string]ocispec.Descriptor),
		refMap:     make(map[string]ocispec.Descriptor),
		lock:       &sync.Mutex{},
	}
}
// Resolver returns the store itself as a remotes.Resolver.
func (s *Memory) Resolver() remotes.Resolver {
	return s
}
// Resolve looks up a previously stored reference and returns its descriptor.
// NOTE(review): refMap is read here without taking s.lock, unlike the
// content/name maps — confirm callers do not race with StoreManifest/Push.
func (s *Memory) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	desc, ok := s.refMap[ref]
	if !ok {
		return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref)
	}
	return ref, desc, nil
}
// Fetcher returns the store itself as a remotes.Fetcher, after verifying the
// reference is known.
func (s *Memory) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
	if _, ok := s.refMap[ref]; !ok {
		return nil, fmt.Errorf("unknown reference: %s", ref)
	}
	return s, nil
}
// Fetch returns an io.ReadCloser over the bytes stored for desc, or
// ErrNotFound when the digest is not present in the store.
func (s *Memory) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	_, data, found := s.Get(desc)
	if !found {
		return nil, ErrNotFound
	}
	return ioutil.NopCloser(bytes.NewReader(data)), nil
}
// Pusher returns a remotes.Pusher for ref, splitting an optional
// "name@digest" reference into its tag and hash halves.
func (s *Memory) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	var (
		tag  string
		hash string
	)
	if parts := strings.SplitN(ref, "@", 2); len(parts) == 2 {
		tag, hash = parts[0], parts[1]
	} else if len(parts) == 1 {
		tag = parts[0]
	}
	return &memoryPusher{
		store: s,
		ref:   tag,
		hash:  hash,
	}, nil
}
// memoryPusher pushes content into a Memory store. ref/hash come from the
// "name@digest" reference the pusher was created with.
type memoryPusher struct {
	store *Memory
	ref   string
	hash  string
}
// Push returns a content.Writer that accumulates desc's bytes in memory.
// When desc is the root (its digest matches the hash half of the reference),
// the reference is recorded in the store's refMap.
func (s *memoryPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	name, _ := ResolveName(desc)
	now := time.Now()
	// is this the root?
	if desc.Digest.String() == s.hash {
		s.store.refMap[s.ref] = desc
	}
	return &memoryWriter{
		store:    s.store,
		buffer:   bytes.NewBuffer(nil),
		desc:     desc,
		digester: digest.Canonical.Digester(),
		status: content.Status{
			Ref:       name,
			Total:     desc.Size,
			StartedAt: now,
			UpdatedAt: now,
		},
	}, nil
}
// Add stores content under an optional name and media type, generating a
// descriptor for it and returning that descriptor.
func (s *Memory) Add(name, mediaType string, content []byte) (ocispec.Descriptor, error) {
	if mediaType == "" {
		mediaType = DefaultBlobMediaType
	}
	desc := ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    digest.FromBytes(content),
		Size:      int64(len(content)),
	}
	// only attach a title annotation when a name was supplied
	if name != "" {
		desc.Annotations = map[string]string{
			ocispec.AnnotationTitle: name,
		}
	}
	s.Set(desc, content)
	return desc, nil
}
// Set adds the content to the store under its digest, and additionally
// indexes it by name when the descriptor carries an AnnotationTitle.
func (s *Memory) Set(desc ocispec.Descriptor, content []byte) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.descriptor[desc.Digest] = desc
	s.content[desc.Digest] = content
	if name, ok := ResolveName(desc); ok && name != "" {
		s.nameMap[name] = desc
	}
}
// Get finds the stored descriptor and bytes for desc's digest. The boolean
// result is false when either the descriptor or its content is missing.
func (s *Memory) Get(desc ocispec.Descriptor) (ocispec.Descriptor, []byte, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()
	found, ok := s.descriptor[desc.Digest]
	if !ok {
		return ocispec.Descriptor{}, nil, false
	}
	data, ok := s.content[found.Digest]
	return found, data, ok
}
// GetByName finds stored content by name (i.e. AnnotationTitle). The boolean
// result is false when the name is unknown or its content is missing.
func (s *Memory) GetByName(name string) (ocispec.Descriptor, []byte, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()
	found, ok := s.nameMap[name]
	if !ok {
		return ocispec.Descriptor{}, nil, false
	}
	data, ok := s.content[found.Digest]
	return found, data, ok
}
// StoreManifest stores a manifest linked to by the provided ref. The children of the
// manifest, such as layers and config, should already exist in the file store, either
// as files linked via Add(), or via Set(). If they do not exist, then a typical
// Fetcher that walks the manifest will hit an unresolved hash.
//
// StoreManifest does *not* validate their presence.
//
// NOTE(review): the refMap write below is not guarded by s.lock (Add/Set are);
// confirm concurrent StoreManifest/Resolve calls are not expected. The error
// from Add is also intentionally discarded — Add currently never fails.
func (s *Memory) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error {
	s.refMap[ref] = desc
	s.Add("", desc.MediaType, manifest)
	return nil
}
// descFromBytes builds an OCI descriptor for the given bytes, defaulting the
// media type to DefaultBlobMediaType when none is provided.
//
// The error return is kept for interface stability with existing callers,
// although digest.FromBytes (unlike the previous digest.FromReader round-trip
// through a bytes.Reader) cannot fail.
func descFromBytes(b []byte, mediaType string) (ocispec.Descriptor, error) {
	if mediaType == "" {
		mediaType = DefaultBlobMediaType
	}
	return ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    digest.FromBytes(b),
		Size:      int64(len(b)),
	}, nil
}
// memoryWriter implements content.Writer by buffering bytes in memory and
// committing them to the backing Memory store.
type memoryWriter struct {
	store    *Memory
	buffer   *bytes.Buffer // nil once the writer is closed or committed
	desc     ocispec.Descriptor
	digester digest.Digester // running digest of bytes written so far
	status   content.Status
}
// Status returns the current write status (ref, totals, timestamps).
func (w *memoryWriter) Status() (content.Status, error) {
	return w.status, nil
}
// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *memoryWriter) Digest() digest.Digest {
	return w.digester.Digest()
}
// Write appends p to the in-memory buffer and feeds the digester with the
// bytes that were actually accepted.
func (w *memoryWriter) Write(p []byte) (n int, err error) {
	n, err = w.buffer.Write(p)
	w.digester.Hash().Write(p[:n])
	// Advance the reported offset by the bytes actually written (n), not
	// len(p), so status stays accurate if the buffer ever writes short.
	w.status.Offset += int64(n)
	w.status.UpdatedAt = time.Now()
	return n, err
}
// Commit finalizes the write: it applies the content options, validates the
// optional size and expected digest against what was written, stores the
// bytes in the Memory store, and invalidates the writer's buffer.
func (w *memoryWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	var base content.Info
	for _, opt := range opts {
		if err := opt(&base); err != nil {
			return err
		}
	}
	if w.buffer == nil {
		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
	}
	content := w.buffer.Bytes()
	w.buffer = nil // mark the writer as committed; further commits fail above
	if size > 0 && size != int64(len(content)) {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", len(content), size)
	}
	if dgst := w.digester.Digest(); expected != "" && expected != dgst {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
	}
	w.store.Set(w.desc, content)
	return nil
}
// Close discards the buffered content without committing it.
func (w *memoryWriter) Close() error {
	w.buffer = nil
	return nil
}
// Truncate resets the writer to empty; only size 0 is supported.
// NOTE(review): w.buffer is nil after Close/Commit, so Truncate on a closed
// writer would panic — confirm callers only truncate active writers.
func (w *memoryWriter) Truncate(size int64) error {
	if size != 0 {
		return ErrUnsupportedSize
	}
	w.status.Offset = 0
	w.digester.Hash().Reset()
	w.buffer.Truncate(0)
	return nil
}

View File

@@ -0,0 +1,56 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"fmt"
"io"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// MultiReader store to read content from multiple stores. It finds the content by asking each underlying
// store to find the content, which it does based on the hash.
//
// Example:
//        fileStore := NewFileStore(rootPath)
//        memoryStore := NewMemoryStore()
//        // load up content in fileStore and memoryStore
//        multiStore := MultiReader([]content.Provider{fileStore, memoryStore})
//
// You now can use multiStore anywhere that content.Provider is accepted
type MultiReader struct {
	stores []remotes.Fetcher // queried in order by Fetch
}
// AddStore adds one or more stores to read from.
func (m *MultiReader) AddStore(store ...remotes.Fetcher) {
	m.stores = append(m.stores, store...)
}
// Fetch returns a reader for the given descriptor, trying each underlying
// store in order and returning the first successful result.
func (m MultiReader) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	for _, store := range m.stores {
		r, err := store.Fetch(ctx, desc)
		if r != nil && err == nil {
			return r, nil
		}
	}
	// we did not find any
	return nil, fmt.Errorf("not found")
}

View File

@@ -0,0 +1,42 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
ctrcontent "github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// MultiWriterIngester an ingester that can provide a single writer or multiple writers for a single
// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer
// that is a tar file with multiple files, each of which should go to a different stream, some of which
// should not be handled at all.
type MultiWriterIngester interface {
	ctrcontent.Ingester
	// Writers returns a lookup function mapping an item name to its writer.
	Writers(ctx context.Context, opts ...ctrcontent.WriterOpt) (func(string) (ctrcontent.Writer, error), error)
}
// MultiWriterPusher a pusher that can provide a single writer or multiple writers for a single
// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer
// that is a tar file with multiple files, each of which should go to a different stream, some of which
// should not be handled at all.
type MultiWriterPusher interface {
	remotes.Pusher
	// Pushers returns a lookup function mapping an item name to its writer.
	Pushers(ctx context.Context, desc ocispec.Descriptor) (func(string) (ctrcontent.Writer, error), error)
}

View File

@@ -0,0 +1,336 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/remotes"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// OCI provides content from the file system with the OCI-Image layout.
// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md
type OCI struct {
	content.Store                                // backed by a containerd local store rooted at root
	root          string                         // layout root directory
	index         *ocispec.Index                 // parsed index.json
	nameMap       map[string]ocispec.Descriptor  // AnnotationRefName -> manifest descriptor
}
// NewOCI creates a new OCI store rooted at rootPath, ensuring the oci-layout
// file exists and loading (or initializing) index.json.
func NewOCI(rootPath string) (*OCI, error) {
	fileStore, err := local.NewStore(rootPath)
	if err != nil {
		return nil, err
	}
	store := &OCI{
		Store: fileStore,
		root:  rootPath,
	}
	if err := store.validateOCILayoutFile(); err != nil {
		return nil, err
	}
	if err := store.LoadIndex(); err != nil {
		return nil, err
	}
	return store, nil
}
// LoadIndex reads the index.json from the file system. A missing file is not
// an error: a fresh empty index and name map are initialized instead.
func (s *OCI) LoadIndex() error {
	path := filepath.Join(s.root, OCIImageIndexFile)
	indexFile, err := os.Open(path)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		s.index = &ocispec.Index{
			Versioned: specs.Versioned{
				SchemaVersion: 2, // historical value
			},
		}
		s.nameMap = make(map[string]ocispec.Descriptor)
		return nil
	}
	defer indexFile.Close()
	if err := json.NewDecoder(indexFile).Decode(&s.index); err != nil {
		return err
	}
	// rebuild the name lookup from the manifests' ref-name annotations
	s.nameMap = make(map[string]ocispec.Descriptor)
	for _, desc := range s.index.Manifests {
		if name := desc.Annotations[ocispec.AnnotationRefName]; name != "" {
			s.nameMap[name] = desc
		}
	}
	return nil
}
// SaveIndex writes the index.json to the file system, rebuilding the manifest
// list from the name map first.
// NOTE(review): iteration over s.nameMap is in random order, so the manifest
// ordering in index.json is nondeterministic across saves.
func (s *OCI) SaveIndex() error {
	// first need to update the index
	var descs []ocispec.Descriptor
	for name, desc := range s.nameMap {
		if desc.Annotations == nil {
			desc.Annotations = map[string]string{}
		}
		desc.Annotations[ocispec.AnnotationRefName] = name
		descs = append(descs, desc)
	}
	s.index.Manifests = descs
	indexJSON, err := json.Marshal(s.index)
	if err != nil {
		return err
	}
	path := filepath.Join(s.root, OCIImageIndexFile)
	return ioutil.WriteFile(path, indexJSON, 0644)
}
// Resolver returns the store itself as a remotes.Resolver.
func (s *OCI) Resolver() remotes.Resolver {
	return s
}
// Resolve reloads the index from disk and looks up the descriptor for ref.
func (s *OCI) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	if err := s.LoadIndex(); err != nil {
		return "", ocispec.Descriptor{}, err
	}
	desc, ok := s.nameMap[ref]
	if !ok {
		return "", ocispec.Descriptor{}, fmt.Errorf("reference %s not in store", ref)
	}
	return ref, desc, nil
}
// Fetcher returns the store itself as a remotes.Fetcher, after reloading the
// index and verifying the reference exists.
func (s *OCI) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
	if err := s.LoadIndex(); err != nil {
		return nil, err
	}
	if _, ok := s.nameMap[ref]; !ok {
		return nil, fmt.Errorf("reference %s not in store", ref)
	}
	return s, nil
}
// Fetch get an io.ReadCloser for the specific content
func (s *OCI) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	readerAt, err := s.Store.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	// just wrap the ReaderAt with a Reader
	return ioutil.NopCloser(&ReaderAtWrapper{readerAt: readerAt}), nil
}
// Pusher returns a remotes.Pusher for the given ref, splitting an optional
// "name@digest" reference into its base ref and digest halves.
func (s *OCI) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	var baseRef, hash string
	if idx := strings.Index(ref, "@"); idx >= 0 {
		baseRef, hash = ref[:idx], ref[idx+1:]
	} else {
		baseRef = ref
	}
	return &ociPusher{oci: s, ref: baseRef, digest: hash}, nil
}
// AddReference adds or updates a reference in the in-memory index (SaveIndex
// must be called to persist it). The ref-name annotation is stamped onto the
// descriptor so the index round-trips through disk.
func (s *OCI) AddReference(name string, desc ocispec.Descriptor) {
	if desc.Annotations == nil {
		desc.Annotations = map[string]string{
			ocispec.AnnotationRefName: name,
		}
	} else {
		desc.Annotations[ocispec.AnnotationRefName] = name
	}
	if _, ok := s.nameMap[name]; ok {
		// update: replace the existing manifest entry in place
		s.nameMap[name] = desc
		for i, ref := range s.index.Manifests {
			if name == ref.Annotations[ocispec.AnnotationRefName] {
				s.index.Manifests[i] = desc
				return
			}
		}
		// Process should not reach here.
		// Fallthrough to `Add` scenario and recover.
		s.index.Manifests = append(s.index.Manifests, desc)
		return
	}
	s.index.Manifests = append(s.index.Manifests, desc)
	s.nameMap[name] = desc
}
// DeleteReference deletes a reference from the in-memory index (SaveIndex
// must be called to persist it). The manifest entry is removed by swapping in
// the last element, so manifest ordering is not preserved.
func (s *OCI) DeleteReference(name string) {
	if _, ok := s.nameMap[name]; !ok {
		return
	}
	delete(s.nameMap, name)
	for i, desc := range s.index.Manifests {
		if name == desc.Annotations[ocispec.AnnotationRefName] {
			s.index.Manifests[i] = s.index.Manifests[len(s.index.Manifests)-1]
			s.index.Manifests = s.index.Manifests[:len(s.index.Manifests)-1]
			return
		}
	}
}
// ListReferences lists all references in the index.
// NOTE(review): this returns the store's internal map, not a copy — mutating
// the result mutates the store.
func (s *OCI) ListReferences() map[string]ocispec.Descriptor {
	return s.nameMap
}
// validateOCILayoutFile ensures the `oci-layout` file exists and declares a
// supported layout version; when missing, a fresh one is written.
func (s *OCI) validateOCILayoutFile() error {
	layoutFilePath := filepath.Join(s.root, ocispec.ImageLayoutFile)
	layoutFile, err := os.Open(layoutFilePath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// no layout file yet: create one with the current version
		layout := ocispec.ImageLayout{
			Version: ocispec.ImageLayoutVersion,
		}
		layoutJSON, err := json.Marshal(layout)
		if err != nil {
			return err
		}
		return ioutil.WriteFile(layoutFilePath, layoutJSON, 0644)
	}
	defer layoutFile.Close()
	var layout *ocispec.ImageLayout
	err = json.NewDecoder(layoutFile).Decode(&layout)
	if err != nil {
		return err
	}
	if layout.Version != ocispec.ImageLayoutVersion {
		return ErrUnsupportedVersion
	}
	return nil
}
// TODO: implement (needed to create a content.Store)
// TODO: do not return empty content.Info
// Info returns the info for the given digest; currently a stub that returns
// an empty content.Info and no error.
func (s *OCI) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
	return content.Info{}, nil
}
// TODO: implement (needed to create a content.Store)
// Update updates mutable information related to content.
// If one or more fieldpaths are provided, only those
// fields will be updated.
// Mutable fields:
//
//	labels.*
func (s *OCI) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
	return content.Info{}, errors.New("not yet implemented: Update (content.Store interface)")
}
// TODO: implement (needed to create a content.Store)
// Walk will call fn for each item in the content store which
// match the provided filters. If no filters are given all
// items will be walked.
func (s *OCI) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
	return errors.New("not yet implemented: Walk (content.Store interface)")
}
// Delete removes the content from the store, delegating to the underlying
// containerd local store.
func (s *OCI) Delete(ctx context.Context, dgst digest.Digest) error {
	return s.Store.Delete(ctx, dgst)
}
// TODO: implement (needed to create a content.Store)
// Status returns the status of the provided ref.
func (s *OCI) Status(ctx context.Context, ref string) (content.Status, error) {
	return content.Status{}, errors.New("not yet implemented: Status (content.Store interface)")
}
// TODO: implement (needed to create a content.Store)
// ListStatuses returns the status of any active ingestions whose ref match the
// provided regular expression. If empty, all active ingestions will be
// returned.
func (s *OCI) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
	return []content.Status{}, errors.New("not yet implemented: ListStatuses (content.Store interface)")
}
// TODO: implement (needed to create a content.Store)
// Abort completely cancels the ingest operation targeted by ref.
func (s *OCI) Abort(ctx context.Context, ref string) error {
	return errors.New("not yet implemented: Abort (content.Store interface)")
}
// ReaderAt provides contents, delegating to the underlying containerd store.
func (s *OCI) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	return s.Store.ReaderAt(ctx, desc)
}
// ociPusher pushes content for a single reference; it can handle multiple descriptors.
// Needs to be able to recognize when a root manifest is being pushed and to create the tag
// for it.
type ociPusher struct {
	oci    *OCI
	ref    string // base (tag) portion of the reference
	digest string // digest portion identifying the root manifest, may be empty
}
// Push get a writer for a single Descriptor. When the pushed descriptor is a
// manifest/index whose digest matches the root digest of the reference, the
// tag is recorded in the layout's index.json before the writer is handed out.
func (p *ociPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	// do we need to create a tag?
	switch desc.MediaType {
	case ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
		// if the hash of the content matches that which was provided as the hash for the root, mark it
		if p.digest != "" && p.digest == desc.Digest.String() {
			if err := p.oci.LoadIndex(); err != nil {
				return nil, err
			}
			p.oci.nameMap[p.ref] = desc
			if err := p.oci.SaveIndex(); err != nil {
				return nil, err
			}
		}
	}
	return p.oci.Store.Writer(ctx, content.WithDescriptor(desc), content.WithRef(p.ref))
}

View File

@@ -0,0 +1,112 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"errors"
"github.com/opencontainers/go-digest"
)
// WriterOpts collects the options applied when constructing content writers.
type WriterOpts struct {
	InputHash           *digest.Digest // pre-computed digest of the input stream; suppresses hashing when set
	OutputHash          *digest.Digest // pre-computed digest of the output stream; suppresses hashing when set
	Blocksize           int            // blocksize used by stream processors
	MultiWriterIngester bool           // treat the ingester as a MultiWriterIngester
	IgnoreNoName        bool           // pass nameless descriptors to a nil writer instead of erroring
}

// WriterOpt mutates a WriterOpts, returning an error on invalid input.
type WriterOpt func(*WriterOpts) error
// DefaultWriterOpts returns the default writer options: no expected hashes,
// DefaultBlocksize, and nameless descriptors silently ignored.
func DefaultWriterOpts() WriterOpts {
	return WriterOpts{
		InputHash:    nil,
		OutputHash:   nil,
		Blocksize:    DefaultBlocksize,
		IgnoreNoName: true,
	}
}
// WithInputHash provide the expected input hash to a writer. Writers
// may suppress their own calculation of a hash on the stream, taking this
// hash instead. If the Writer processes the data before passing it on to another
// Writer layer, this is the hash of the *input* stream.
//
// To have a blank hash, use WithInputHash(BlankHash).
func WithInputHash(hash digest.Digest) WriterOpt {
	return func(w *WriterOpts) error {
		w.InputHash = &hash
		return nil
	}
}
// WithOutputHash provide the expected output hash to a writer. Writers
// may suppress their own calculation of a hash on the stream, taking this
// hash instead. If the Writer processes the data before passing it on to another
// Writer layer, this is the hash of the *output* stream.
//
// To have a blank hash, use WithOutputHash(BlankHash).
func WithOutputHash(hash digest.Digest) WriterOpt {
	return func(w *WriterOpts) error {
		w.OutputHash = &hash
		return nil
	}
}
// WithBlocksize set the blocksize used by the processor of data.
// The default is DefaultBlocksize, which is the same as that used by io.Copy.
// Includes a safety check to ensure the caller doesn't actively set it to <= 0.
func WithBlocksize(blocksize int) WriterOpt {
	return func(w *WriterOpts) error {
		if blocksize <= 0 {
			// both zero and negative sizes are rejected, so the message must
			// say "greater than 0"; the old "greater than or equal to 0"
			// contradicted the check above
			return errors.New("blocksize must be greater than 0")
		}
		w.Blocksize = blocksize
		return nil
	}
}
// WithMultiWriterIngester the passed ingester also implements MultiWriter
// and should be used as such. If this is set to true, but the ingester does not
// implement MultiWriter, calling Writer should return an error.
func WithMultiWriterIngester() WriterOpt {
	return func(w *WriterOpts) error {
		w.MultiWriterIngester = true
		return nil
	}
}
// WithErrorOnNoName some ingesters, when creating a Writer, do not return an error if
// the descriptor does not have a valid name on the descriptor. Passing WithErrorOnNoName
// tells the writer to return an error instead of passing the data to a nil writer.
func WithErrorOnNoName() WriterOpt {
	return func(w *WriterOpts) error {
		w.IgnoreNoName = false
		return nil
	}
}
// WithIgnoreNoName some ingesters, when creating a Writer, return an error if
// the descriptor does not have a valid name on the descriptor. Passing WithIgnoreNoName
// tells the writer not to return an error, but rather to pass the data to a nil writer.
//
// Deprecated: Use WithErrorOnNoName
func WithIgnoreNoName() WriterOpt {
	return func(w *WriterOpts) error {
		w.IgnoreNoName = true
		return nil
	}
}

View File

@@ -0,0 +1,286 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"errors"
"io"
"time"
"github.com/containerd/containerd/content"
"github.com/opencontainers/go-digest"
)
// PassthroughWriter takes an input stream and passes it through to an underlying writer,
// while providing the ability to manipulate the stream before it gets passed through
type PassthroughWriter struct {
	writer           content.Writer    // final destination writer
	pipew            *io.PipeWriter    // feeds the processing goroutine
	digester         digest.Digester   // digest of the raw input stream
	size             int64             // bytes accepted from the caller
	underlyingWriter *underlyingWriter // tracks size/digest of the processed output
	reader           *io.PipeReader    // read end consumed by the processing func
	hash             *digest.Digest    // expected input hash; suppresses digesting when set
	done             chan error        // completion signal from the processing func
}
// NewPassthroughWriter creates a pass-through writer that allows for processing
// the content via an arbitrary function. The function should do whatever processing it
// wants, reading from the Reader to the Writer. When done, it must indicate via
// sending an error or nil to the Done
//
// NOTE(review): if any WriterOpt returns an error, this returns nil rather
// than surfacing the error — callers will only see it as a nil content.Writer.
func NewPassthroughWriter(writer content.Writer, f func(r io.Reader, w io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}
	r, w := io.Pipe()
	pw := &PassthroughWriter{
		writer:   writer,
		pipew:    w,
		digester: digest.Canonical.Digester(),
		underlyingWriter: &underlyingWriter{
			writer:   writer,
			digester: digest.Canonical.Digester(),
			hash:     wOpts.OutputHash,
		},
		reader: r,
		hash:   wOpts.InputHash,
		done:   make(chan error, 1),
	}
	// run the user's processing function between the pipe and the real writer
	go f(r, pw.underlyingWriter, pw.done)
	return pw
}
// Write feeds p into the processing pipe, digesting the accepted bytes unless
// an expected input hash was supplied.
func (pw *PassthroughWriter) Write(p []byte) (n int, err error) {
	n, err = pw.pipew.Write(p)
	if pw.hash == nil {
		pw.digester.Hash().Write(p[:n])
	}
	pw.size += int64(n)
	return
}
// Close closes the pipe feeding the processing goroutine and then the
// underlying writer, propagating the underlying writer's Close error.
func (pw *PassthroughWriter) Close() error {
	if pw.pipew != nil {
		pw.pipew.Close()
	}
	// previously the underlying writer's Close error was silently dropped
	return pw.writer.Close()
}
// Digest may return empty digest or panics until committed.
// If an expected input hash was supplied, it is returned as-is.
func (pw *PassthroughWriter) Digest() digest.Digest {
	if pw.hash != nil {
		return *pw.hash
	}
	return pw.digester.Digest()
}
// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
// Commit always closes the writer, even on error.
// ErrAlreadyExists aborts the writer.
func (pw *PassthroughWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	// closing the pipe lets the processing goroutine see EOF and finish
	if pw.pipew != nil {
		pw.pipew.Close()
	}
	err := <-pw.done
	if pw.reader != nil {
		pw.reader.Close()
	}
	if err != nil && err != io.EOF {
		return err
	}
	// Some underlying writers will validate an expected digest, so we need the option to pass it
	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
	return pw.writer.Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
}
// Status returns the current state of write, delegating to the underlying writer.
func (pw *PassthroughWriter) Status() (content.Status, error) {
	return pw.writer.Status()
}
// Truncate updates the size of the target blob, delegating to the underlying writer.
func (pw *PassthroughWriter) Truncate(size int64) error {
	return pw.writer.Truncate(size)
}
// underlyingWriter implementation of io.Writer to write to the underlying
// io.Writer
type underlyingWriter struct {
	writer   content.Writer
	digester digest.Digester // digest of the processed output stream
	size     int64           // bytes written to the underlying writer
	hash     *digest.Digest  // expected output hash; suppresses digesting when set
}
// Write writes p to the underlying writer, tracking the size and (unless an
// expected output hash was supplied) the running digest of the bytes that
// were actually accepted.
func (u *underlyingWriter) Write(p []byte) (int, error) {
	n, err := u.writer.Write(p)
	if u.hash == nil {
		// hash only the bytes the writer actually accepted
		u.digester.Hash().Write(p[:n])
	}
	u.size += int64(n)
	// Return n even when err is non-nil: the io.Writer contract requires
	// reporting how many bytes were consumed (the old code returned 0 on
	// error, and sized/hashed by len(p) instead of n).
	return n, err
}
// Size get total size written
func (u *underlyingWriter) Size() int64 {
	return u.size
}
// Digest may return empty digest or panics until committed.
// If an expected output hash was supplied, it is returned as-is.
func (u *underlyingWriter) Digest() digest.Digest {
	if u.hash != nil {
		return *u.hash
	}
	return u.digester.Digest()
}
// PassthroughMultiWriter single writer that passes through to multiple writers, allowing the passthrough
// function to select which writer to use.
type PassthroughMultiWriter struct {
	writers   []*PassthroughWriter // per-name output writers created on demand
	pipew     *io.PipeWriter       // feeds the processing goroutine
	digester  digest.Digester      // digest of the raw input stream
	size      int64                // bytes accepted from the caller
	reader    *io.PipeReader       // read end consumed by the processing func
	hash      *digest.Digest       // expected input hash; suppresses digesting when set
	done      chan error           // completion signal from the processing func
	startedAt time.Time
	updatedAt time.Time
}
// NewPassthroughMultiWriter creates a pass-through writer whose processing
// function can fan content out to multiple named writers, looked up lazily
// via the getwriter callback it receives.
//
// NOTE(review): as with NewPassthroughWriter, an option error yields a nil
// content.Writer rather than an error.
func NewPassthroughMultiWriter(writers func(name string) (content.Writer, error), f func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}
	r, w := io.Pipe()
	pmw := &PassthroughMultiWriter{
		startedAt: time.Now(),
		updatedAt: time.Now(),
		done:      make(chan error, 1),
		digester:  digest.Canonical.Digester(),
		hash:      wOpts.InputHash,
		pipew:     w,
		reader:    r,
	}
	// get our output writers: each lookup creates a PassthroughWriter and
	// registers it so Commit/Close can fan out later
	getwriter := func(name string) io.Writer {
		writer, err := writers(name)
		if err != nil || writer == nil {
			return nil
		}
		pw := &PassthroughWriter{
			writer:   writer,
			digester: digest.Canonical.Digester(),
			underlyingWriter: &underlyingWriter{
				writer:   writer,
				digester: digest.Canonical.Digester(),
				hash:     wOpts.OutputHash,
			},
			done: make(chan error, 1),
		}
		pmw.writers = append(pmw.writers, pw)
		return pw.underlyingWriter
	}
	go f(r, getwriter, pmw.done)
	return pmw
}
// Write feeds p into the processing pipe, digesting the accepted bytes unless
// an expected input hash was supplied.
func (pmw *PassthroughMultiWriter) Write(p []byte) (n int, err error) {
	n, err = pmw.pipew.Write(p)
	if pmw.hash == nil {
		pmw.digester.Hash().Write(p[:n])
	}
	pmw.size += int64(n)
	pmw.updatedAt = time.Now()
	return
}
// Close closes the feeding pipe and each fan-out writer.
// NOTE(review): errors from the individual writers' Close are discarded.
func (pmw *PassthroughMultiWriter) Close() error {
	pmw.pipew.Close()
	for _, w := range pmw.writers {
		w.Close()
	}
	return nil
}
// Digest may return empty digest or panics until committed.
// If an expected input hash was supplied, it is returned as-is.
func (pmw *PassthroughMultiWriter) Digest() digest.Digest {
	if pmw.hash != nil {
		return *pmw.hash
	}
	return pmw.digester.Digest()
}
// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
// Commit always closes the writer, even on error.
// ErrAlreadyExists aborts the writer.
func (pmw *PassthroughMultiWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	pmw.pipew.Close()
	err := <-pmw.done
	if pmw.reader != nil {
		pmw.reader.Close()
	}
	if err != nil && err != io.EOF {
		return err
	}
	// Some underlying writers will validate an expected digest, so we need the option to pass it
	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
	for _, w := range pmw.writers {
		// maybe this should be Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
		// unblock each child writer's Commit, which waits on its done channel
		w.done <- err
		if err := w.Commit(ctx, size, expected, opts...); err != nil {
			return err
		}
	}
	return nil
}
// Status returns the current state of write, synthesized from the
// multiwriter's own counters rather than any single child writer.
func (pmw *PassthroughMultiWriter) Status() (content.Status, error) {
	return content.Status{
		StartedAt: pmw.startedAt,
		UpdatedAt: pmw.updatedAt,
		Total:     pmw.size,
	}, nil
}
// Truncate updates the size of the target blob, but cannot do anything with a multiwriter
func (pmw *PassthroughMultiWriter) Truncate(size int64) error {
	return errors.New("truncate unavailable on multiwriter")
}

View File

@@ -0,0 +1,68 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"io"
"github.com/containerd/containerd/content"
)
// ensure interface: compile-time check that sizeReaderAt satisfies
// content.ReaderAt.
var (
	_ content.ReaderAt = sizeReaderAt{}
)
// readAtCloser combines random-access reads with Close.
type readAtCloser interface {
	io.ReaderAt
	io.Closer
}
// sizeReaderAt pairs a readAtCloser with a known size, satisfying
// content.ReaderAt.
type sizeReaderAt struct {
	readAtCloser
	size int64
}
// Size returns the recorded total size of the content.
func (ra sizeReaderAt) Size() int64 {
	return ra.size
}
// NopCloserAt wraps an io.ReaderAt with a no-op Close method.
func NopCloserAt(r io.ReaderAt) nopCloserAt {
	return nopCloserAt{r}
}
// nopCloserAt is an io.ReaderAt whose Close does nothing.
type nopCloserAt struct {
	io.ReaderAt
}
// Close is a no-op, satisfying io.Closer.
func (n nopCloserAt) Close() error {
	return nil
}
// ReaderAtWrapper wraps a ReaderAt to give a Reader, tracking the current
// read offset itself.
type ReaderAtWrapper struct {
	offset   int64
	readerAt io.ReaderAt
}
// Read implements io.Reader by reading at the wrapper's current offset and
// advancing it by the number of bytes read.
func (r *ReaderAtWrapper) Read(p []byte) (n int, err error) {
	n, err = r.readerAt.ReadAt(p, r.offset)
	r.offset += int64(n)
	return
}
// NewReaderAtWrapper returns a ReaderAtWrapper positioned at offset 0.
func NewReaderAtWrapper(readerAt io.ReaderAt) *ReaderAtWrapper {
	return &ReaderAtWrapper{readerAt: readerAt}
}

View File

@@ -0,0 +1,84 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"os"
auth "oras.land/oras-go/pkg/auth/docker"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
)
// RegistryOptions provide configuration options to a Registry
type RegistryOptions struct {
	Configs   []string // paths to docker-style auth config files
	Username  string   // static credentials; when set, config files are ignored
	Password  string
	Insecure  bool // skip TLS certificate verification
	PlainHTTP bool // use HTTP instead of HTTPS
}
// Registry provides content from a spec-compliant registry. Create and use a new one for each
// registry with unique configuration of RegistryOptions.
type Registry struct {
	remotes.Resolver
}
// NewRegistry creates a new Registry store whose embedded resolver is built
// from the supplied options. The error return is currently always nil and is
// kept for API stability.
func NewRegistry(opts RegistryOptions) (*Registry, error) {
	resolver := newResolver(opts.Username, opts.Password, opts.Insecure, opts.PlainHTTP, opts.Configs...)
	return &Registry{Resolver: resolver}, nil
}
// newResolver builds a containerd remotes.Resolver for registry access.
//
// Credential precedence:
//  1. an explicit username/password pair, if either is non-empty;
//  2. Docker auth config files (configs...), falling back to a plain docker
//     resolver (with a warning on stderr) if the auth client or its resolver
//     cannot be created.
//
// insecure disables TLS certificate verification; plainHTTP switches the
// resolver to HTTP.
func newResolver(username, password string, insecure bool, plainHTTP bool, configs ...string) remotes.Resolver {
	opts := docker.ResolverOptions{
		PlainHTTP: plainHTTP,
	}
	// Use a dedicated client rather than http.DefaultClient: assigning a
	// Transport to the shared default client would leak the insecure TLS
	// setting into every other user of http.DefaultClient in the process.
	// A nil Transport falls back to http.DefaultTransport, so behavior in
	// the secure case is unchanged.
	client := &http.Client{}
	if insecure {
		client.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		}
	}
	opts.Client = client
	if username != "" || password != "" {
		opts.Credentials = func(hostName string) (string, string, error) {
			return username, password, nil
		}
		return docker.NewResolver(opts)
	}
	cli, err := auth.NewClient(configs...)
	if err != nil {
		// Best-effort: a broken auth file is reported but not fatal.
		fmt.Fprintf(os.Stderr, "WARNING: Error loading auth file: %v\n", err)
	}
	resolver, err := cli.Resolver(context.Background(), client, plainHTTP)
	if err != nil {
		fmt.Fprintf(os.Stderr, "WARNING: Error loading resolver: %v\n", err)
		resolver = docker.NewResolver(opts)
	}
	return resolver
}

View File

@@ -0,0 +1,157 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"archive/tar"
"fmt"
"io"
"github.com/containerd/containerd/content"
)
// NewUntarWriter wraps a writer with an untar, so that the stream written to
// the returned writer is untarred and each entry's data passed through to the
// underlying writer.
//
// By default, it calculates the hash when writing. If the option `skipHash` is
// true, it will skip doing the hash. Skipping the hash is intended to be used
// only if you are confident about the validity of the data being passed to
// the writer, and wish to save on the hashing time.
func NewUntarWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}
	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
		tr := tar.NewReader(r)
		var err error
		for {
			// Assign with '=' rather than ':=': a ':=' here would shadow the
			// outer err, so header-read failures would never reach the
			// 'done <- err' below and would be silently dropped.
			_, err = tr.Next()
			if err == io.EOF {
				// clear the error, since we do not pass an io.EOF
				err = nil
				break // End of archive
			}
			if err != nil {
				// pass the error on
				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
				break
			}
			// write out the untarred data
			// we can handle io.EOF, just go to the next file
			// any other errors should stop and get reported
			b := make([]byte, wOpts.Blocksize)
			for {
				var n int
				n, err = tr.Read(b)
				if err != nil && err != io.EOF {
					err = fmt.Errorf("UntarWriter file data read error: %v", err)
					break
				}
				// defensive clamp; io.Reader guarantees n <= len(b)
				l := n
				if n > len(b) {
					l = len(b)
				}
				if _, err2 := w.Write(b[:l]); err2 != nil {
					err = fmt.Errorf("UntarWriter error writing to underlying writer: %v", err2)
					break
				}
				if err == io.EOF {
					// go to the next file
					break
				}
			}
			// did we break with a non-nil and non-EOF error?
			if err != nil && err != io.EOF {
				break
			}
		}
		done <- err
	}, opts...)
}
// NewUntarWriterByName wraps multiple writers with an untar, so that the
// stream is untarred and each entry's data passed to the appropriate writer,
// based on the filename. If a filename is not found, it is up to the called
// func to determine how to process it; entries for which getwriter returns
// nil are skipped.
func NewUntarWriterByName(writers func(string) (content.Writer, error), opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}
	// need a PassthroughMultiWriter here
	return NewPassthroughMultiWriter(writers, func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error) {
		tr := tar.NewReader(r)
		var err error
		for {
			var header *tar.Header
			// Assign with '=' rather than ':=': a ':=' here would shadow the
			// outer err, so header-read failures would never reach the
			// 'done <- err' below and would be silently dropped.
			header, err = tr.Next()
			if err == io.EOF {
				// clear the error, since we do not pass an io.EOF
				err = nil
				break // End of archive
			}
			if err != nil {
				// pass the error on
				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
				break
			}
			// get the writer for this filename; nil means skip the entry
			filename := header.Name
			w := getwriter(filename)
			if w == nil {
				continue
			}
			// write out the untarred data
			// we can handle io.EOF, just go to the next file
			// any other errors should stop and get reported
			b := make([]byte, wOpts.Blocksize)
			for {
				var n int
				n, err = tr.Read(b)
				if err != nil && err != io.EOF {
					err = fmt.Errorf("UntarWriter file data read error: %v", err)
					break
				}
				// defensive clamp; io.Reader guarantees n <= len(b)
				l := n
				if n > len(b) {
					l = len(b)
				}
				if _, err2 := w.Write(b[:l]); err2 != nil {
					err = fmt.Errorf("UntarWriter error writing to underlying writer at for name '%s': %v", filename, err2)
					break
				}
				if err == io.EOF {
					// go to the next file
					break
				}
			}
			// did we break with a non-nil and non-EOF error?
			if err != nil && err != io.EOF {
				break
			}
		}
		done <- err
	}, opts...)
}

View File

@@ -0,0 +1,223 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ResolveName returns the file name recorded in the descriptor's
// AnnotationTitle annotation, and whether that annotation is present.
func ResolveName(desc ocispec.Descriptor) (string, bool) {
	if name, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
		return name, true
	}
	return "", false
}
// tarDirectory walks the directory specified by path, and tar those files with a new
// path prefix.
//
// Entry names are made relative to root, joined under prefix, and always
// slash-separated. Ownership is normalized (uid/gid 0, empty user/group
// names); when stripTimes is true all timestamps are zeroed so output is
// reproducible. Symlinks are archived as links (targets read via Readlink,
// not followed).
func tarDirectory(root, prefix string, w io.Writer, stripTimes bool) error {
	tw := tar.NewWriter(w)
	defer tw.Close()
	if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Rename path: relative to root, under prefix, slash-separated.
		name, err := filepath.Rel(root, path)
		if err != nil {
			return err
		}
		name = filepath.Join(prefix, name)
		name = filepath.ToSlash(name)
		// Generate header; symlinks need their target for FileInfoHeader.
		var link string
		mode := info.Mode()
		if mode&os.ModeSymlink != 0 {
			if link, err = os.Readlink(path); err != nil {
				return err
			}
		}
		header, err := tar.FileInfoHeader(info, link)
		if err != nil {
			return errors.Wrap(err, path)
		}
		header.Name = name
		// Normalize ownership so archives do not leak local uid/gid/names.
		header.Uid = 0
		header.Gid = 0
		header.Uname = ""
		header.Gname = ""
		if stripTimes {
			header.ModTime = time.Time{}
			header.AccessTime = time.Time{}
			header.ChangeTime = time.Time{}
		}
		// Write file: header first, then data for regular files only.
		if err := tw.WriteHeader(header); err != nil {
			return errors.Wrap(err, "tar")
		}
		if mode.IsRegular() {
			// Close is deferred within this per-file callback, so files are
			// released as each callback returns, not at end of Walk.
			file, err := os.Open(path)
			if err != nil {
				return err
			}
			defer file.Close()
			if _, err := io.Copy(tw, file); err != nil {
				return errors.Wrap(err, path)
			}
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}
// extractTarDirectory extracts tar file to a directory specified by the `root`
// parameter. The file name prefix is ensured to be the string specified by the
// `prefix` parameter and is trimmed.
//
// Both entry names and link targets are validated with ensureBasePath before
// anything is written, so lexical path escapes ("../") are rejected.
func extractTarDirectory(root, prefix string, r io.Reader) error {
	tr := tar.NewReader(r)
	for {
		header, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		// Name check: reject entries that would land outside root.
		name := header.Name
		path, err := ensureBasePath(root, prefix, name)
		if err != nil {
			return err
		}
		path = filepath.Join(root, path)
		// Link check: relative link targets are resolved against the entry's
		// directory and must also stay inside root.
		switch header.Typeflag {
		case tar.TypeLink, tar.TypeSymlink:
			link := header.Linkname
			if !filepath.IsAbs(link) {
				link = filepath.Join(filepath.Dir(name), link)
			}
			if _, err := ensureBasePath(root, prefix, link); err != nil {
				return err
			}
		}
		// Create content.
		// NOTE(review): link targets are created verbatim from the header; a
		// relative hard-link target in os.Link resolves against the process
		// working directory rather than root — confirm callers always run
		// with a compatible cwd.
		switch header.Typeflag {
		case tar.TypeReg:
			err = writeFile(path, tr, header.FileInfo().Mode())
		case tar.TypeDir:
			err = os.MkdirAll(path, header.FileInfo().Mode())
		case tar.TypeLink:
			err = os.Link(header.Linkname, path)
		case tar.TypeSymlink:
			err = os.Symlink(header.Linkname, path)
		default:
			continue // Non-regular files are skipped
		}
		if err != nil {
			return err
		}
		// Change access time and modification time if possible (error ignored)
		os.Chtimes(path, header.AccessTime, header.ModTime)
	}
}
// ensureBasePath ensures the target path is in the base path,
// returning its relative path to the base path.
func ensureBasePath(root, base, target string) (string, error) {
path, err := filepath.Rel(base, target)
if err != nil {
return "", err
}
cleanPath := filepath.ToSlash(filepath.Clean(path))
if cleanPath == ".." || strings.HasPrefix(cleanPath, "../") {
return "", fmt.Errorf("%q is outside of %q", target, base)
}
// No symbolic link allowed in the relative path
dir := filepath.Dir(path)
for dir != "." {
if info, err := os.Lstat(filepath.Join(root, dir)); err != nil {
if !os.IsNotExist(err) {
return "", err
}
} else if info.Mode()&os.ModeSymlink != 0 {
return "", fmt.Errorf("no symbolic link allowed between %q and %q", base, target)
}
dir = filepath.Dir(dir)
}
return path, nil
}
func writeFile(path string, r io.Reader, perm os.FileMode) error {
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, r)
return err
}
// extractTarGzip decompresses the gzipped tar archive at filename and extracts
// it into root, trimming prefix from entry names. If checksum parses as a
// digest string, the uncompressed stream is verified against it and a
// mismatch is reported as an error; an unparseable checksum is silently
// ignored (no verification), preserving existing behavior.
func extractTarGzip(root, prefix, filename, checksum string) error {
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer file.Close()
	zr, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer zr.Close()
	var r io.Reader = zr
	var verifier digest.Verifier
	if checksum != "" {
		// Use 'dgst' for the local variable: naming it 'digest' shadows the
		// imported go-digest package inside this scope.
		if dgst, err := digest.Parse(checksum); err == nil {
			verifier = dgst.Verifier()
			r = io.TeeReader(r, verifier)
		}
	}
	if err := extractTarDirectory(root, prefix, r); err != nil {
		return err
	}
	if verifier != nil && !verifier.Verified() {
		return errors.New("content digest mismatch")
	}
	return nil
}