28
vendor/github.com/deislabs/oras/pkg/content/consts.go
generated
vendored
Normal file
28
vendor/github.com/deislabs/oras/pkg/content/consts.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package content
|
||||
|
||||
import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
const (
|
||||
// DefaultBlobMediaType specifies the default blob media type
|
||||
DefaultBlobMediaType = ocispec.MediaTypeImageLayer
|
||||
// DefaultBlobDirMediaType specifies the default blob directory media type
|
||||
DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip
|
||||
)
|
||||
|
||||
const (
|
||||
// TempFilePattern specifies the pattern to create temporary files
|
||||
TempFilePattern = "oras"
|
||||
)
|
||||
|
||||
const (
|
||||
// AnnotationDigest is the annotation key for the digest of the uncompressed content
|
||||
AnnotationDigest = "io.deis.oras.content.digest"
|
||||
// AnnotationUnpack is the annotation key for indication of unpacking
|
||||
AnnotationUnpack = "io.deis.oras.content.unpack"
|
||||
)
|
||||
|
||||
const (
|
||||
// OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification
|
||||
// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file
|
||||
OCIImageIndexFile = "index.json"
|
||||
)
|
||||
17
vendor/github.com/deislabs/oras/pkg/content/errors.go
generated
vendored
Normal file
17
vendor/github.com/deislabs/oras/pkg/content/errors.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
package content
|
||||
|
||||
import "errors"
|
||||
|
||||
// Common errors
|
||||
// Common errors shared by every store implementation in this package.
var (
	// ErrNotFound is returned when the requested content is not in the store.
	ErrNotFound = errors.New("not_found")
	// ErrNoName is returned when a descriptor carries no title annotation.
	ErrNoName = errors.New("no_name")
	// ErrUnsupportedSize is returned when a writer is truncated to a non-zero size.
	ErrUnsupportedSize = errors.New("unsupported_size")
	// ErrUnsupportedVersion is returned for an unknown OCI layout version.
	ErrUnsupportedVersion = errors.New("unsupported_version")
)

// FileStore errors
var (
	// ErrPathTraversalDisallowed is returned when a write would escape the store root.
	ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed")
	// ErrOverwriteDisallowed is returned when overwriting is disabled and the target exists.
	ErrOverwriteDisallowed = errors.New("overwrite_disallowed")
)
|
||||
411
vendor/github.com/deislabs/oras/pkg/content/file.go
generated
vendored
Normal file
411
vendor/github.com/deislabs/oras/pkg/content/file.go
generated
vendored
Normal file
@@ -0,0 +1,411 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ensure interface
|
||||
var (
|
||||
_ ProvideIngester = &FileStore{}
|
||||
)
|
||||
|
||||
// FileStore provides content from the file system.
type FileStore struct {
	// DisableOverwrite rejects writes to paths that already exist.
	DisableOverwrite bool
	// AllowPathTraversalOnWrite permits write targets outside the root.
	AllowPathTraversalOnWrite bool

	// Reproducible enables stripping times from added files
	Reproducible bool

	root       string
	descriptor *sync.Map // map[digest.Digest]ocispec.Descriptor
	pathMap    *sync.Map
	tmpFiles   *sync.Map
}

// NewFileStore creates a new file store rooted at rootPath.
func NewFileStore(rootPath string) *FileStore {
	store := &FileStore{
		root:       rootPath,
		descriptor: &sync.Map{},
		pathMap:    &sync.Map{},
		tmpFiles:   &sync.Map{},
	}
	return store
}
|
||||
|
||||
// Add adds a file reference
|
||||
func (s *FileStore) Add(name, mediaType, path string) (ocispec.Descriptor, error) {
|
||||
if path == "" {
|
||||
path = name
|
||||
}
|
||||
path = s.MapPath(name, path)
|
||||
|
||||
fileInfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
var desc ocispec.Descriptor
|
||||
if fileInfo.IsDir() {
|
||||
desc, err = s.descFromDir(name, mediaType, path)
|
||||
} else {
|
||||
desc, err = s.descFromFile(fileInfo, mediaType, path)
|
||||
}
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
if desc.Annotations == nil {
|
||||
desc.Annotations = make(map[string]string)
|
||||
}
|
||||
desc.Annotations[ocispec.AnnotationTitle] = name
|
||||
|
||||
s.set(desc)
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) descFromFile(info os.FileInfo, mediaType, path string) (ocispec.Descriptor, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
digest, err := digest.FromReader(file)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
if mediaType == "" {
|
||||
mediaType = DefaultBlobMediaType
|
||||
}
|
||||
return ocispec.Descriptor{
|
||||
MediaType: mediaType,
|
||||
Digest: digest,
|
||||
Size: info.Size(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) descFromDir(name, mediaType, root string) (ocispec.Descriptor, error) {
|
||||
// generate temp file
|
||||
file, err := s.tempFile()
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
s.MapPath(name, file.Name())
|
||||
|
||||
// compress directory
|
||||
digester := digest.Canonical.Digester()
|
||||
zw := gzip.NewWriter(io.MultiWriter(file, digester.Hash()))
|
||||
defer zw.Close()
|
||||
tarDigester := digest.Canonical.Digester()
|
||||
if err := tarDirectory(root, name, io.MultiWriter(zw, tarDigester.Hash()), s.Reproducible); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
// flush all
|
||||
if err := zw.Close(); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
if err := file.Sync(); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
// generate descriptor
|
||||
if mediaType == "" {
|
||||
mediaType = DefaultBlobDirMediaType
|
||||
}
|
||||
info, err := file.Stat()
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
return ocispec.Descriptor{
|
||||
MediaType: mediaType,
|
||||
Digest: digester.Digest(),
|
||||
Size: info.Size(),
|
||||
Annotations: map[string]string{
|
||||
AnnotationDigest: tarDigester.Digest().String(),
|
||||
AnnotationUnpack: "true",
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) tempFile() (*os.File, error) {
|
||||
file, err := ioutil.TempFile("", TempFilePattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.tmpFiles.Store(file.Name(), file)
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Close frees up resources used by the file store
|
||||
func (s *FileStore) Close() error {
|
||||
var errs []string
|
||||
s.tmpFiles.Range(func(name, _ interface{}) bool {
|
||||
if err := os.Remove(name.(string)); err != nil {
|
||||
errs = append(errs, err.Error())
|
||||
}
|
||||
return true
|
||||
})
|
||||
return errors.New(strings.Join(errs, "; "))
|
||||
}
|
||||
|
||||
// ReaderAt provides contents
|
||||
func (s *FileStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
|
||||
desc, ok := s.get(desc)
|
||||
if !ok {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
name, ok := ResolveName(desc)
|
||||
if !ok {
|
||||
return nil, ErrNoName
|
||||
}
|
||||
path := s.ResolvePath(name)
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sizeReaderAt{
|
||||
readAtCloser: file,
|
||||
size: desc.Size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Writer begins or resumes the active writer identified by desc
|
||||
func (s *FileStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
|
||||
var wOpts content.WriterOpts
|
||||
for _, opt := range opts {
|
||||
if err := opt(&wOpts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
desc := wOpts.Desc
|
||||
|
||||
name, ok := ResolveName(desc)
|
||||
if !ok {
|
||||
return nil, ErrNoName
|
||||
}
|
||||
path, err := s.resolveWritePath(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
file, afterCommit, err := s.createWritePath(path, desc, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
return &fileWriter{
|
||||
store: s,
|
||||
file: file,
|
||||
desc: desc,
|
||||
digester: digest.Canonical.Digester(),
|
||||
status: content.Status{
|
||||
Ref: name,
|
||||
Total: desc.Size,
|
||||
StartedAt: now,
|
||||
UpdatedAt: now,
|
||||
},
|
||||
afterCommit: afterCommit,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) resolveWritePath(name string) (string, error) {
|
||||
path := s.ResolvePath(name)
|
||||
if !s.AllowPathTraversalOnWrite {
|
||||
base, err := filepath.Abs(s.root)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
target, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
rel, err := filepath.Rel(base, target)
|
||||
if err != nil {
|
||||
return "", ErrPathTraversalDisallowed
|
||||
}
|
||||
rel = filepath.ToSlash(rel)
|
||||
if strings.HasPrefix(rel, "../") || rel == ".." {
|
||||
return "", ErrPathTraversalDisallowed
|
||||
}
|
||||
}
|
||||
if s.DisableOverwrite {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return "", ErrOverwriteDisallowed
|
||||
} else if !os.IsNotExist(err) {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) createWritePath(path string, desc ocispec.Descriptor, prefix string) (*os.File, func() error, error) {
|
||||
if value, ok := desc.Annotations[AnnotationUnpack]; !ok || value != "true" {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
file, err := os.Create(path)
|
||||
return file, nil, err
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
file, err := s.tempFile()
|
||||
checksum := desc.Annotations[AnnotationDigest]
|
||||
afterCommit := func() error {
|
||||
return extractTarGzip(path, prefix, file.Name(), checksum)
|
||||
}
|
||||
return file, afterCommit, err
|
||||
}
|
||||
|
||||
// MapPath maps name to path
|
||||
func (s *FileStore) MapPath(name, path string) string {
|
||||
path = s.resolvePath(path)
|
||||
s.pathMap.Store(name, path)
|
||||
return path
|
||||
}
|
||||
|
||||
// ResolvePath returns the path by name
|
||||
func (s *FileStore) ResolvePath(name string) string {
|
||||
if value, ok := s.pathMap.Load(name); ok {
|
||||
if path, ok := value.(string); ok {
|
||||
return path
|
||||
}
|
||||
}
|
||||
|
||||
// using the name as a fallback solution
|
||||
return s.resolvePath(name)
|
||||
}
|
||||
|
||||
func (s *FileStore) resolvePath(path string) string {
|
||||
if filepath.IsAbs(path) {
|
||||
return path
|
||||
}
|
||||
return filepath.Join(s.root, path)
|
||||
}
|
||||
|
||||
func (s *FileStore) set(desc ocispec.Descriptor) {
|
||||
s.descriptor.Store(desc.Digest, desc)
|
||||
}
|
||||
|
||||
func (s *FileStore) get(desc ocispec.Descriptor) (ocispec.Descriptor, bool) {
|
||||
value, ok := s.descriptor.Load(desc.Digest)
|
||||
if !ok {
|
||||
return ocispec.Descriptor{}, false
|
||||
}
|
||||
desc, ok = value.(ocispec.Descriptor)
|
||||
return desc, ok
|
||||
}
|
||||
|
||||
type fileWriter struct {
|
||||
store *FileStore
|
||||
file *os.File
|
||||
desc ocispec.Descriptor
|
||||
digester digest.Digester
|
||||
status content.Status
|
||||
afterCommit func() error
|
||||
}
|
||||
|
||||
func (w *fileWriter) Status() (content.Status, error) {
|
||||
return w.status, nil
|
||||
}
|
||||
|
||||
// Digest returns the current digest of the content, up to the current write.
|
||||
//
|
||||
// Cannot be called concurrently with `Write`.
|
||||
func (w *fileWriter) Digest() digest.Digest {
|
||||
return w.digester.Digest()
|
||||
}
|
||||
|
||||
// Write p to the transaction.
|
||||
func (w *fileWriter) Write(p []byte) (n int, err error) {
|
||||
n, err = w.file.Write(p)
|
||||
w.digester.Hash().Write(p[:n])
|
||||
w.status.Offset += int64(len(p))
|
||||
w.status.UpdatedAt = time.Now()
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
var base content.Info
|
||||
for _, opt := range opts {
|
||||
if err := opt(&base); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if w.file == nil {
|
||||
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
|
||||
}
|
||||
file := w.file
|
||||
w.file = nil
|
||||
|
||||
if err := file.Sync(); err != nil {
|
||||
file.Close()
|
||||
return errors.Wrap(err, "sync failed")
|
||||
}
|
||||
|
||||
fileInfo, err := file.Stat()
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return errors.Wrap(err, "stat failed")
|
||||
}
|
||||
if err := file.Close(); err != nil {
|
||||
return errors.Wrap(err, "failed to close file")
|
||||
}
|
||||
|
||||
if size > 0 && size != fileInfo.Size() {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size)
|
||||
}
|
||||
if dgst := w.digester.Digest(); expected != "" && expected != dgst {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
|
||||
}
|
||||
|
||||
w.store.set(w.desc)
|
||||
if w.afterCommit != nil {
|
||||
return w.afterCommit()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the writer, flushing any unwritten data and leaving the progress in
|
||||
// tact.
|
||||
func (w *fileWriter) Close() error {
|
||||
if w.file == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.file.Sync()
|
||||
err := w.file.Close()
|
||||
w.file = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *fileWriter) Truncate(size int64) error {
|
||||
if size != 0 {
|
||||
return ErrUnsupportedSize
|
||||
}
|
||||
w.status.Offset = 0
|
||||
w.digester.Hash().Reset()
|
||||
if _, err := w.file.Seek(0, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
return w.file.Truncate(0)
|
||||
}
|
||||
9
vendor/github.com/deislabs/oras/pkg/content/interface.go
generated
vendored
Normal file
9
vendor/github.com/deislabs/oras/pkg/content/interface.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
package content
|
||||
|
||||
import "github.com/containerd/containerd/content"
|
||||
|
||||
// ProvideIngester is the interface that groups the basic Read and Write methods.
|
||||
type ProvideIngester interface {
|
||||
content.Provider
|
||||
content.Ingester
|
||||
}
|
||||
210
vendor/github.com/deislabs/oras/pkg/content/memory.go
generated
vendored
Normal file
210
vendor/github.com/deislabs/oras/pkg/content/memory.go
generated
vendored
Normal file
@@ -0,0 +1,210 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ensure interface
|
||||
var (
|
||||
_ content.Provider = &Memorystore{}
|
||||
_ content.Ingester = &Memorystore{}
|
||||
)
|
||||
|
||||
// Memorystore provides content from the memory
|
||||
type Memorystore struct {
|
||||
descriptor map[digest.Digest]ocispec.Descriptor
|
||||
content map[digest.Digest][]byte
|
||||
nameMap map[string]ocispec.Descriptor
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
// NewMemoryStore creats a new memory store
|
||||
func NewMemoryStore() *Memorystore {
|
||||
return &Memorystore{
|
||||
descriptor: make(map[digest.Digest]ocispec.Descriptor),
|
||||
content: make(map[digest.Digest][]byte),
|
||||
nameMap: make(map[string]ocispec.Descriptor),
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds content
|
||||
func (s *Memorystore) Add(name, mediaType string, content []byte) ocispec.Descriptor {
|
||||
var annotations map[string]string
|
||||
if name != "" {
|
||||
annotations = map[string]string{
|
||||
ocispec.AnnotationTitle: name,
|
||||
}
|
||||
}
|
||||
|
||||
if mediaType == "" {
|
||||
mediaType = DefaultBlobMediaType
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: mediaType,
|
||||
Digest: digest.FromBytes(content),
|
||||
Size: int64(len(content)),
|
||||
Annotations: annotations,
|
||||
}
|
||||
|
||||
s.Set(desc, content)
|
||||
return desc
|
||||
}
|
||||
|
||||
// ReaderAt provides contents
|
||||
func (s *Memorystore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
|
||||
desc, content, ok := s.Get(desc)
|
||||
if !ok {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
|
||||
return sizeReaderAt{
|
||||
readAtCloser: nopCloser{
|
||||
ReaderAt: bytes.NewReader(content),
|
||||
},
|
||||
size: desc.Size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Writer begins or resumes the active writer identified by desc
|
||||
func (s *Memorystore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
|
||||
var wOpts content.WriterOpts
|
||||
for _, opt := range opts {
|
||||
if err := opt(&wOpts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
desc := wOpts.Desc
|
||||
|
||||
name, _ := ResolveName(desc)
|
||||
now := time.Now()
|
||||
return &memoryWriter{
|
||||
store: s,
|
||||
buffer: bytes.NewBuffer(nil),
|
||||
desc: desc,
|
||||
digester: digest.Canonical.Digester(),
|
||||
status: content.Status{
|
||||
Ref: name,
|
||||
Total: desc.Size,
|
||||
StartedAt: now,
|
||||
UpdatedAt: now,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Set adds the content to the store
|
||||
func (s *Memorystore) Set(desc ocispec.Descriptor, content []byte) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
s.descriptor[desc.Digest] = desc
|
||||
s.content[desc.Digest] = content
|
||||
|
||||
if name, ok := ResolveName(desc); ok && name != "" {
|
||||
s.nameMap[name] = desc
|
||||
}
|
||||
}
|
||||
|
||||
// Get finds the content from the store
|
||||
func (s *Memorystore) Get(desc ocispec.Descriptor) (ocispec.Descriptor, []byte, bool) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
desc, ok := s.descriptor[desc.Digest]
|
||||
if !ok {
|
||||
return ocispec.Descriptor{}, nil, false
|
||||
}
|
||||
content, ok := s.content[desc.Digest]
|
||||
return desc, content, ok
|
||||
}
|
||||
|
||||
// GetByName finds the content from the store by name (i.e. AnnotationTitle)
|
||||
func (s *Memorystore) GetByName(name string) (ocispec.Descriptor, []byte, bool) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
desc, ok := s.nameMap[name]
|
||||
if !ok {
|
||||
return ocispec.Descriptor{}, nil, false
|
||||
}
|
||||
content, ok := s.content[desc.Digest]
|
||||
return desc, content, ok
|
||||
}
|
||||
|
||||
type memoryWriter struct {
|
||||
store *Memorystore
|
||||
buffer *bytes.Buffer
|
||||
desc ocispec.Descriptor
|
||||
digester digest.Digester
|
||||
status content.Status
|
||||
}
|
||||
|
||||
func (w *memoryWriter) Status() (content.Status, error) {
|
||||
return w.status, nil
|
||||
}
|
||||
|
||||
// Digest returns the current digest of the content, up to the current write.
|
||||
//
|
||||
// Cannot be called concurrently with `Write`.
|
||||
func (w *memoryWriter) Digest() digest.Digest {
|
||||
return w.digester.Digest()
|
||||
}
|
||||
|
||||
// Write p to the transaction.
|
||||
func (w *memoryWriter) Write(p []byte) (n int, err error) {
|
||||
n, err = w.buffer.Write(p)
|
||||
w.digester.Hash().Write(p[:n])
|
||||
w.status.Offset += int64(len(p))
|
||||
w.status.UpdatedAt = time.Now()
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *memoryWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
var base content.Info
|
||||
for _, opt := range opts {
|
||||
if err := opt(&base); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if w.buffer == nil {
|
||||
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
|
||||
}
|
||||
content := w.buffer.Bytes()
|
||||
w.buffer = nil
|
||||
|
||||
if size > 0 && size != int64(len(content)) {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", len(content), size)
|
||||
}
|
||||
if dgst := w.digester.Digest(); expected != "" && expected != dgst {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
|
||||
}
|
||||
|
||||
w.store.Set(w.desc, content)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *memoryWriter) Close() error {
|
||||
w.buffer = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *memoryWriter) Truncate(size int64) error {
|
||||
if size != 0 {
|
||||
return ErrUnsupportedSize
|
||||
}
|
||||
w.status.Offset = 0
|
||||
w.digester.Hash().Reset()
|
||||
w.buffer.Truncate(0)
|
||||
return nil
|
||||
}
|
||||
169
vendor/github.com/deislabs/oras/pkg/content/oci.go
generated
vendored
Normal file
169
vendor/github.com/deislabs/oras/pkg/content/oci.go
generated
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/content/local"
|
||||
specs "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// OCIStore provides content from the file system with the OCI-Image layout.
|
||||
// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md
|
||||
type OCIStore struct {
|
||||
content.Store
|
||||
|
||||
root string
|
||||
index *ocispec.Index
|
||||
nameMap map[string]ocispec.Descriptor
|
||||
}
|
||||
|
||||
// NewOCIStore creates a new OCI store
|
||||
func NewOCIStore(rootPath string) (*OCIStore, error) {
|
||||
fileStore, err := local.NewStore(rootPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
store := &OCIStore{
|
||||
Store: fileStore,
|
||||
root: rootPath,
|
||||
}
|
||||
if err := store.validateOCILayoutFile(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := store.LoadIndex(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return store, nil
|
||||
}
|
||||
|
||||
// LoadIndex reads the index.json from the file system
|
||||
func (s *OCIStore) LoadIndex() error {
|
||||
path := filepath.Join(s.root, OCIImageIndexFile)
|
||||
indexFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
s.index = &ocispec.Index{
|
||||
Versioned: specs.Versioned{
|
||||
SchemaVersion: 2, // historical value
|
||||
},
|
||||
}
|
||||
s.nameMap = make(map[string]ocispec.Descriptor)
|
||||
|
||||
return nil
|
||||
}
|
||||
defer indexFile.Close()
|
||||
|
||||
if err := json.NewDecoder(indexFile).Decode(&s.index); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.nameMap = make(map[string]ocispec.Descriptor)
|
||||
for _, desc := range s.index.Manifests {
|
||||
if name := desc.Annotations[ocispec.AnnotationRefName]; name != "" {
|
||||
s.nameMap[name] = desc
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveIndex writes the index.json to the file system
|
||||
func (s *OCIStore) SaveIndex() error {
|
||||
indexJSON, err := json.Marshal(s.index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := filepath.Join(s.root, OCIImageIndexFile)
|
||||
return ioutil.WriteFile(path, indexJSON, 0644)
|
||||
}
|
||||
|
||||
// AddReference adds or updates an reference to index.
|
||||
func (s *OCIStore) AddReference(name string, desc ocispec.Descriptor) {
|
||||
if desc.Annotations == nil {
|
||||
desc.Annotations = map[string]string{
|
||||
ocispec.AnnotationRefName: name,
|
||||
}
|
||||
} else {
|
||||
desc.Annotations[ocispec.AnnotationRefName] = name
|
||||
}
|
||||
|
||||
if _, ok := s.nameMap[name]; ok {
|
||||
for i, ref := range s.index.Manifests {
|
||||
if name == ref.Annotations[ocispec.AnnotationRefName] {
|
||||
s.index.Manifests[i] = desc
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Process should not reach here.
|
||||
// Fallthrough to `Add` scenario and recover.
|
||||
}
|
||||
|
||||
s.index.Manifests = append(s.index.Manifests, desc)
|
||||
s.nameMap[name] = desc
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteReference deletes an reference from index.
|
||||
func (s *OCIStore) DeleteReference(name string) {
|
||||
if _, ok := s.nameMap[name]; !ok {
|
||||
return
|
||||
}
|
||||
|
||||
delete(s.nameMap, name)
|
||||
for i, desc := range s.index.Manifests {
|
||||
if name == desc.Annotations[ocispec.AnnotationRefName] {
|
||||
s.index.Manifests[i] = s.index.Manifests[len(s.index.Manifests)-1]
|
||||
s.index.Manifests = s.index.Manifests[:len(s.index.Manifests)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ListReferences lists all references in index.
|
||||
func (s *OCIStore) ListReferences() map[string]ocispec.Descriptor {
|
||||
return s.nameMap
|
||||
}
|
||||
|
||||
// validateOCILayoutFile ensures the `oci-layout` file
|
||||
func (s *OCIStore) validateOCILayoutFile() error {
|
||||
layoutFilePath := filepath.Join(s.root, ocispec.ImageLayoutFile)
|
||||
layoutFile, err := os.Open(layoutFilePath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
layout := ocispec.ImageLayout{
|
||||
Version: ocispec.ImageLayoutVersion,
|
||||
}
|
||||
layoutJSON, err := json.Marshal(layout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(layoutFilePath, layoutJSON, 0644)
|
||||
}
|
||||
defer layoutFile.Close()
|
||||
|
||||
var layout *ocispec.ImageLayout
|
||||
err = json.NewDecoder(layoutFile).Decode(&layout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if layout.Version != ocispec.ImageLayoutVersion {
|
||||
return ErrUnsupportedVersion
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
34
vendor/github.com/deislabs/oras/pkg/content/readerat.go
generated
vendored
Normal file
34
vendor/github.com/deislabs/oras/pkg/content/readerat.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
)
|
||||
|
||||
// ensure interface
|
||||
var (
|
||||
_ content.ReaderAt = sizeReaderAt{}
|
||||
)
|
||||
|
||||
type readAtCloser interface {
|
||||
io.ReaderAt
|
||||
io.Closer
|
||||
}
|
||||
|
||||
type sizeReaderAt struct {
|
||||
readAtCloser
|
||||
size int64
|
||||
}
|
||||
|
||||
func (ra sizeReaderAt) Size() int64 {
|
||||
return ra.size
|
||||
}
|
||||
|
||||
type nopCloser struct {
|
||||
io.ReaderAt
|
||||
}
|
||||
|
||||
func (nopCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
171
vendor/github.com/deislabs/oras/pkg/content/utils.go
generated
vendored
Normal file
171
vendor/github.com/deislabs/oras/pkg/content/utils.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ResolveName resolves name from descriptor
|
||||
func ResolveName(desc ocispec.Descriptor) (string, bool) {
|
||||
name, ok := desc.Annotations[ocispec.AnnotationTitle]
|
||||
return name, ok
|
||||
}
|
||||
|
||||
// tarDirectory walks the directory specified by path, and tar those files with a new
|
||||
// path prefix.
|
||||
func tarDirectory(root, prefix string, w io.Writer, stripTimes bool) error {
|
||||
tw := tar.NewWriter(w)
|
||||
defer tw.Close()
|
||||
if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rename path
|
||||
name, err := filepath.Rel(root, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
name = filepath.Join(prefix, name)
|
||||
name = filepath.ToSlash(name)
|
||||
|
||||
// Generate header
|
||||
var link string
|
||||
mode := info.Mode()
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
if link, err = os.Readlink(path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
header, err := tar.FileInfoHeader(info, link)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, path)
|
||||
}
|
||||
header.Name = name
|
||||
header.Uid = 0
|
||||
header.Gid = 0
|
||||
header.Uname = ""
|
||||
header.Gname = ""
|
||||
|
||||
if stripTimes {
|
||||
header.ModTime = time.Time{}
|
||||
header.AccessTime = time.Time{}
|
||||
header.ChangeTime = time.Time{}
|
||||
}
|
||||
|
||||
// Write file
|
||||
if err := tw.WriteHeader(header); err != nil {
|
||||
return errors.Wrap(err, "tar")
|
||||
}
|
||||
if mode.IsRegular() {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
if _, err := io.Copy(tw, file); err != nil {
|
||||
return errors.Wrap(err, path)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractTarDirectory extracts tar file to a directory specified by the `root`
|
||||
// parameter. The file name prefix is ensured to be the string specified by the
|
||||
// `prefix` parameter and is trimmed.
|
||||
func extractTarDirectory(root, prefix string, r io.Reader) error {
|
||||
tr := tar.NewReader(r)
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Name check
|
||||
name := header.Name
|
||||
path, err := filepath.Rel(prefix, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.HasPrefix(path, "../") {
|
||||
return fmt.Errorf("%q does not have prefix %q", name, prefix)
|
||||
}
|
||||
path = filepath.Join(root, path)
|
||||
|
||||
// Create content
|
||||
switch header.Typeflag {
|
||||
case tar.TypeReg:
|
||||
err = writeFile(path, tr, header.FileInfo().Mode())
|
||||
case tar.TypeDir:
|
||||
err = os.MkdirAll(path, header.FileInfo().Mode())
|
||||
case tar.TypeLink:
|
||||
err = os.Link(header.Linkname, path)
|
||||
case tar.TypeSymlink:
|
||||
err = os.Symlink(header.Linkname, path)
|
||||
default:
|
||||
continue // Non-regular files are skipped
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Change access time and modification time if possible (error ignored)
|
||||
os.Chtimes(path, header.AccessTime, header.ModTime)
|
||||
}
|
||||
}
|
||||
|
||||
func writeFile(path string, r io.Reader, perm os.FileMode) error {
|
||||
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
_, err = io.Copy(file, r)
|
||||
return err
|
||||
}
|
||||
|
||||
func extractTarGzip(root, prefix, filename, checksum string) error {
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
zr, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer zr.Close()
|
||||
var r io.Reader = zr
|
||||
var verifier digest.Verifier
|
||||
if checksum != "" {
|
||||
if digest, err := digest.Parse(checksum); err == nil {
|
||||
verifier = digest.Verifier()
|
||||
r = io.TeeReader(r, verifier)
|
||||
}
|
||||
}
|
||||
if err := extractTarDirectory(root, prefix, r); err != nil {
|
||||
return err
|
||||
}
|
||||
if verifier != nil && !verifier.Verified() {
|
||||
return errors.New("content digest mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
Reference in New Issue
Block a user