Upgrade k8s package version (#5358)

* upgrade k8s package version

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

* Script upgrade and code formatting.

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
This commit is contained in:
hongzhouzi
2022-11-15 14:56:38 +08:00
committed by GitHub
parent 5f91c1663a
commit 44167aa47a
3106 changed files with 321340 additions and 172080 deletions

View File

@@ -0,0 +1,22 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package artifact
// UnknownConfigMediaType is the default mediaType used when no
// config media type is specified.
const UnknownConfigMediaType = "application/vnd.unknown.config.v1+json"

View File

@@ -0,0 +1,45 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"errors"
"net/http"
"github.com/containerd/containerd/remotes"
)
// ErrNotLoggedIn is returned when an operation requires credentials
// but no login has been performed for the target host.
var ErrNotLoggedIn = errors.New("not logged in")
// Client provides authentication operations for remotes.
type Client interface {
	// Login logs in to a remote server identified by the hostname.
	//
	// Deprecated: use LoginWithOpts.
	Login(ctx context.Context, hostname, username, secret string, insecure bool) error
	// LoginWithOpts logs in to a remote server identified by the hostname
	// with custom options.
	LoginWithOpts(options ...LoginOption) error
	// Logout logs out from a remote server identified by the hostname.
	Logout(ctx context.Context, hostname string) error
	// Resolver returns a new authenticated resolver.
	//
	// Deprecated: use ResolverWithOpts.
	Resolver(ctx context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error)
	// ResolverWithOpts returns a new authenticated resolver with custom options.
	ResolverWithOpts(options ...ResolverOption) (remotes.Resolver, error)
}

View File

@@ -0,0 +1,123 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"net/http"
)
// LoginOption allows specifying various settings on login.
type LoginOption func(*LoginSettings)

// LoginSettings represent all the various settings on login.
type LoginSettings struct {
	Context   context.Context
	Hostname  string
	Username  string
	Secret    string
	CertFile  string
	KeyFile   string
	CAFile    string
	Insecure  bool
	UserAgent string
}
// WithLoginContext returns a LoginOption that sets the Context used on login.
func WithLoginContext(context context.Context) LoginOption {
	return func(s *LoginSettings) {
		s.Context = context
	}
}
// WithLoginHostname returns a LoginOption that sets the Hostname used on login.
func WithLoginHostname(hostname string) LoginOption {
	return func(s *LoginSettings) {
		s.Hostname = hostname
	}
}
// WithLoginUsername returns a LoginOption that sets the Username used on login.
func WithLoginUsername(username string) LoginOption {
	return func(s *LoginSettings) {
		s.Username = username
	}
}
// WithLoginSecret returns a LoginOption that sets the Secret (password or
// identity token) used on login.
func WithLoginSecret(secret string) LoginOption {
	return func(s *LoginSettings) {
		s.Secret = secret
	}
}
// WithLoginInsecure returns a LoginOption that enables the Insecure flag on login.
func WithLoginInsecure() LoginOption {
	return func(s *LoginSettings) {
		s.Insecure = true
	}
}
// WithLoginTLS returns a LoginOption that sets the client certificate,
// certificate key, and certificate authority files used on login.
func WithLoginTLS(certFile, keyFile, caFile string) LoginOption {
	return func(s *LoginSettings) {
		s.CertFile = certFile
		s.KeyFile = keyFile
		s.CAFile = caFile
	}
}
// WithLoginUserAgent returns a LoginOption that sets the UserAgent used on login.
func WithLoginUserAgent(userAgent string) LoginOption {
	return func(s *LoginSettings) {
		s.UserAgent = userAgent
	}
}
type (
// ResolverOption allows specifying various settings on the resolver.
ResolverOption func(*ResolverSettings)
// ResolverSettings represent all the various settings on a resolver.
ResolverSettings struct {
Client *http.Client
PlainHTTP bool
Headers http.Header
}
)
// WithResolverClient returns a ResolverOption that sets the HTTP client
// used by the resolver.
func WithResolverClient(client *http.Client) ResolverOption {
	return func(s *ResolverSettings) {
		s.Client = client
	}
}
// WithResolverPlainHTTP returns a ResolverOption that enables plain (non-TLS)
// HTTP on the resolver.
func WithResolverPlainHTTP() ResolverOption {
	return func(s *ResolverSettings) {
		s.PlainHTTP = true
	}
}
// WithResolverHeaders returns a ResolverOption that sets extra request
// headers used by the resolver.
func WithResolverHeaders(headers http.Header) ResolverOption {
	return func(s *ResolverSettings) {
		s.Headers = headers
	}
}

View File

@@ -0,0 +1,123 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"os"
"github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
"github.com/pkg/errors"
"oras.land/oras-go/pkg/auth"
)
// Client provides authentication operations for docker registries.
type Client struct {
	// configs holds the loaded Docker config files. Credentials are read
	// from the first entry, falling back to later entries; only the first
	// entry is ever written to.
	configs []*configfile.ConfigFile
}
// NewClient creates a new auth client based on the provided config paths.
// If no config path is provided, the default Docker config path is used.
// Credentials are read from the first config and fall back to the next.
// All changes will only be written to the first config file.
func NewClient(configPaths ...string) (auth.Client, error) {
	if len(configPaths) == 0 {
		// No explicit paths: use Docker's default configuration location.
		cfg, err := config.Load(config.Dir())
		if err != nil {
			return nil, err
		}
		if !cfg.ContainsAuth() {
			cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore)
		}
		return &Client{configs: []*configfile.ConfigFile{cfg}}, nil
	}
	configs := make([]*configfile.ConfigFile, 0, len(configPaths))
	for _, p := range configPaths {
		cfg, err := loadConfigFile(p)
		if err != nil {
			return nil, errors.Wrap(err, p)
		}
		configs = append(configs, cfg)
	}
	return &Client{configs: configs}, nil
}
// NewClientWithDockerFallback creates a new auth client
// which falls back on Docker's default config path.
// This allows support for ~/.docker/config.json as a fallback,
// as well as support for the DOCKER_CONFIG environment variable.
func NewClientWithDockerFallback(configPaths ...string) (auth.Client, error) {
	if len(configPaths) == 0 {
		return NewClient()
	}
	configs := make([]*configfile.ConfigFile, 0, len(configPaths)+1)
	for _, p := range configPaths {
		cfg, err := loadConfigFile(p)
		if err != nil {
			return nil, errors.Wrap(err, p)
		}
		configs = append(configs, cfg)
	}
	// The Docker default config is consulted last, so explicit configs win.
	fallback, err := config.Load(config.Dir())
	if err != nil {
		return nil, err
	}
	if !fallback.ContainsAuth() {
		fallback.CredentialsStore = credentials.DetectDefaultStore(fallback.CredentialsStore)
	}
	return &Client{configs: append(configs, fallback)}, nil
}
// primaryCredentialsStore returns the credentials store of the first
// (primary) config file — the only config that is written to.
func (c *Client) primaryCredentialsStore(hostname string) credentials.Store {
	return c.configs[0].GetCredentialsStore(hostname)
}
// loadConfigFile reads a single configuration file from the given path.
// A missing file is not an error; an empty config is returned instead.
func loadConfigFile(path string) (*configfile.ConfigFile, error) {
	cfg := configfile.New(path)
	_, statErr := os.Stat(path)
	switch {
	case statErr == nil:
		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		if err := cfg.LoadFromReader(f); err != nil {
			return nil, err
		}
	case !os.IsNotExist(statErr):
		// Stat failed for a reason other than absence; surface it.
		return nil, statErr
	}
	if !cfg.ContainsAuth() {
		cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore)
	}
	return cfg, nil
}

View File

@@ -0,0 +1,103 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
ctypes "github.com/docker/cli/cli/config/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/registry"
iface "oras.land/oras-go/pkg/auth"
)
// IndexHostname is the hostname of the default Docker registry index.
const IndexHostname = "index.docker.io"
// Login logs in to a docker registry identified by the hostname.
//
// Deprecated: use LoginWithOpts.
func (c *Client) Login(ctx context.Context, hostname, username, secret string, insecure bool) error {
	return c.login(&iface.LoginSettings{
		Context:  ctx,
		Hostname: hostname,
		Username: username,
		Secret:   secret,
		Insecure: insecure,
	})
}
// LoginWithOpts logs in to a docker registry identified by the hostname with custom options.
func (c *Client) LoginWithOpts(options ...iface.LoginOption) error {
	settings := iface.LoginSettings{}
	for _, apply := range options {
		apply(&settings)
	}
	return c.login(&settings)
}
// login validates the credential in settings against the registry and, on
// success, persists the resulting credential in the primary config file.
func (c *Client) login(settings *iface.LoginSettings) error {
	hostname := resolveHostname(settings.Hostname)
	cred := types.AuthConfig{
		Username:      settings.Username,
		ServerAddress: hostname,
	}
	if settings.Username == "" {
		// No username means the secret is an identity token, not a password.
		cred.IdentityToken = settings.Secret
	} else {
		cred.Password = settings.Secret
	}
	opts := registry.ServiceOptions{}
	if settings.Insecure {
		opts.InsecureRegistries = []string{hostname}
	}
	// Login to ensure valid credential
	remote, err := registry.NewService(opts)
	if err != nil {
		return err
	}
	ctx := settings.Context
	if ctx == nil {
		ctx = context.Background()
	}
	userAgent := settings.UserAgent
	if userAgent == "" {
		userAgent = "oras"
	}
	var token string
	if (settings.CertFile != "" && settings.KeyFile != "") || settings.CAFile != "" {
		// Custom TLS material supplied: use the TLS-aware login path.
		_, token, err = c.loginWithTLS(ctx, remote, settings.CertFile, settings.KeyFile, settings.CAFile, &cred, userAgent)
	} else {
		_, token, err = remote.Auth(ctx, &cred, userAgent)
	}
	if err != nil {
		return err
	}
	if token != "" {
		// Prefer the identity token returned by the registry over the raw
		// username/password so the password is not stored.
		cred.Username = ""
		cred.Password = ""
		cred.IdentityToken = token
	}
	// Store credential
	return c.primaryCredentialsStore(hostname).Store(ctypes.AuthConfig(cred))
}

View File

@@ -0,0 +1,220 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/docker/api/types"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// The following functions are adapted from github.com/docker/docker/registry
// We need these to support passing in a transport that has custom TLS configuration
// They are not exposed in the docker/registry package that's why they are copied here
// loginCredentialStore adapts a types.AuthConfig to the auth.CredentialStore
// interface expected by the distribution registry client.
type loginCredentialStore struct {
	authConfig *types.AuthConfig
}
// Basic returns the username/password pair used for HTTP basic auth.
func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
	return lcs.authConfig.Username, lcs.authConfig.Password
}
// RefreshToken returns the stored identity token, regardless of URL/service.
func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
	return lcs.authConfig.IdentityToken
}
// SetRefreshToken stores the identity token obtained during authentication.
func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
	lcs.authConfig.IdentityToken = token
}
// loginWithTLS tries to login to the v2 registry server.
// A custom tls.Config is used to override the default TLS configuration of the different registry endpoints.
// The tls.Config is created using the provided certificate, certificate key and certificate authority.
// It returns the status message and identity token from the first endpoint
// that accepts the credential; non-auth errors move on to the next endpoint.
func (c *Client) loginWithTLS(ctx context.Context, service registry.Service, certFile, keyFile, caFile string, authConfig *types.AuthConfig, userAgent string) (string, string, error) {
	tlsConfig, err := tlsconfig.Client(tlsconfig.Options{CAFile: caFile, CertFile: certFile, KeyFile: keyFile})
	if err != nil {
		return "", "", err
	}
	endpoints, err := c.getEndpoints(authConfig.ServerAddress, service)
	if err != nil {
		return "", "", err
	}
	var status, token string
	for _, endpoint := range endpoints {
		endpoint.TLSConfig = tlsConfig
		status, token, err = loginV2(authConfig, endpoint, userAgent)
		if err != nil {
			// A 401 means the credential itself is bad — stop immediately.
			if isNotAuthorizedError(err) {
				return "", "", err
			}
			// Other failures (e.g. connectivity) fall through to the next endpoint.
			logrus.WithError(err).Infof("Error logging in to endpoint, trying next endpoint")
			continue
		}
		return status, token, nil
	}
	// All endpoints failed; err holds the last endpoint's error.
	return "", "", err
}
// getEndpoints returns the registry API endpoints for the given address,
// defaulting to the Docker index when the address is empty.
func (c *Client) getEndpoints(address string, service registry.Service) ([]registry.APIEndpoint, error) {
	hostname := IndexHostname
	if address != "" {
		if !strings.HasPrefix(address, "https://") && !strings.HasPrefix(address, "http://") {
			address = fmt.Sprintf("https://%s", address)
		}
		parsed, err := url.Parse(address)
		if err != nil {
			return nil, errdefs.InvalidParameter(errors.Wrapf(err, "unable to parse server address"))
		}
		hostname = parsed.Host
	}
	// Lookup endpoints for authentication using "LookupPushEndpoints", which
	// excludes mirrors to prevent sending credentials of the upstream registry
	// to a mirror.
	endpoints, err := service.LookupPushEndpoints(hostname)
	if err != nil {
		return nil, errdefs.InvalidParameter(err)
	}
	return endpoints, nil
}
// loginV2 tries to login to the v2 registry server. The given registry
// endpoint will be pinged to get authorization challenges. These challenges
// will be used to authenticate against the registry to validate credentials.
// Returns a human-readable status string and the identity token (if any).
func loginV2(authConfig *types.AuthConfig, endpoint registry.APIEndpoint, userAgent string) (string, string, error) {
	var (
		endpointStr = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
		modifiers   = registry.Headers(userAgent, nil)
		authTransport = transport.NewTransport(newTransport(endpoint.TLSConfig), modifiers...)
		// Work on a copy so the caller's authConfig is not mutated by the
		// credential store's SetRefreshToken.
		credentialAuthConfig = *authConfig
		creds                = loginCredentialStore{authConfig: &credentialAuthConfig}
	)
	logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr)
	loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
	if err != nil {
		return "", "", err
	}
	req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
	if err != nil {
		return "", "", err
	}
	resp, err := loginClient.Do(req)
	if err != nil {
		err = translateV2AuthError(err)
		return "", "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return "Login Succeeded", credentialAuthConfig.IdentityToken, nil
	}
	// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
	return "", "", errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
}
// newTransport returns a new HTTP transport. If tlsConfig is nil, it uses the
// default TLS configuration.
func newTransport(tlsConfig *tls.Config) *http.Transport {
	if tlsConfig == nil {
		tlsConfig = tlsconfig.ServerDefault()
	}
	dialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}
	tr := &http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		DialContext:         dialer.DialContext,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     tlsConfig,
		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
		DisableKeepAlives: true,
	}
	return tr
}
// v2AuthHTTPClient builds an HTTP client whose transport answers the
// registry's auth challenges (token and basic) using the given credentials.
func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) {
	// Ping the registry to collect its authorization challenges.
	challengeManager, _, err := registry.PingV2Registry(endpoint, authTransport)
	if err != nil {
		return nil, err
	}
	tokenHandlerOptions := auth.TokenHandlerOptions{
		Transport:     authTransport,
		Credentials:   creds,
		OfflineAccess: true,
		ClientID:      registry.AuthClientID,
		Scopes:        scopes,
	}
	tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
	basicHandler := auth.NewBasicHandler(creds)
	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
	return &http.Client{
		Transport: transport.NewTransport(authTransport, modifiers...),
		Timeout:   15 * time.Second,
	}, nil
}
// translateV2AuthError converts a *url.Error wrapping an "unauthorized"
// errcode.Error into an errdefs unauthorized error; all other errors are
// returned unchanged.
func translateV2AuthError(err error) error {
	urlErr, ok := err.(*url.Error)
	if !ok {
		return err
	}
	codeErr, ok := urlErr.Err.(errcode.Error)
	if ok && codeErr.Code == errcode.ErrorCodeUnauthorized {
		return errdefs.Unauthorized(err)
	}
	return err
}
// isNotAuthorizedError reports whether err looks like an HTTP 401 response,
// detected by substring match on the error text.
func isNotAuthorizedError(err error) bool {
	return strings.Contains(err.Error(), "401 Unauthorized")
}

View File

@@ -0,0 +1,42 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"github.com/docker/cli/cli/config/configfile"
"oras.land/oras-go/pkg/auth"
)
// Logout logs out from a docker registry identified by the hostname.
// Returns auth.ErrNotLoggedIn when no config holds credentials for the host.
func (c *Client) Logout(_ context.Context, hostname string) error {
	hostname = resolveHostname(hostname)
	loggedIn := false
	for _, cfg := range c.configs {
		if _, ok := cfg.AuthConfigs[hostname]; ok {
			loggedIn = true
			break
		}
	}
	if !loggedIn {
		return auth.ErrNotLoggedIn
	}
	// Log out from the primary config only, as backups are read-only.
	return c.primaryCredentialsStore(hostname).Erase(hostname)
}

View File

@@ -0,0 +1,86 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"net/http"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
ctypes "github.com/docker/cli/cli/config/types"
"github.com/docker/docker/registry"
iface "oras.land/oras-go/pkg/auth"
)
// Resolver returns a new authenticated resolver.
//
// Deprecated: use ResolverWithOpts.
func (c *Client) Resolver(_ context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error) {
	return docker.NewResolver(docker.ResolverOptions{
		Credentials: c.Credential,
		Client:      client,
		PlainHTTP:   plainHTTP,
	}), nil
}
// ResolverWithOpts returns a new authenticated resolver with custom options.
func (c *Client) ResolverWithOpts(options ...iface.ResolverOption) (remotes.Resolver, error) {
	var settings iface.ResolverSettings
	for _, apply := range options {
		apply(&settings)
	}
	opts := docker.ResolverOptions{
		Credentials: c.Credential,
		Client:      settings.Client,
		PlainHTTP:   settings.PlainHTTP,
		Headers:     settings.Headers,
	}
	return docker.NewResolver(opts), nil
}
// Credential returns the login credential of the request host.
// Configs are consulted in order; an identity token wins over a
// username/password pair, and anonymous entries fall through to the
// next config. The error returned is the one from the last lookup.
func (c *Client) Credential(hostname string) (string, string, error) {
	hostname = resolveHostname(hostname)
	var (
		authConf ctypes.AuthConfig
		err      error
	)
	for _, cfg := range c.configs {
		authConf, err = cfg.GetAuthConfig(hostname)
		if err != nil {
			// fall back to next config
			continue
		}
		if authConf.IdentityToken != "" {
			return "", authConf.IdentityToken, nil
		}
		if authConf.Username != "" || authConf.Password != "" {
			return authConf.Username, authConf.Password, nil
		}
		// anonymous entry: fall back to next config
	}
	return "", "", err
}
// resolveHostname resolves Docker specific hostnames, mapping the various
// aliases of Docker Hub to the canonical index server address.
func resolveHostname(hostname string) string {
	if hostname == registry.IndexHostname ||
		hostname == registry.IndexName ||
		hostname == registry.DefaultV2Registry.Host {
		return registry.IndexServer
	}
	return hostname
}

View File

@@ -0,0 +1,57 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
	// DefaultBlobMediaType specifies the default blob media type
	DefaultBlobMediaType = ocispec.MediaTypeImageLayer
	// DefaultBlobDirMediaType specifies the default blob directory media type
	DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip
)
const (
	// TempFilePattern specifies the pattern to create temporary files
	TempFilePattern = "oras"
)
const (
	// AnnotationDigest is the annotation key for the digest of the uncompressed content
	AnnotationDigest = "io.deis.oras.content.digest"
	// AnnotationUnpack is the annotation key for indication of unpacking
	AnnotationUnpack = "io.deis.oras.content.unpack"
)
const (
	// OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification
	// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file
	OCIImageIndexFile = "index.json"
)
const (
	// DefaultBlocksize is the default size of each slice of bytes read in each write through in gunzip and untar.
	// Simply uses the same size as io.Copy()
	DefaultBlocksize = 32768
)
const (
	// BlankHash is the sha256 digest of the empty (zero-byte) input.
	BlankHash = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
)

View File

@@ -0,0 +1,151 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"errors"
"strings"
ctrcontent "github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Decompress store to decompress content and extract from tar, if needed, wrapping
// another store. By default, a FileStore will simply take each artifact and write it to
// a file, as a MemoryStore will do into memory. If the artifact is gzipped or tarred,
// you might want to store the actual object inside tar or gzip. Wrap your Store
// with Decompress, and it will check the media-type and, if relevant,
// gunzip and/or untar.
//
// For example:
//
//	fileStore := NewFileStore(rootPath)
//	Decompress := store.NewDecompress(fileStore, WithBlocksize(blocksize))
//
// The above example works if there is no tar, i.e. each artifact is just a single file, perhaps gzipped,
// or if there is only one file in each tar archive. In other words, when each content.Writer has only one target output stream.
// However, if you have multiple files in each tar archive, each archive of which is an artifact layer, then
// you need a way to select how to handle each file in the tar archive. In other words, when each content.Writer has more than one
// target output stream. In that case, use the following example:
//
//	multiStore := NewMultiStore(rootPath) // some store that can handle different filenames
//	Decompress := store.NewDecompress(multiStore, WithBlocksize(blocksize), WithMultiWriterIngester())
type Decompress struct {
	// pusher is the wrapped pusher that receives the decompressed content.
	pusher remotes.Pusher
	// blocksize is the read/write chunk size; 0 means the writer's default.
	blocksize int
	// multiWriterIngester requires pusher to implement MultiWriterPusher.
	multiWriterIngester bool
}
// NewDecompress creates a Decompress wrapping the given pusher, applying
// any blocksize / multi-writer settings found in opts.
func NewDecompress(pusher remotes.Pusher, opts ...WriterOpt) Decompress {
	// we have to reprocess the opts to find the blocksize
	var wOpts WriterOpts
	for _, o := range opts {
		// TODO: we probably should handle errors here
		_ = o(&wOpts)
	}
	return Decompress{
		pusher:              pusher,
		blocksize:           wOpts.Blocksize,
		multiWriterIngester: wOpts.MultiWriterIngester,
	}
}
// Push returns a content.Writer for the descriptor, wrapped with untar
// and/or gunzip writers when the media type indicates tar/gzip content.
func (d Decompress) Push(ctx context.Context, desc ocispec.Descriptor) (ctrcontent.Writer, error) {
	// the logic is straightforward:
	// - if there is a desc in the opts, and the mediatype is tar or tar+gzip, then pass the correct decompress writer
	// - else, pass the regular writer
	var (
		writer        ctrcontent.Writer
		err           error
		multiIngester MultiWriterPusher
		ok            bool
	)
	// check to see if we are supposed to use a MultiWriterIngester
	if d.multiWriterIngester {
		multiIngester, ok = d.pusher.(MultiWriterPusher)
		if !ok {
			return nil, errors.New("configured to use multiwriter ingester, but ingester does not implement multiwriter")
		}
	}
	// figure out if compression and/or archive exists
	// before we pass it down, we need to strip anything we are removing here
	// and possibly update the digest, since the store indexes things by digest
	hasGzip, hasTar, modifiedMediaType := checkCompression(desc.MediaType)
	desc.MediaType = modifiedMediaType
	// determine if we pass it blocksize, only if positive
	writerOpts := []WriterOpt{}
	if d.blocksize > 0 {
		writerOpts = append(writerOpts, WithBlocksize(d.blocksize))
	}
	writer, err = d.pusher.Push(ctx, desc)
	if err != nil {
		return nil, err
	}
	// do we need to wrap with an untar writer?
	if hasTar {
		// if not multiingester, get a regular writer
		if multiIngester == nil {
			writer = NewUntarWriter(writer, writerOpts...)
		} else {
			// multi-writer path: one named writer per target output stream
			writers, err := multiIngester.Pushers(ctx, desc)
			if err != nil {
				return nil, err
			}
			writer = NewUntarWriterByName(writers, writerOpts...)
		}
	}
	if hasGzip {
		if writer == nil {
			writer, err = d.pusher.Push(ctx, desc)
			if err != nil {
				return nil, err
			}
		}
		// gunzip first, then (possibly) untar: the writers are applied
		// outermost-last, so data flows gunzip -> untar -> store
		writer = NewGunzipWriter(writer, writerOpts...)
	}
	return writer, nil
}
// checkCompression check if the mediatype uses gzip compression or tar.
// Returns if it has gzip and/or tar, as well as the base media type without
// those suffixes. Both "+gzip" and the alternate ".gzip" suffix are
// recognized, and ".tar" is stripped after any gzip suffix.
func checkCompression(mediaType string) (gzip, tar bool, mt string) {
	mt = mediaType
	if strings.HasSuffix(mt, "+gzip") {
		mt = strings.TrimSuffix(mt, "+gzip")
		gzip = true
	} else if strings.HasSuffix(mt, ".gzip") {
		mt = strings.TrimSuffix(mt, ".gzip")
		gzip = true
	}
	if strings.HasSuffix(mt, ".tar") {
		mt = strings.TrimSuffix(mt, ".tar")
		tar = true
	}
	return gzip, tar, mt
}

View File

@@ -0,0 +1,33 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import "errors"
// ErrNotFound indicates the requested content does not exist in the store.
var ErrNotFound = errors.New("not_found")

// ErrNoName indicates a descriptor carries no resolvable name.
var ErrNoName = errors.New("no_name")

// ErrUnsupportedSize indicates a size the store cannot handle.
var ErrUnsupportedSize = errors.New("unsupported_size")

// ErrUnsupportedVersion indicates an unsupported format/spec version.
var ErrUnsupportedVersion = errors.New("unsupported_version")

// ErrInvalidReference indicates a malformed or unknown reference.
var ErrInvalidReference = errors.New("invalid_reference")
// ErrPathTraversalDisallowed is returned when a write would escape the
// store root and path traversal is not allowed.
var ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed")

// ErrOverwriteDisallowed is returned when a write would replace an
// existing file and overwriting is disabled.
var ErrOverwriteDisallowed = errors.New("overwrite_disallowed")

View File

@@ -0,0 +1,534 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"bytes"
"compress/gzip"
"context"
_ "crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// File provides content via files from the file system
type File struct {
	// DisableOverwrite, when set, disallows overwriting existing entries.
	DisableOverwrite bool
	// AllowPathTraversalOnWrite, when set, permits writes outside the root.
	AllowPathTraversalOnWrite bool
	// Reproducible enables stripping times from added files
	Reproducible bool
	// root is the base directory all named paths resolve under.
	root       string
	descriptor *sync.Map // map[digest.Digest]ocispec.Descriptor
	pathMap    *sync.Map // map[name string](file string)
	memoryMap  *sync.Map // map[digest.Digest]([]byte)
	refMap     *sync.Map // map[string]ocispec.Descriptor
	// tmpFiles tracks temporary files created by the store.
	tmpFiles *sync.Map
	// ignoreNoName makes pushes skip descriptors without a name
	// instead of failing with ErrNoName.
	ignoreNoName bool
}
// NewFile creates a new file target. It represents a single root reference
// and all of its components, rooted at rootPath.
func NewFile(rootPath string, opts ...WriterOpt) *File {
	// we have to process the opts to find if they told us to change defaults
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		// option errors are intentionally ignored; defaults are kept
		if err := opt(&wOpts); err != nil {
			continue
		}
	}
	return &File{
		root:         rootPath,
		descriptor:   &sync.Map{},
		pathMap:      &sync.Map{},
		memoryMap:    &sync.Map{},
		refMap:       &sync.Map{},
		tmpFiles:     &sync.Map{},
		ignoreNoName: wOpts.IgnoreNoName,
	}
}
// Resolver returns this store as a remotes.Resolver.
func (s *File) Resolver() remotes.Resolver {
	return s
}
// Resolve resolves a reference previously registered via StoreManifest to its
// descriptor. The returned name is the ref itself.
func (s *File) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	desc, ok := s.getRef(ref)
	if !ok {
		return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref)
	}
	return ref, desc, nil
}
// Fetcher returns this store as a remotes.Fetcher for a known reference, or
// an error if the reference has never been stored.
func (s *File) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
	if _, ok := s.refMap.Load(ref); !ok {
		return nil, fmt.Errorf("unknown reference: %s", ref)
	}
	return s, nil
}
// Fetch get an io.ReadCloser for the specific content
func (s *File) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	// first see if it is in the in-memory manifest map
	manifest, ok := s.getMemory(desc)
	if ok {
		return ioutil.NopCloser(bytes.NewReader(manifest)), nil
	}
	// otherwise fall back to file-backed content registered via Add
	desc, ok = s.get(desc)
	if !ok {
		return nil, ErrNotFound
	}
	name, ok := ResolveName(desc)
	if !ok {
		return nil, ErrNoName
	}
	path := s.ResolvePath(name)
	return os.Open(path)
}
// Pusher returns a remotes.Pusher that writes content into this file store.
// The reference may carry an optional "@<digest>" suffix naming the root hash.
func (s *File) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	var tag, hash string
	if i := strings.Index(ref, "@"); i >= 0 {
		tag, hash = ref[:i], ref[i+1:]
	} else {
		tag = ref
	}
	return &filePusher{
		store: s,
		ref:   tag,
		hash:  hash,
	}, nil
}
// filePusher pushes content into a File store for a single reference.
type filePusher struct {
	store *File
	ref   string // tag portion of the reference
	hash  string // optional digest portion following "@", may be empty
}
// Push returns a content.Writer that stores the descriptor's content in the
// file store, at the path derived from the descriptor's title annotation.
// Nameless descriptors are either rejected (ErrNoName) or, when the store was
// created with IgnoreNoName, silently discarded.
func (s *filePusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	name, ok := ResolveName(desc)
	now := time.Now()
	if !ok {
		// if we were not told to ignore NoName, then return an error
		if !s.store.ignoreNoName {
			return nil, ErrNoName
		}
		// just return a nil writer - we do not want to calculate the hash, so just use
		// whatever was passed in the descriptor
		return NewIoContentWriter(ioutil.Discard, WithOutputHash(desc.Digest)), nil
	}
	path, err := s.store.resolveWritePath(name)
	if err != nil {
		return nil, err
	}
	file, afterCommit, err := s.store.createWritePath(path, desc, name)
	if err != nil {
		return nil, err
	}
	return &fileWriter{
		store:    s.store,
		file:     file,
		desc:     desc,
		digester: digest.Canonical.Digester(),
		status: content.Status{
			Ref:       name,
			Total:     desc.Size,
			StartedAt: now,
			UpdatedAt: now,
		},
		afterCommit: afterCommit,
	}, nil
}
// Add adds a file reference from a path, either directory or single file,
// and returns the reference descriptor. An empty path defaults to name, and
// an empty mediaType defaults to the blob or blob-dir default. Directories
// are tarred+gzipped into a temp file. The name is recorded as the
// descriptor's title annotation.
func (s *File) Add(name, mediaType, path string) (ocispec.Descriptor, error) {
	if path == "" {
		path = name
	}
	path = s.MapPath(name, path)
	fileInfo, err := os.Stat(path)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	var desc ocispec.Descriptor
	if fileInfo.IsDir() {
		desc, err = s.descFromDir(name, mediaType, path)
	} else {
		desc, err = s.descFromFile(fileInfo, mediaType, path)
	}
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if desc.Annotations == nil {
		desc.Annotations = make(map[string]string)
	}
	desc.Annotations[ocispec.AnnotationTitle] = name
	s.set(desc)
	return desc, nil
}
// Load is a lower-level memory-only version of Add. Rather than taking a path,
// generating a descriptor and creating a reference, it takes raw data and a descriptor
// that describes that data and stores it in memory. It will disappear at process
// termination.
//
// It is especially useful for adding ephemeral data, such as config, that must
// exist in order to walk a manifest.
func (s *File) Load(desc ocispec.Descriptor, data []byte) error {
	s.memoryMap.Store(desc.Digest, data)
	return nil
}
// Ref gets a reference's descriptor and content. Only in-memory content
// (stored via Load or StoreManifest) can be returned; file-backed content is
// not consulted here.
func (s *File) Ref(ref string) (ocispec.Descriptor, []byte, error) {
	desc, ok := s.getRef(ref)
	if !ok {
		return ocispec.Descriptor{}, nil, ErrNotFound
	}
	// first see if it is in the in-memory manifest map
	manifest, ok := s.getMemory(desc)
	if !ok {
		return ocispec.Descriptor{}, nil, ErrNotFound
	}
	return desc, manifest, nil
}
// descFromFile computes the digest of the file at path and returns a
// descriptor for it, defaulting the media type to DefaultBlobMediaType.
func (s *File) descFromFile(info os.FileInfo, mediaType, path string) (ocispec.Descriptor, error) {
	file, err := os.Open(path)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	defer file.Close()
	digest, err := digest.FromReader(file)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if mediaType == "" {
		mediaType = DefaultBlobMediaType
	}
	return ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    digest,
		Size:      info.Size(),
	}, nil
}
// descFromDir tars and gzips the directory at root into a temp file and
// returns a descriptor for the compressed stream. The descriptor digest is of
// the gzip output; the uncompressed tar digest is recorded under
// AnnotationDigest, and AnnotationUnpack marks the content for extraction.
func (s *File) descFromDir(name, mediaType, root string) (ocispec.Descriptor, error) {
	// generate temp file
	file, err := s.tempFile()
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	defer file.Close()
	s.MapPath(name, file.Name())
	// compress directory
	digester := digest.Canonical.Digester()
	zw := gzip.NewWriter(io.MultiWriter(file, digester.Hash()))
	// deferred Close is a safety net for early returns; the explicit Close
	// below performs the real flush
	defer zw.Close()
	tarDigester := digest.Canonical.Digester()
	if err := tarDirectory(root, name, io.MultiWriter(zw, tarDigester.Hash()), s.Reproducible); err != nil {
		return ocispec.Descriptor{}, err
	}
	// flush all
	if err := zw.Close(); err != nil {
		return ocispec.Descriptor{}, err
	}
	if err := file.Sync(); err != nil {
		return ocispec.Descriptor{}, err
	}
	// generate descriptor
	if mediaType == "" {
		mediaType = DefaultBlobDirMediaType
	}
	info, err := file.Stat()
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	return ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    digester.Digest(),
		Size:      info.Size(),
		Annotations: map[string]string{
			AnnotationDigest: tarDigester.Digest().String(),
			AnnotationUnpack: "true",
		},
	}, nil
}
// tempFile creates a temporary file and tracks it so Close can remove it.
func (s *File) tempFile() (*os.File, error) {
	file, err := ioutil.TempFile("", TempFilePattern)
	if err != nil {
		return nil, err
	}
	s.tmpFiles.Store(file.Name(), file)
	return file, nil
}
// Close frees up resources used by the file store: it removes every tracked
// temp file, collecting (rather than short-circuiting on) removal errors.
func (s *File) Close() error {
	var errs []string
	s.tmpFiles.Range(func(name, _ interface{}) bool {
		if err := os.Remove(name.(string)); err != nil {
			errs = append(errs, err.Error())
		}
		return true
	})
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "; "))
	}
	return nil
}
// resolveWritePath maps name to its target path and validates the write:
// unless AllowPathTraversalOnWrite is set, the target must stay inside the
// store root; when DisableOverwrite is set, the target must not already exist.
func (s *File) resolveWritePath(name string) (string, error) {
	path := s.ResolvePath(name)
	if !s.AllowPathTraversalOnWrite {
		base, err := filepath.Abs(s.root)
		if err != nil {
			return "", err
		}
		target, err := filepath.Abs(path)
		if err != nil {
			return "", err
		}
		rel, err := filepath.Rel(base, target)
		if err != nil {
			return "", ErrPathTraversalDisallowed
		}
		// a relative path escaping the root starts with "../" (or is "..")
		rel = filepath.ToSlash(rel)
		if strings.HasPrefix(rel, "../") || rel == ".." {
			return "", ErrPathTraversalDisallowed
		}
	}
	if s.DisableOverwrite {
		if _, err := os.Stat(path); err == nil {
			return "", ErrOverwriteDisallowed
		} else if !os.IsNotExist(err) {
			return "", err
		}
	}
	return path, nil
}
// createWritePath prepares the write target for a descriptor. Regular blobs
// get a file created directly at path. Descriptors marked with
// AnnotationUnpack="true" are staged in a temp file instead, and the returned
// afterCommit hook extracts that tar.gz into path once the write commits.
func (s *File) createWritePath(path string, desc ocispec.Descriptor, prefix string) (*os.File, func() error, error) {
	if value, ok := desc.Annotations[AnnotationUnpack]; !ok || value != "true" {
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return nil, nil, err
		}
		file, err := os.Create(path)
		return file, nil, err
	}
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, nil, err
	}
	file, err := s.tempFile()
	checksum := desc.Annotations[AnnotationDigest]
	afterCommit := func() error {
		return extractTarGzip(path, prefix, file.Name(), checksum)
	}
	return file, afterCommit, err
}
// MapPath maps name to path, returning the resolved (absolute) path that was
// recorded.
func (s *File) MapPath(name, path string) string {
	path = s.resolvePath(path)
	s.pathMap.Store(name, path)
	return path
}
// ResolvePath returns the path by name, falling back to resolving the name
// itself when no mapping was recorded via MapPath.
func (s *File) ResolvePath(name string) string {
	if value, ok := s.pathMap.Load(name); ok {
		if path, ok := value.(string); ok {
			return path
		}
	}
	// using the name as a fallback solution
	return s.resolvePath(name)
}
// resolvePath returns path unchanged when it is absolute, otherwise joins it
// onto the store root.
func (s *File) resolvePath(path string) string {
	if !filepath.IsAbs(path) {
		path = filepath.Join(s.root, path)
	}
	return path
}
// set records a descriptor, keyed by its digest.
func (s *File) set(desc ocispec.Descriptor) {
	s.descriptor.Store(desc.Digest, desc)
}
// get returns the stored descriptor matching desc's digest, if any.
func (s *File) get(desc ocispec.Descriptor) (ocispec.Descriptor, bool) {
	value, ok := s.descriptor.Load(desc.Digest)
	if !ok {
		return ocispec.Descriptor{}, false
	}
	desc, ok = value.(ocispec.Descriptor)
	return desc, ok
}
// getMemory returns in-memory content for desc's digest, if loaded.
func (s *File) getMemory(desc ocispec.Descriptor) ([]byte, bool) {
	value, ok := s.memoryMap.Load(desc.Digest)
	if !ok {
		return nil, false
	}
	content, ok := value.([]byte)
	return content, ok
}
// getRef returns the descriptor stored for ref, if any.
func (s *File) getRef(ref string) (ocispec.Descriptor, bool) {
	value, ok := s.refMap.Load(ref)
	if !ok {
		return ocispec.Descriptor{}, false
	}
	desc, ok := value.(ocispec.Descriptor)
	return desc, ok
}
// StoreManifest stores a manifest linked to by the provided ref. The children of the
// manifest, such as layers and config, should already exist in the file store, either
// as files linked via Add(), or via Load(). If they do not exist, then a typical
// Fetcher that walks the manifest will hit an unresolved hash.
//
// StoreManifest does *not* validate their presence.
func (s *File) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error {
	s.refMap.Store(ref, desc)
	s.memoryMap.Store(desc.Digest, manifest)
	return nil
}
// fileWriter is a content.Writer that streams a descriptor's content into a
// file, hashing as it goes, and optionally runs a post-commit hook (e.g. tar
// extraction from createWritePath).
type fileWriter struct {
	store       *File
	file        *os.File // nil once committed or closed
	desc        ocispec.Descriptor
	digester    digest.Digester
	status      content.Status
	afterCommit func() error // invoked after a successful Commit; may be nil
}
// Status returns the current state of the write.
func (w *fileWriter) Status() (content.Status, error) {
	return w.status, nil
}
// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *fileWriter) Digest() digest.Digest {
	return w.digester.Digest()
}
// Write p to the transaction, feeding the digester with the bytes actually
// written and advancing the status offset by the same amount.
func (w *fileWriter) Write(p []byte) (n int, err error) {
	n, err = w.file.Write(p)
	w.digester.Hash().Write(p[:n])
	// advance by the bytes actually written (n), not len(p): on a short
	// write the two differ and Offset would otherwise overcount
	w.status.Offset += int64(n)
	w.status.UpdatedAt = time.Now()
	return n, err
}
// Commit finalizes the write: it syncs and closes the file, verifies the
// committed size and digest when provided, records the descriptor in the
// store, and runs the afterCommit hook if set. Committing an already
// committed or closed writer fails with ErrFailedPrecondition.
func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	var base content.Info
	for _, opt := range opts {
		if err := opt(&base); err != nil {
			return err
		}
	}
	if w.file == nil {
		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
	}
	// take ownership of the file so a second Commit/Close is a no-op
	file := w.file
	w.file = nil
	if err := file.Sync(); err != nil {
		file.Close()
		return errors.Wrap(err, "sync failed")
	}
	fileInfo, err := file.Stat()
	if err != nil {
		file.Close()
		return errors.Wrap(err, "stat failed")
	}
	if err := file.Close(); err != nil {
		return errors.Wrap(err, "failed to close file")
	}
	if size > 0 && size != fileInfo.Size() {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size)
	}
	if dgst := w.digester.Digest(); expected != "" && expected != dgst {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
	}
	w.store.set(w.desc)
	if w.afterCommit != nil {
		return w.afterCommit()
	}
	return nil
}
// Close the writer, flushing any unwritten data and leaving the progress
// intact. Closing an already closed or committed writer is a no-op.
func (w *fileWriter) Close() error {
	if w.file == nil {
		return nil
	}
	// best-effort flush; the Close error is what gets reported
	w.file.Sync()
	err := w.file.Close()
	w.file = nil
	return err
}
// Truncate restarts the write from the beginning, resetting the offset,
// digest state, and the file itself. Only size 0 is supported; any other
// size returns ErrUnsupportedSize.
func (w *fileWriter) Truncate(size int64) error {
	if size != 0 {
		return ErrUnsupportedSize
	}
	w.status.Offset = 0
	w.digester.Hash().Reset()
	if _, err := w.file.Seek(0, io.SeekStart); err != nil {
		return err
	}
	return w.file.Truncate(0)
}

View File

@@ -0,0 +1,72 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"compress/gzip"
"fmt"
"io"
"github.com/containerd/containerd/content"
)
// NewGunzipWriter wrap a writer with a gunzip, so that the stream is gunzipped
//
// By default, it calculates the hash when writing. If the option `skipHash` is true,
// it will skip doing the hash. Skipping the hash is intended to be used only
// if you are confident about the validity of the data being passed to the writer,
// and wish to save on the hashing time.
func NewGunzipWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}
	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
		gr, err := gzip.NewReader(r)
		if err != nil {
			done <- fmt.Errorf("error creating gzip reader: %v", err)
			return
		}
		// copy the uncompressed data through in Blocksize chunks
		b := make([]byte, wOpts.Blocksize)
		for {
			var n int
			n, err = gr.Read(b)
			if err != nil && err != io.EOF {
				// error strings carry no trailing newline
				err = fmt.Errorf("GunzipWriter data read error: %v", err)
				break
			}
			// Read never returns n > len(b), so b[:n] is always valid;
			// Read may return both data and io.EOF, so write before
			// checking for EOF
			if n > 0 {
				if _, err2 := w.Write(b[:n]); err2 != nil {
					err = fmt.Errorf("GunzipWriter: error writing to underlying writer: %v", err2)
					break
				}
			}
			if err == io.EOF {
				// clear the error
				err = nil
				break
			}
		}
		gr.Close()
		done <- err
	}, opts...)
}

View File

@@ -0,0 +1,26 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"github.com/containerd/containerd/remotes"
)
// Store is the interface that groups the basic push and fetch methods.
type Store interface {
	remotes.Pusher
	remotes.Fetcher
}

View File

@@ -0,0 +1,112 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"io"
"io/ioutil"
"github.com/containerd/containerd/content"
"github.com/opencontainers/go-digest"
)
// IoContentWriter writer that wraps an io.Writer, so the results can be streamed to
// an open io.Writer. For example, can be used to pull a layer and write it to a file, or device.
type IoContentWriter struct {
	writer   io.Writer
	digester digest.Digester
	size     int64          // bytes written so far
	hash     *digest.Digest // when set, hashing in Write is skipped
}
// NewIoContentWriter create a new IoContentWriter. A nil writer defaults to
// discarding all output.
//
// By default, it calculates the hash when writing. If the option `skipHash` is true,
// it will skip doing the hash. Skipping the hash is intended to be used only
// if you are confident about the validity of the data being passed to the writer,
// and wish to save on the hashing time.
func NewIoContentWriter(writer io.Writer, opts ...WriterOpt) content.Writer {
	w := writer
	if w == nil {
		w = ioutil.Discard
	}
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}
	ioc := &IoContentWriter{
		writer:   w,
		digester: digest.Canonical.Digester(),
		// we take the OutputHash, since the InputHash goes to the passthrough writer,
		// which then passes the processed output to us
		hash: wOpts.OutputHash,
	}
	return NewPassthroughWriter(ioc, func(r io.Reader, w io.Writer, done chan<- error) {
		// write out the data to the io writer
		var (
			err error
		)
		// we could use io.Copy, but calling it with the default blocksize is identical to
		// io.CopyBuffer. Otherwise, we would need some way to let the user flag "I want to use
		// io.Copy", when it should not matter to them
		b := make([]byte, wOpts.Blocksize, wOpts.Blocksize)
		_, err = io.CopyBuffer(w, r, b)
		done <- err
	}, opts...)
}
// Write writes p to the wrapped writer, tracking the total size and hashing
// the written bytes unless a precomputed output hash was supplied.
func (w *IoContentWriter) Write(p []byte) (n int, err error) {
	n, err = w.writer.Write(p)
	if err != nil {
		return 0, err
	}
	w.size += int64(n)
	if w.hash == nil {
		w.digester.Hash().Write(p[:n])
	}
	return
}
// Close is a no-op; the wrapped writer's lifetime is managed by the caller.
func (w *IoContentWriter) Close() error {
	return nil
}
// Digest may return empty digest or panics until committed.
func (w *IoContentWriter) Digest() digest.Digest {
	return w.digester.Digest()
}
// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
// Commit always closes the writer, even on error.
// ErrAlreadyExists aborts the writer.
// For this writer the content has already been streamed, so Commit is a no-op.
func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	return nil
}
// Status returns the current state of write; this writer does not track
// status and always returns the zero value.
func (w *IoContentWriter) Status() (content.Status, error) {
	return content.Status{}, nil
}
// Truncate updates the size of the target blob; a no-op for this writer.
func (w *IoContentWriter) Truncate(size int64) error {
	return nil
}

View File

@@ -0,0 +1,95 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"encoding/json"
"sort"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
artifact "oras.land/oras-go/pkg/artifact"
)
// GenerateManifest generates a manifest. The manifest will include the provided config,
// and descs as layers. Raw bytes will be returned. A nil config is replaced
// with a generated blank config.
func GenerateManifest(config *ocispec.Descriptor, annotations map[string]string, descs ...ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) {
	// Config - either it was set, or we have to set it
	if config == nil {
		_, configGen, err := GenerateConfig(nil)
		if err != nil {
			return nil, ocispec.Descriptor{}, err
		}
		config = &configGen
	}
	return pack(*config, annotations, descs)
}
// GenerateConfig generates a blank config with optional annotations. The raw
// config bytes ("{}") are returned alongside their descriptor.
func GenerateConfig(annotations map[string]string) ([]byte, ocispec.Descriptor, error) {
	raw := []byte("{}")
	desc := ocispec.Descriptor{
		MediaType:   artifact.UnknownConfigMediaType,
		Digest:      digest.FromBytes(raw),
		Size:        int64(len(raw)),
		Annotations: annotations,
	}
	return raw, desc, nil
}
// GenerateManifestAndConfig generates a config and then a manifest. Raw bytes will be returned.
func GenerateManifestAndConfig(manifestAnnotations map[string]string, configAnnotations map[string]string, descs ...ocispec.Descriptor) (manifest []byte, manifestDesc ocispec.Descriptor, config []byte, configDesc ocispec.Descriptor, err error) {
	config, configDesc, err = GenerateConfig(configAnnotations)
	if err != nil {
		return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err
	}
	manifest, manifestDesc, err = GenerateManifest(&configDesc, manifestAnnotations, descs...)
	if err != nil {
		return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err
	}
	return
}
// pack given a bunch of descriptors, create a manifest that references all of them.
// Returns the marshaled manifest bytes and a descriptor for them.
func pack(config ocispec.Descriptor, annotations map[string]string, descriptors []ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) {
	if descriptors == nil {
		descriptors = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs
	}
	// sort descriptors alphanumerically by sha hash so it always is consistent
	sort.Slice(descriptors, func(i, j int) bool {
		return descriptors[i].Digest < descriptors[j].Digest
	})
	manifest := ocispec.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2, // historical value. does not pertain to OCI or docker version
		},
		Config:      config,
		Layers:      descriptors,
		Annotations: annotations,
	}
	manifestBytes, err := json.Marshal(manifest)
	if err != nil {
		return nil, ocispec.Descriptor{}, err
	}
	manifestDescriptor := ocispec.Descriptor{
		MediaType: ocispec.MediaTypeImageManifest,
		Digest:    digest.FromBytes(manifestBytes),
		Size:      int64(len(manifestBytes)),
	}
	return manifestBytes, manifestDescriptor, nil
}

View File

@@ -0,0 +1,284 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Memory provides content from the memory
type Memory struct {
	descriptor map[digest.Digest]ocispec.Descriptor // digest -> descriptor
	content    map[digest.Digest][]byte             // digest -> raw content
	nameMap    map[string]ocispec.Descriptor        // title annotation -> descriptor
	refMap     map[string]ocispec.Descriptor        // reference -> root descriptor
	lock       *sync.Mutex                          // guards descriptor/content/nameMap via Set/Get
}
// NewMemory creates a new memory store
func NewMemory() *Memory {
	return &Memory{
		descriptor: make(map[digest.Digest]ocispec.Descriptor),
		content:    make(map[digest.Digest][]byte),
		nameMap:    make(map[string]ocispec.Descriptor),
		refMap:     make(map[string]ocispec.Descriptor),
		lock:       &sync.Mutex{},
	}
}
// Resolver returns this store as a remotes.Resolver.
func (s *Memory) Resolver() remotes.Resolver {
	return s
}
// Resolve resolves a previously stored reference to its descriptor. The
// returned name is the ref itself.
func (s *Memory) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	desc, ok := s.refMap[ref]
	if !ok {
		return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref)
	}
	return ref, desc, nil
}
// Fetcher returns this store as a remotes.Fetcher for a known reference, or
// an error if the reference has never been stored.
func (s *Memory) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
	if _, ok := s.refMap[ref]; !ok {
		return nil, fmt.Errorf("unknown reference: %s", ref)
	}
	return s, nil
}
// Fetch get an io.ReadCloser for the specific content
func (s *Memory) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	_, content, ok := s.Get(desc)
	if !ok {
		return nil, ErrNotFound
	}
	return ioutil.NopCloser(bytes.NewReader(content)), nil
}
// Pusher returns a remotes.Pusher that writes content into this memory store.
// The reference may carry an optional "@<digest>" suffix naming the root hash.
func (s *Memory) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	var tag, hash string
	parts := strings.SplitN(ref, "@", 2)
	if len(parts) > 0 {
		tag = parts[0]
	}
	if len(parts) > 1 {
		hash = parts[1]
	}
	return &memoryPusher{
		store: s,
		ref:   tag,
		hash:  hash,
	}, nil
}
// memoryPusher pushes content into a Memory store for a single reference.
type memoryPusher struct {
	store *Memory
	ref   string // tag portion of the reference
	hash  string // optional digest portion following "@", may be empty
}
// Push returns a content.Writer that buffers the descriptor's content in
// memory. When the descriptor's digest matches the pusher's root hash, the
// reference is registered to it.
func (s *memoryPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	name, _ := ResolveName(desc)
	now := time.Now()
	// is this the root?
	if desc.Digest.String() == s.hash {
		s.store.refMap[s.ref] = desc
	}
	return &memoryWriter{
		store:    s.store,
		buffer:   bytes.NewBuffer(nil),
		desc:     desc,
		digester: digest.Canonical.Digester(),
		status: content.Status{
			Ref:       name,
			Total:     desc.Size,
			StartedAt: now,
			UpdatedAt: now,
		},
	}, nil
}
// Add adds content, generating a descriptor and returning it. A non-empty
// name is recorded as the title annotation; an empty mediaType defaults to
// DefaultBlobMediaType.
func (s *Memory) Add(name, mediaType string, content []byte) (ocispec.Descriptor, error) {
	var annotations map[string]string
	if name != "" {
		annotations = map[string]string{
			ocispec.AnnotationTitle: name,
		}
	}
	if mediaType == "" {
		mediaType = DefaultBlobMediaType
	}
	desc := ocispec.Descriptor{
		MediaType:   mediaType,
		Digest:      digest.FromBytes(content),
		Size:        int64(len(content)),
		Annotations: annotations,
	}
	s.Set(desc, content)
	return desc, nil
}
// Set adds the content to the store, also indexing it by the descriptor's
// title annotation when one is present.
func (s *Memory) Set(desc ocispec.Descriptor, content []byte) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.descriptor[desc.Digest] = desc
	s.content[desc.Digest] = content
	if name, ok := ResolveName(desc); ok && name != "" {
		s.nameMap[name] = desc
	}
}
// Get finds the content from the store by the descriptor's digest, returning
// the stored descriptor, content, and whether both were found.
func (s *Memory) Get(desc ocispec.Descriptor) (ocispec.Descriptor, []byte, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()
	desc, ok := s.descriptor[desc.Digest]
	if !ok {
		return ocispec.Descriptor{}, nil, false
	}
	content, ok := s.content[desc.Digest]
	return desc, content, ok
}
// GetByName finds the content from the store by name (i.e. AnnotationTitle)
func (s *Memory) GetByName(name string) (ocispec.Descriptor, []byte, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()
	desc, ok := s.nameMap[name]
	if !ok {
		return ocispec.Descriptor{}, nil, false
	}
	content, ok := s.content[desc.Digest]
	return desc, content, ok
}
// StoreManifest stores a manifest linked to by the provided ref. The children of the
// manifest, such as layers and config, should already exist in the file store, either
// as files linked via Add(), or via Set(). If they do not exist, then a typical
// Fetcher that walks the manifest will hit an unresolved hash.
//
// StoreManifest does *not* validate their presence.
func (s *Memory) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error {
	// NOTE(review): refMap is written here without holding s.lock, unlike
	// Set/Get — presumably callers serialize StoreManifest; confirm before
	// using concurrently.
	s.refMap[ref] = desc
	// Add never fails, so its error is not checked
	s.Add("", desc.MediaType, manifest)
	return nil
}
// descFromBytes builds a descriptor for the given raw bytes, defaulting the
// media type to DefaultBlobMediaType. The error result is kept for interface
// stability; it is always nil.
func descFromBytes(b []byte, mediaType string) (ocispec.Descriptor, error) {
	if mediaType == "" {
		mediaType = DefaultBlobMediaType
	}
	// digest.FromBytes cannot fail, unlike the reader-based variant the
	// previous implementation round-tripped through
	return ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    digest.FromBytes(b),
		Size:      int64(len(b)),
	}, nil
}
// memoryWriter is a content.Writer that accumulates a descriptor's content in
// a buffer, hashing as it goes, and stores it into the Memory store on Commit.
type memoryWriter struct {
	store    *Memory
	buffer   *bytes.Buffer // nil once committed or closed
	desc     ocispec.Descriptor
	digester digest.Digester
	status   content.Status
}
// Status returns the current state of the write.
func (w *memoryWriter) Status() (content.Status, error) {
	return w.status, nil
}
// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *memoryWriter) Digest() digest.Digest {
	return w.digester.Digest()
}
// Write p to the transaction, feeding the digester with the bytes actually
// written and advancing the status offset by the same amount.
func (w *memoryWriter) Write(p []byte) (n int, err error) {
	n, err = w.buffer.Write(p)
	w.digester.Hash().Write(p[:n])
	// advance by the bytes actually written (n), not len(p): on a short
	// write the two differ and Offset would otherwise overcount
	w.status.Offset += int64(n)
	w.status.UpdatedAt = time.Now()
	return n, err
}
// Commit finalizes the write: it verifies the buffered size and digest when
// provided, then stores the content in the Memory store. Committing an
// already committed or closed writer fails with ErrFailedPrecondition.
func (w *memoryWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	var base content.Info
	for _, opt := range opts {
		if err := opt(&base); err != nil {
			return err
		}
	}
	if w.buffer == nil {
		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
	}
	// take ownership of the buffered bytes so a second Commit is rejected
	content := w.buffer.Bytes()
	w.buffer = nil
	if size > 0 && size != int64(len(content)) {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", len(content), size)
	}
	if dgst := w.digester.Digest(); expected != "" && expected != dgst {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
	}
	w.store.Set(w.desc, content)
	return nil
}
// Close releases the buffer; a subsequent Commit will fail.
func (w *memoryWriter) Close() error {
	w.buffer = nil
	return nil
}
// Truncate restarts the write from the beginning. Only size 0 is supported;
// any other size returns ErrUnsupportedSize.
// NOTE(review): assumes the writer has not been closed or committed
// (w.buffer non-nil); calling after Close would dereference nil.
func (w *memoryWriter) Truncate(size int64) error {
	if size != 0 {
		return ErrUnsupportedSize
	}
	w.status.Offset = 0
	w.digester.Hash().Reset()
	w.buffer.Truncate(0)
	return nil
}

View File

@@ -0,0 +1,56 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"fmt"
"io"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// MultiReader store to read content from multiple stores. It finds the content by asking each underlying
// store to find the content, which it does based on the hash.
//
// Example:
//        fileStore := NewFileStore(rootPath)
//        memoryStore := NewMemoryStore()
//        // load up content in fileStore and memoryStore
//        multiStore := MultiReader([]content.Provider{fileStore, memoryStore})
//
// You now can use multiStore anywhere that content.Provider is accepted
type MultiReader struct {
	stores []remotes.Fetcher // consulted in order by Fetch
}
// AddStore add a store to read from
func (m *MultiReader) AddStore(store ...remotes.Fetcher) {
	m.stores = append(m.stores, store...)
}
// Fetch gets an io.ReadCloser for the descriptor from the first underlying
// store able to provide it.
func (m MultiReader) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	for _, store := range m.stores {
		r, err := store.Fetch(ctx, desc)
		if r != nil && err == nil {
			return r, nil
		}
	}
	// we did not find any
	return nil, fmt.Errorf("not found")
}

View File

@@ -0,0 +1,42 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
ctrcontent "github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// MultiWriterIngester an ingester that can provide a single writer or multiple writers for a single
// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer
// that is a tar file with multiple files, each of which should go to a different stream, some of which
// should not be handled at all.
type MultiWriterIngester interface {
	ctrcontent.Ingester
	// Writers returns a function that maps an item name to its writer.
	Writers(ctx context.Context, opts ...ctrcontent.WriterOpt) (func(string) (ctrcontent.Writer, error), error)
}
// MultiWriterPusher a pusher that can provide a single writer or multiple writers for a single
// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer
// that is a tar file with multiple files, each of which should go to a different stream, some of which
// should not be handled at all.
type MultiWriterPusher interface {
	remotes.Pusher
	// Pushers returns a function that maps an item name to its writer.
	Pushers(ctx context.Context, desc ocispec.Descriptor) (func(string) (ctrcontent.Writer, error), error)
}

View File

@@ -0,0 +1,336 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/remotes"
"github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// OCI provides content from the file system with the OCI-Image layout.
// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md
type OCI struct {
	content.Store

	root    string                        // root directory of the OCI layout on disk
	index   *ocispec.Index                // parsed index.json, kept in sync with nameMap
	nameMap map[string]ocispec.Descriptor // ref name -> descriptor, built from the ref-name annotations
}
// NewOCI creates a new OCI store rooted at rootPath. It validates (or creates)
// the oci-layout marker file and loads the existing index.json, if any.
func NewOCI(rootPath string) (*OCI, error) {
	fileStore, err := local.NewStore(rootPath)
	if err != nil {
		return nil, err
	}
	store := &OCI{
		Store: fileStore,
		root:  rootPath,
	}
	if err = store.validateOCILayoutFile(); err != nil {
		return nil, err
	}
	if err = store.LoadIndex(); err != nil {
		return nil, err
	}
	return store, nil
}
// LoadIndex reads the index.json from the file system and rebuilds the
// name -> descriptor map. If the file does not exist yet, an empty index
// (schema version 2) and an empty name map are initialized instead.
func (s *OCI) LoadIndex() error {
	path := filepath.Join(s.root, OCIImageIndexFile)
	indexFile, err := os.Open(path)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// No index.json yet: start with a fresh in-memory index.
		s.index = &ocispec.Index{
			Versioned: specs.Versioned{
				SchemaVersion: 2, // historical value
			},
		}
		s.nameMap = make(map[string]ocispec.Descriptor)
		return nil
	}
	defer indexFile.Close()

	if err := json.NewDecoder(indexFile).Decode(&s.index); err != nil {
		return err
	}
	// Only manifests carrying a ref-name annotation become addressable by name.
	s.nameMap = make(map[string]ocispec.Descriptor)
	for _, desc := range s.index.Manifests {
		if name := desc.Annotations[ocispec.AnnotationRefName]; name != "" {
			s.nameMap[name] = desc
		}
	}
	return nil
}
// SaveIndex writes the in-memory index to index.json on disk, first rebuilding
// the manifest list from the name map so every entry carries its ref-name
// annotation.
func (s *OCI) SaveIndex() error {
	// rebuild the manifest list from the authoritative name map
	var descs []ocispec.Descriptor
	for name, desc := range s.nameMap {
		if desc.Annotations == nil {
			desc.Annotations = map[string]string{}
		}
		desc.Annotations[ocispec.AnnotationRefName] = name
		descs = append(descs, desc)
	}
	s.index.Manifests = descs

	indexJSON, err := json.Marshal(s.index)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(s.root, OCIImageIndexFile), indexJSON, 0644)
}
// Resolver returns the store itself, which implements remotes.Resolver.
func (s *OCI) Resolver() remotes.Resolver {
	return s
}

// Resolve reloads the index and looks up ref in the name map, returning the
// matching descriptor or an error when the reference is unknown.
func (s *OCI) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
	if err = s.LoadIndex(); err != nil {
		return "", ocispec.Descriptor{}, err
	}
	found, ok := s.nameMap[ref]
	if !ok {
		return "", ocispec.Descriptor{}, fmt.Errorf("reference %s not in store", ref)
	}
	return ref, found, nil
}
// Fetcher reloads the index and returns the store as a remotes.Fetcher for
// ref, or an error when the reference is unknown.
func (s *OCI) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
	if err := s.LoadIndex(); err != nil {
		return nil, err
	}
	_, found := s.nameMap[ref]
	if !found {
		return nil, fmt.Errorf("reference %s not in store", ref)
	}
	return s, nil
}
// Fetch returns an io.ReadCloser for the content of the given descriptor,
// adapting the store's ReaderAt into a sequential reader.
func (s *OCI) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	ra, err := s.Store.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	// wrap the ReaderAt so it can be consumed as a plain Reader
	return ioutil.NopCloser(NewReaderAtWrapper(ra)), nil
}
// Pusher returns a remotes.Pusher for the given ref. A ref of the form
// "name@digest" is split into the tag part and the root-content digest.
func (s *OCI) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
	baseRef, hash := ref, ""
	if i := strings.Index(ref, "@"); i >= 0 {
		baseRef, hash = ref[:i], ref[i+1:]
	}
	return &ociPusher{oci: s, ref: baseRef, digest: hash}, nil
}
// AddReference adds or updates a named reference in the index, stamping the
// descriptor with the ref-name annotation. The in-memory nameMap and
// index.Manifests are kept in sync; call SaveIndex to persist.
func (s *OCI) AddReference(name string, desc ocispec.Descriptor) {
	if desc.Annotations == nil {
		desc.Annotations = map[string]string{
			ocispec.AnnotationRefName: name,
		}
	} else {
		desc.Annotations[ocispec.AnnotationRefName] = name
	}

	if _, ok := s.nameMap[name]; ok {
		// Update path: replace the existing manifest entry in place.
		s.nameMap[name] = desc
		for i, ref := range s.index.Manifests {
			if name == ref.Annotations[ocispec.AnnotationRefName] {
				s.index.Manifests[i] = desc
				return
			}
		}
		// Process should not reach here (nameMap and index.Manifests should
		// never disagree).
		// Fallthrough to `Add` scenario and recover.
		s.index.Manifests = append(s.index.Manifests, desc)
		return
	}
	// Add path: brand-new reference.
	s.index.Manifests = append(s.index.Manifests, desc)
	s.nameMap[name] = desc
}
// DeleteReference deletes a named reference from the index and name map.
// The manifest entry is removed with a swap-with-last delete, so the order
// of index.Manifests is not preserved. Call SaveIndex to persist.
func (s *OCI) DeleteReference(name string) {
	if _, ok := s.nameMap[name]; !ok {
		return
	}
	delete(s.nameMap, name)
	for i, desc := range s.index.Manifests {
		if name == desc.Annotations[ocispec.AnnotationRefName] {
			// move the last entry into the freed slot, then truncate
			s.index.Manifests[i] = s.index.Manifests[len(s.index.Manifests)-1]
			s.index.Manifests = s.index.Manifests[:len(s.index.Manifests)-1]
			return
		}
	}
}
// ListReferences lists all references in the index.
// Note: this returns the internal map itself, not a copy; callers must not
// mutate it.
func (s *OCI) ListReferences() map[string]ocispec.Descriptor {
	return s.nameMap
}
// validateOCILayoutFile ensures the `oci-layout` marker file exists and has a
// supported version. If the file is missing it is created with the current
// version; an unsupported version yields ErrUnsupportedVersion.
func (s *OCI) validateOCILayoutFile() error {
	layoutFilePath := filepath.Join(s.root, ocispec.ImageLayoutFile)
	layoutFile, err := os.Open(layoutFilePath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// File missing: write a fresh oci-layout marker with the current version.
		layout := ocispec.ImageLayout{
			Version: ocispec.ImageLayoutVersion,
		}
		layoutJSON, err := json.Marshal(layout)
		if err != nil {
			return err
		}
		return ioutil.WriteFile(layoutFilePath, layoutJSON, 0644)
	}
	defer layoutFile.Close()

	var layout *ocispec.ImageLayout
	err = json.NewDecoder(layoutFile).Decode(&layout)
	if err != nil {
		return err
	}
	if layout.Version != ocispec.ImageLayoutVersion {
		return ErrUnsupportedVersion
	}
	return nil
}
// TODO: implement (needed to create a content.Store)
// TODO: do not return empty content.Info
// Info returns metadata about the content identified by dgst.
// NOTE(review): currently a stub that returns an empty Info with a nil error,
// which callers may misread as success.
func (s *OCI) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
	return content.Info{}, nil
}

// TODO: implement (needed to create a content.Store)
// Update updates mutable information related to content.
// If one or more fieldpaths are provided, only those
// fields will be updated.
// Mutable fields:
// labels.*
func (s *OCI) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
	return content.Info{}, errors.New("not yet implemented: Update (content.Store interface)")
}

// TODO: implement (needed to create a content.Store)
// Walk will call fn for each item in the content store which
// match the provided filters. If no filters are given all
// items will be walked.
func (s *OCI) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
	return errors.New("not yet implemented: Walk (content.Store interface)")
}

// Delete removes the content from the store, delegating to the underlying
// local store.
func (s *OCI) Delete(ctx context.Context, dgst digest.Digest) error {
	return s.Store.Delete(ctx, dgst)
}

// TODO: implement (needed to create a content.Store)
// Status returns the status of the provided ref.
func (s *OCI) Status(ctx context.Context, ref string) (content.Status, error) {
	return content.Status{}, errors.New("not yet implemented: Status (content.Store interface)")
}

// TODO: implement (needed to create a content.Store)
// ListStatuses returns the status of any active ingestions whose ref match the
// provided regular expression. If empty, all active ingestions will be
// returned.
func (s *OCI) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
	return []content.Status{}, errors.New("not yet implemented: ListStatuses (content.Store interface)")
}

// TODO: implement (needed to create a content.Store)
// Abort completely cancels the ingest operation targeted by ref.
func (s *OCI) Abort(ctx context.Context, ref string) error {
	return errors.New("not yet implemented: Abort (content.Store interface)")
}

// ReaderAt provides the contents of the descriptor, delegating to the
// underlying local store.
func (s *OCI) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	return s.Store.ReaderAt(ctx, desc)
}
// ociPusher pushes content for a single reference; it can handle multiple
// descriptors. It recognizes when the root manifest is being pushed and
// creates the tag for it.
type ociPusher struct {
	oci    *OCI   // destination store
	ref    string // tag (name) portion of the reference
	digest string // digest of the root content; empty if none was provided
}
// Push returns a writer for a single descriptor. If the descriptor is a
// manifest or index whose digest matches the root digest this pusher was
// created with, the tag is recorded in the index and the index is saved
// before the content itself is written.
func (p *ociPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	// do we need to create a tag?
	switch desc.MediaType {
	case ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
		// if the hash of the content matches that which was provided as the hash for the root, mark it
		if p.digest != "" && p.digest == desc.Digest.String() {
			// reload so we do not clobber concurrent index changes on disk
			if err := p.oci.LoadIndex(); err != nil {
				return nil, err
			}
			p.oci.nameMap[p.ref] = desc
			if err := p.oci.SaveIndex(); err != nil {
				return nil, err
			}
		}
	}
	return p.oci.Store.Writer(ctx, content.WithDescriptor(desc), content.WithRef(p.ref))
}

View File

@@ -0,0 +1,112 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"errors"
"github.com/opencontainers/go-digest"
)
// WriterOpts holds the configurable behavior shared by the writers in this package.
type WriterOpts struct {
	InputHash           *digest.Digest // expected digest of the input stream; when set, input hashing is skipped
	OutputHash          *digest.Digest // expected digest of the output stream; when set, output hashing is skipped
	Blocksize           int            // chunk size used when copying data
	MultiWriterIngester bool           // treat the ingester as a MultiWriterIngester
	IgnoreNoName        bool           // when true, content whose descriptor has no name is silently discarded
}

// WriterOpt is a functional option that mutates a WriterOpts.
type WriterOpt func(*WriterOpts) error
// DefaultWriterOpts returns the baseline options: no precomputed hashes, the
// package default block size, and names-not-required behavior.
func DefaultWriterOpts() WriterOpts {
	opts := WriterOpts{
		Blocksize:    DefaultBlocksize,
		IgnoreNoName: true,
	}
	// InputHash and OutputHash stay nil: digests are computed on the fly.
	return opts
}
// WithInputHash provide the expected input hash to a writer. Writers
// may suppress their own calculation of a hash on the stream, taking this
// hash instead. If the Writer processes the data before passing it on to another
// Writer layer, this is the hash of the *input* stream.
//
// To have a blank hash, use WithInputHash(BlankHash).
func WithInputHash(hash digest.Digest) WriterOpt {
	return func(w *WriterOpts) error {
		// the parameter is copied per WithInputHash call, so taking its
		// address here is safe
		w.InputHash = &hash
		return nil
	}
}

// WithOutputHash provide the expected output hash to a writer. Writers
// may suppress their own calculation of a hash on the stream, taking this
// hash instead. If the Writer processes the data before passing it on to another
// Writer layer, this is the hash of the *output* stream.
//
// To have a blank hash, use WithOutputHash(BlankHash).
func WithOutputHash(hash digest.Digest) WriterOpt {
	return func(w *WriterOpts) error {
		w.OutputHash = &hash
		return nil
	}
}
// WithBlocksize set the blocksize used by the processor of data.
// The default is DefaultBlocksize, which is the same as that used by io.Copy.
// Includes a safety check to ensure the caller doesn't actively set it to <= 0.
func WithBlocksize(blocksize int) WriterOpt {
	return func(w *WriterOpts) error {
		if blocksize <= 0 {
			// the message now matches the check: zero is rejected too
			// (the original said "greater than or equal to 0")
			return errors.New("blocksize must be greater than 0")
		}
		w.Blocksize = blocksize
		return nil
	}
}
// WithMultiWriterIngester the passed ingester also implements MultiWriter
// and should be used as such. If this is set to true, but the ingester does not
// implement MultiWriter, calling Writer should return an error.
func WithMultiWriterIngester() WriterOpt {
	return func(w *WriterOpts) error {
		w.MultiWriterIngester = true
		return nil
	}
}

// WithErrorOnNoName some ingesters, when creating a Writer, do not return an error if
// the descriptor does not have a valid name on the descriptor. Passing WithErrorOnNoName
// tells the writer to return an error instead of passing the data to a nil writer.
func WithErrorOnNoName() WriterOpt {
	return func(w *WriterOpts) error {
		// false means "nameless content is an error"
		w.IgnoreNoName = false
		return nil
	}
}

// WithIgnoreNoName some ingesters, when creating a Writer, return an error if
// the descriptor does not have a valid name on the descriptor. Passing WithIgnoreNoName
// tells the writer not to return an error, but rather to pass the data to a nil writer.
//
// Deprecated: Use WithErrorOnNoName
func WithIgnoreNoName() WriterOpt {
	return func(w *WriterOpts) error {
		w.IgnoreNoName = true
		return nil
	}
}

View File

@@ -0,0 +1,286 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"errors"
"io"
"time"
"github.com/containerd/containerd/content"
"github.com/opencontainers/go-digest"
)
// PassthroughWriter takes an input stream and passes it through to an underlying writer,
// while providing the ability to manipulate the stream before it gets passed through
type PassthroughWriter struct {
	writer           content.Writer    // final destination
	pipew            *io.PipeWriter    // write side of the pipe feeding the processing goroutine
	digester         digest.Digester   // digest of the raw input stream
	size             int64             // bytes accepted via Write (input side)
	underlyingWriter *underlyingWriter // wraps writer; tracks the processed (output) size/digest
	reader           *io.PipeReader    // read side of the pipe, consumed by the processing func
	hash             *digest.Digest    // optional precomputed input hash; when set, digester is unused
	done             chan error        // processing goroutine reports completion here
}
// NewPassthroughWriter creates a pass-through writer that allows for processing
// the content via an arbitrary function. The function should do whatever processing it
// wants, reading from the Reader to the Writer. When done, it must indicate via
// sending an error or nil to the done channel.
// NOTE(review): if any option returns an error this constructor returns a nil
// content.Writer, which will panic at first use by the caller — confirm callers
// always pass valid options.
func NewPassthroughWriter(writer content.Writer, f func(r io.Reader, w io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}

	r, w := io.Pipe()
	pw := &PassthroughWriter{
		writer:   writer,
		pipew:    w,
		digester: digest.Canonical.Digester(),
		// the underlying writer gets its own digester so the processed
		// (output) digest can differ from the input digest
		underlyingWriter: &underlyingWriter{
			writer:   writer,
			digester: digest.Canonical.Digester(),
			hash:     wOpts.OutputHash,
		},
		reader: r,
		hash:   wOpts.InputHash,
		done:   make(chan error, 1),
	}
	// the processing function reads what Write feeds into the pipe and
	// writes the processed bytes to the underlying writer
	go f(r, pw.underlyingWriter, pw.done)
	return pw
}
// Write feeds p into the pipe consumed by the processing goroutine, folding
// the accepted bytes into the input digest (unless a precomputed input hash
// was supplied) and the input size counter.
func (pw *PassthroughWriter) Write(p []byte) (n int, err error) {
	n, err = pw.pipew.Write(p)
	if pw.hash == nil {
		pw.digester.Hash().Write(p[:n])
	}
	pw.size += int64(n)
	return
}
// Close closes the input pipe (signaling EOF to the processing goroutine) and
// then closes the underlying writer.
//
// The original implementation discarded the underlying writer's Close error
// and unconditionally returned nil; the error is now propagated so callers
// can observe a failure to release the underlying resource.
func (pw *PassthroughWriter) Close() error {
	if pw.pipew != nil {
		pw.pipew.Close()
	}
	return pw.writer.Close()
}
// Digest may return empty digest or panics until committed.
// When a precomputed input hash was supplied it is returned verbatim;
// otherwise the digest computed over everything written so far is returned.
func (pw *PassthroughWriter) Digest() digest.Digest {
	if pw.hash != nil {
		return *pw.hash
	}
	return pw.digester.Digest()
}

// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
// Commit always closes the writer, even on error.
// ErrAlreadyExists aborts the writer.
//
// Closing the pipe signals EOF to the processing goroutine; Commit then waits
// on the done channel for it to finish before committing the underlying
// writer with the size and digest of the *processed* output stream.
func (pw *PassthroughWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	if pw.pipew != nil {
		pw.pipew.Close()
	}
	err := <-pw.done
	if pw.reader != nil {
		pw.reader.Close()
	}
	if err != nil && err != io.EOF {
		return err
	}

	// Some underlying writers will validate an expected digest, so we need the option to pass it
	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
	return pw.writer.Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
}
// Status returns the current state of write, delegating to the underlying writer.
func (pw *PassthroughWriter) Status() (content.Status, error) {
	return pw.writer.Status()
}

// Truncate updates the size of the target blob, delegating to the underlying writer.
func (pw *PassthroughWriter) Truncate(size int64) error {
	return pw.writer.Truncate(size)
}
// underlyingWriter is an io.Writer adapter over a content.Writer that tracks
// the size and digest of the processed (output) stream.
type underlyingWriter struct {
	writer   content.Writer  // destination writer
	digester digest.Digester // digest of everything written, unless hash is set
	size     int64           // total bytes written
	hash     *digest.Digest  // optional precomputed output hash; when set, digester is unused
}
// Write writes p to the underlying writer, folding the bytes actually written
// into the running digest (unless a precomputed output hash was supplied) and
// into the size counter.
//
// Fixes two accounting defects in the original: on a failed/short write it now
// returns the actual number of bytes written, as the io.Writer contract
// requires, instead of 0; and size/digest now track the bytes actually
// written (p[:n]) rather than assuming all of len(p) made it through.
func (u *underlyingWriter) Write(p []byte) (int, error) {
	n, err := u.writer.Write(p)
	if u.hash == nil {
		u.digester.Hash().Write(p[:n])
	}
	u.size += int64(n)
	return n, err
}
// Size reports the total number of bytes written to the underlying writer.
func (u *underlyingWriter) Size() int64 {
	return u.size
}

// Digest may return empty digest or panics until committed.
// When a precomputed output hash was supplied it is returned verbatim.
func (u *underlyingWriter) Digest() digest.Digest {
	if u.hash != nil {
		return *u.hash
	}
	return u.digester.Digest()
}
// PassthroughMultiWriter is a single writer that passes through to multiple writers,
// allowing the passthrough function to select which writer to use.
type PassthroughMultiWriter struct {
	writers   []*PassthroughWriter // per-name writers created lazily by the getwriter callback
	pipew     *io.PipeWriter       // write side of the pipe feeding the processing goroutine
	digester  digest.Digester      // digest of the raw input stream
	size      int64                // bytes accepted via Write (input side)
	reader    *io.PipeReader       // read side of the pipe, consumed by the processing func
	hash      *digest.Digest       // optional precomputed input hash; when set, digester is unused
	done      chan error           // processing goroutine reports completion here
	startedAt time.Time            // creation time, reported via Status
	updatedAt time.Time            // last Write time, reported via Status
}
// NewPassthroughMultiWriter creates a pass-through writer whose processing
// function can fan content out to multiple named writers. The function
// receives a getwriter callback that lazily creates a PassthroughWriter per
// name (via the supplied writers factory) and records it for Commit/Close.
// NOTE(review): if any option returns an error this constructor returns a nil
// content.Writer, which will panic at first use by the caller.
func NewPassthroughMultiWriter(writers func(name string) (content.Writer, error), f func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}

	r, w := io.Pipe()
	pmw := &PassthroughMultiWriter{
		startedAt: time.Now(),
		updatedAt: time.Now(),
		done:      make(chan error, 1),
		digester:  digest.Canonical.Digester(),
		hash:      wOpts.InputHash,
		pipew:     w,
		reader:    r,
	}

	// get our output writers
	getwriter := func(name string) io.Writer {
		writer, err := writers(name)
		if err != nil || writer == nil {
			// no writer for this name; the processing function is expected
			// to skip nil writers
			return nil
		}
		pw := &PassthroughWriter{
			writer:   writer,
			digester: digest.Canonical.Digester(),
			// separate digester for the processed (output) stream
			underlyingWriter: &underlyingWriter{
				writer:   writer,
				digester: digest.Canonical.Digester(),
				hash:     wOpts.OutputHash,
			},
			done: make(chan error, 1),
		}
		pmw.writers = append(pmw.writers, pw)
		return pw.underlyingWriter
	}
	go f(r, getwriter, pmw.done)
	return pmw
}
// Write feeds p into the pipe consumed by the processing goroutine,
// maintaining the input digest (unless a precomputed input hash was
// supplied), the byte count, and the last-update timestamp.
func (pmw *PassthroughMultiWriter) Write(p []byte) (n int, err error) {
	n, err = pmw.pipew.Write(p)
	if pmw.hash == nil {
		pmw.digester.Hash().Write(p[:n])
	}
	pmw.size += int64(n)
	pmw.updatedAt = time.Now()
	return
}

// Close closes the input pipe and every per-name writer created so far.
// NOTE(review): errors from the individual writers' Close calls are discarded.
func (pmw *PassthroughMultiWriter) Close() error {
	pmw.pipew.Close()
	for _, w := range pmw.writers {
		w.Close()
	}
	return nil
}

// Digest may return empty digest or panics until committed.
// When a precomputed input hash was supplied it is returned verbatim.
func (pmw *PassthroughMultiWriter) Digest() digest.Digest {
	if pmw.hash != nil {
		return *pmw.hash
	}
	return pmw.digester.Digest()
}
// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
// Commit always closes the writer, even on error.
// ErrAlreadyExists aborts the writer.
func (pmw *PassthroughMultiWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	pmw.pipew.Close()
	err := <-pmw.done
	if pmw.reader != nil {
		pmw.reader.Close()
	}
	if err != nil && err != io.EOF {
		return err
	}

	// Some underlying writers will validate an expected digest, so we need the option to pass it
	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
	for _, w := range pmw.writers {
		// each PassthroughWriter.Commit blocks reading from its own done
		// channel, so feed it the overall result first to unblock it
		// maybe this should be Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
		w.done <- err
		if err := w.Commit(ctx, size, expected, opts...); err != nil {
			return err
		}
	}
	return nil
}
// Status returns the current state of write: the tracked timestamps and the
// total input bytes accepted so far.
func (pmw *PassthroughMultiWriter) Status() (content.Status, error) {
	return content.Status{
		StartedAt: pmw.startedAt,
		UpdatedAt: pmw.updatedAt,
		Total:     pmw.size,
	}, nil
}

// Truncate updates the size of the target blob, but cannot do anything with a multiwriter,
// so it always returns an error.
func (pmw *PassthroughMultiWriter) Truncate(size int64) error {
	return errors.New("truncate unavailable on multiwriter")
}

View File

@@ -0,0 +1,68 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"io"
"github.com/containerd/containerd/content"
)
// ensure interface compliance at compile time
var (
	_ content.ReaderAt = sizeReaderAt{}
)

// readAtCloser is an io.ReaderAt that can also be closed.
type readAtCloser interface {
	io.ReaderAt
	io.Closer
}

// sizeReaderAt couples a readAtCloser with a known total size so it can
// satisfy content.ReaderAt.
type sizeReaderAt struct {
	readAtCloser
	size int64
}

// Size reports the total size of the underlying content in bytes.
func (ra sizeReaderAt) Size() int64 {
	return ra.size
}

// NopCloserAt wraps an io.ReaderAt with a no-op Close method.
func NopCloserAt(r io.ReaderAt) nopCloserAt {
	return nopCloserAt{r}
}

// nopCloserAt is an io.ReaderAt whose Close does nothing.
type nopCloserAt struct {
	io.ReaderAt
}

// Close implements io.Closer; it is a no-op.
func (n nopCloserAt) Close() error {
	return nil
}
// readerAtWrapper wraps a ReaderAt to give a Reader
type ReaderAtWrapper struct {
offset int64
readerAt io.ReaderAt
}
func (r *ReaderAtWrapper) Read(p []byte) (n int, err error) {
n, err = r.readerAt.ReadAt(p, r.offset)
r.offset += int64(n)
return
}
func NewReaderAtWrapper(readerAt io.ReaderAt) *ReaderAtWrapper {
return &ReaderAtWrapper{readerAt: readerAt}
}

View File

@@ -0,0 +1,84 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"os"
auth "oras.land/oras-go/pkg/auth/docker"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
)
// RegistryOptions provide configuration options to a Registry
type RegistryOptions struct {
	Configs   []string // paths to docker-style auth config files
	Username  string   // static username; used together with Password
	Password  string   // static password; used together with Username
	Insecure  bool     // skip TLS certificate verification
	PlainHTTP bool     // use HTTP instead of HTTPS
}

// Registry provides content from a spec-compliant registry. Create and use a new one for each
// registry with unique configuration of RegistryOptions.
type Registry struct {
	remotes.Resolver
}
// NewRegistry creates a new Registry store backed by a resolver configured
// from the given options.
func NewRegistry(opts RegistryOptions) (*Registry, error) {
	resolver := newResolver(opts.Username, opts.Password, opts.Insecure, opts.PlainHTTP, opts.Configs...)
	return &Registry{Resolver: resolver}, nil
}
// newResolver builds a docker registry resolver. Static credentials, when
// given, take precedence; otherwise credentials are loaded from the auth
// config files (falling back to an anonymous resolver on failure).
func newResolver(username, password string, insecure bool, plainHTTP bool, configs ...string) remotes.Resolver {
	opts := docker.ResolverOptions{
		PlainHTTP: plainHTTP,
	}
	client := http.DefaultClient
	if insecure {
		// Build a dedicated client rather than mutating http.DefaultClient.
		// The original assigned a TLS-insecure transport to the shared global
		// client, silently disabling certificate verification for every HTTP
		// request in the whole process.
		client = &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
		}
	}
	opts.Client = client

	if username != "" || password != "" {
		// static credentials are used for every host
		opts.Credentials = func(hostName string) (string, string, error) {
			return username, password, nil
		}
		return docker.NewResolver(opts)
	}
	cli, err := auth.NewClient(configs...)
	if err != nil {
		fmt.Fprintf(os.Stderr, "WARNING: Error loading auth file: %v\n", err)
	}
	resolver, err := cli.Resolver(context.Background(), client, plainHTTP)
	if err != nil {
		// fall back to an unauthenticated resolver
		fmt.Fprintf(os.Stderr, "WARNING: Error loading resolver: %v\n", err)
		resolver = docker.NewResolver(opts)
	}
	return resolver
}

View File

@@ -0,0 +1,157 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"archive/tar"
"fmt"
"io"
"github.com/containerd/containerd/content"
)
// NewUntarWriter wrap a writer with an untar, so that the stream is untarred
//
// By default, it calculates the hash when writing. If the option `skipHash` is true,
// it will skip doing the hash. Skipping the hash is intended to be used only
// if you are confident about the validity of the data being passed to the writer,
// and wish to save on the hashing time.
func NewUntarWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			// NOTE(review): a nil content.Writer is returned on option error,
			// matching the other constructors in this package.
			return nil
		}
	}

	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
		tr := tar.NewReader(r)
		// err is declared once and only ASSIGNED inside the loop. The original
		// re-declared it with `:=` in the loop body, shadowing this variable,
		// so `done <- err` always sent nil and tar errors were silently lost.
		var err error
		for {
			if _, err = tr.Next(); err != nil {
				if err == io.EOF {
					// end of archive: clear the error, we do not pass an io.EOF
					err = nil
				} else {
					err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
				}
				break
			}
			// write out the untarred data in Blocksize chunks;
			// io.EOF here just means "end of this file", so go to the next one
			b := make([]byte, wOpts.Blocksize)
			for {
				var n int
				n, err = tr.Read(b)
				if err != nil && err != io.EOF {
					err = fmt.Errorf("UntarWriter file data read error: %v", err)
					break
				}
				// n <= len(b) per the io.Reader contract, so b[:n] is valid
				// (the original clamped n to len(b), which was dead code)
				if _, err2 := w.Write(b[:n]); err2 != nil {
					err = fmt.Errorf("UntarWriter error writing to underlying writer: %v", err2)
					break
				}
				if err == io.EOF {
					// go to the next file
					break
				}
			}
			// did we break with a non-nil and non-EOF error?
			if err != nil && err != io.EOF {
				break
			}
		}
		done <- err
	}, opts...)
}
// NewUntarWriterByName wrap multiple writers with an untar, so that the stream is untarred and passed
// to the appropriate writer, based on the filename. If a filename is not found, it is up to the called func
// to determine how to process it.
func NewUntarWriterByName(writers func(string) (content.Writer, error), opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			// NOTE(review): a nil content.Writer is returned on option error,
			// matching the other constructors in this package.
			return nil
		}
	}

	// need a PassthroughMultiWriter here
	return NewPassthroughMultiWriter(writers, func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error) {
		tr := tar.NewReader(r)
		// err is declared once and only ASSIGNED inside the loop. The original
		// re-declared it with `:=` (alongside header), shadowing this variable,
		// so `done <- err` always sent nil and tar errors were silently lost.
		var err error
		for {
			var header *tar.Header
			header, err = tr.Next()
			if err == io.EOF {
				// end of archive: clear the error, we do not pass an io.EOF
				err = nil
				break
			}
			if err != nil {
				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
				break
			}
			// route this entry to the writer registered for its filename;
			// a nil writer means the caller wants the file skipped
			filename := header.Name
			w := getwriter(filename)
			if w == nil {
				continue
			}
			// write out the untarred data in Blocksize chunks;
			// io.EOF here just means "end of this file", so go to the next one
			b := make([]byte, wOpts.Blocksize)
			for {
				var n int
				n, err = tr.Read(b)
				if err != nil && err != io.EOF {
					err = fmt.Errorf("UntarWriter file data read error: %v", err)
					break
				}
				// n <= len(b) per the io.Reader contract, so b[:n] is valid
				if _, err2 := w.Write(b[:n]); err2 != nil {
					err = fmt.Errorf("UntarWriter error writing to underlying writer for name '%s': %v", filename, err2)
					break
				}
				if err == io.EOF {
					// go to the next file
					break
				}
			}
			// did we break with a non-nil and non-EOF error?
			if err != nil && err != io.EOF {
				break
			}
		}
		done <- err
	}, opts...)
}

View File

@@ -0,0 +1,223 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package content
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ResolveName resolves name from descriptor
func ResolveName(desc ocispec.Descriptor) (string, bool) {
name, ok := desc.Annotations[ocispec.AnnotationTitle]
return name, ok
}
// tarDirectory walks the directory specified by path, and tar those files with a new
// path prefix.
func tarDirectory(root, prefix string, w io.Writer, stripTimes bool) error {
tw := tar.NewWriter(w)
defer tw.Close()
if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Rename path
name, err := filepath.Rel(root, path)
if err != nil {
return err
}
name = filepath.Join(prefix, name)
name = filepath.ToSlash(name)
// Generate header
var link string
mode := info.Mode()
if mode&os.ModeSymlink != 0 {
if link, err = os.Readlink(path); err != nil {
return err
}
}
header, err := tar.FileInfoHeader(info, link)
if err != nil {
return errors.Wrap(err, path)
}
header.Name = name
header.Uid = 0
header.Gid = 0
header.Uname = ""
header.Gname = ""
if stripTimes {
header.ModTime = time.Time{}
header.AccessTime = time.Time{}
header.ChangeTime = time.Time{}
}
// Write file
if err := tw.WriteHeader(header); err != nil {
return errors.Wrap(err, "tar")
}
if mode.IsRegular() {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
if _, err := io.Copy(tw, file); err != nil {
return errors.Wrap(err, path)
}
}
return nil
}); err != nil {
return err
}
return nil
}
// extractTarDirectory extracts tar file to a directory specified by the `root`
// parameter. The file name prefix is ensured to be the string specified by the
// `prefix` parameter and is trimmed.
func extractTarDirectory(root, prefix string, r io.Reader) error {
tr := tar.NewReader(r)
for {
header, err := tr.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
// Name check
name := header.Name
path, err := ensureBasePath(root, prefix, name)
if err != nil {
return err
}
path = filepath.Join(root, path)
// Link check
switch header.Typeflag {
case tar.TypeLink, tar.TypeSymlink:
link := header.Linkname
if !filepath.IsAbs(link) {
link = filepath.Join(filepath.Dir(name), link)
}
if _, err := ensureBasePath(root, prefix, link); err != nil {
return err
}
}
// Create content
switch header.Typeflag {
case tar.TypeReg:
err = writeFile(path, tr, header.FileInfo().Mode())
case tar.TypeDir:
err = os.MkdirAll(path, header.FileInfo().Mode())
case tar.TypeLink:
err = os.Link(header.Linkname, path)
case tar.TypeSymlink:
err = os.Symlink(header.Linkname, path)
default:
continue // Non-regular files are skipped
}
if err != nil {
return err
}
// Change access time and modification time if possible (error ignored)
os.Chtimes(path, header.AccessTime, header.ModTime)
}
}
// ensureBasePath ensures the target path is in the base path,
// returning its relative path to the base path.
func ensureBasePath(root, base, target string) (string, error) {
path, err := filepath.Rel(base, target)
if err != nil {
return "", err
}
cleanPath := filepath.ToSlash(filepath.Clean(path))
if cleanPath == ".." || strings.HasPrefix(cleanPath, "../") {
return "", fmt.Errorf("%q is outside of %q", target, base)
}
// No symbolic link allowed in the relative path
dir := filepath.Dir(path)
for dir != "." {
if info, err := os.Lstat(filepath.Join(root, dir)); err != nil {
if !os.IsNotExist(err) {
return "", err
}
} else if info.Mode()&os.ModeSymlink != 0 {
return "", fmt.Errorf("no symbolic link allowed between %q and %q", base, target)
}
dir = filepath.Dir(dir)
}
return path, nil
}
func writeFile(path string, r io.Reader, perm os.FileMode) error {
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, r)
return err
}
func extractTarGzip(root, prefix, filename, checksum string) error {
file, err := os.Open(filename)
if err != nil {
return err
}
defer file.Close()
zr, err := gzip.NewReader(file)
if err != nil {
return err
}
defer zr.Close()
var r io.Reader = zr
var verifier digest.Verifier
if checksum != "" {
if digest, err := digest.Parse(checksum); err == nil {
verifier = digest.Verifier()
r = io.TeeReader(r, verifier)
}
}
if err := extractTarDirectory(root, prefix, r); err != nil {
return err
}
if verifier != nil && !verifier.Verified() {
return errors.New("content digest mismatch")
}
return nil
}

View File

@@ -0,0 +1,24 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package context
import "context"
// Background returns a default context with logger discarded.
func Background() context.Context {
	return WithLoggerDiscarded(context.Background())
}

View File

@@ -0,0 +1,50 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package context
import (
"context"
"io"
"io/ioutil"
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
)
// WithLogger returns a new context with the provided logger.
// This method wraps github.com/containerd/containerd/log.WithLogger()
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
	return log.WithLogger(ctx, logger)
}

// WithLoggerFromWriter returns a new context with a logger writing to the
// provided writer.
func WithLoggerFromWriter(ctx context.Context, writer io.Writer) context.Context {
	logger := logrus.New()
	logger.Out = writer
	entry := logrus.NewEntry(logger)
	return WithLogger(ctx, entry)
}

// WithLoggerDiscarded returns a new context with a logger whose output is
// discarded.
func WithLoggerDiscarded(ctx context.Context) context.Context {
	return WithLoggerFromWriter(ctx, ioutil.Discard)
}

// GetLogger retrieves the current logger from the context.
// This method wraps github.com/containerd/containerd/log.GetLogger()
func GetLogger(ctx context.Context) *logrus.Entry {
	return log.GetLogger(ctx)
}

View File

@@ -0,0 +1,213 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oras
import (
"bytes"
"context"
"fmt"
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"oras.land/oras-go/pkg/target"
)
// Copy copies a ref from one target.Target to a ref in another target.Target.
// If toRef is blank, fromRef is reused.
// Returns the root descriptor of the copied item, which can be used to
// retrieve child elements from the destination target.Target.
func Copy(ctx context.Context, from target.Target, fromRef string, to target.Target, toRef string, opts ...CopyOpt) (ocispec.Descriptor, error) {
	// the duplicated nil checks against ErrFromResolverUndefined /
	// ErrToResolverUndefined that used to follow option evaluation were
	// unreachable (from/to are already validated here) and have been removed
	if from == nil {
		return ocispec.Descriptor{}, ErrFromTargetUndefined
	}
	if to == nil {
		return ocispec.Descriptor{}, ErrToTargetUndefined
	}
	// blank toRef defaults to the source reference
	if toRef == "" {
		toRef = fromRef
	}
	opt := copyOptsDefaults()
	for _, o := range opts {
		if err := o(opt); err != nil {
			return ocispec.Descriptor{}, err
		}
	}
	// for the "from", we resolve the ref, then use resolver.Fetcher to fetch the various content blobs
	// for the "to", we simply use resolver.Pusher to push the various content blobs
	_, desc, err := from.Resolve(ctx, fromRef)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	fetcher, err := from.Fetcher(ctx, fromRef)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	// construct the reference we send to the pusher using the digest, so it knows what the root is
	pushRef := fmt.Sprintf("%s@%s", toRef, desc.Digest.String())
	pusher, err := to.Pusher(ctx, pushRef)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if err := transferContent(ctx, desc, fetcher, pusher, opt); err != nil {
		return ocispec.Descriptor{}, err
	}
	return desc, nil
}
// transferContent walks the graph rooted at desc on the fetcher side and
// pushes its content to the pusher, applying the media-type filters and
// handlers configured in opts. Blobs are pushed as they are discovered;
// manifests are cached and re-pushed afterwards in reverse discovery order
// so that a parent is always uploaded after its children.
func transferContent(ctx context.Context, desc ocispec.Descriptor, fetcher remotes.Fetcher, pusher remotes.Pusher, opts *copyOpts) error {
	var descriptors, manifests []ocispec.Descriptor
	// lock guards descriptors and manifests; handlers may run concurrently
	// under images.Dispatch
	lock := &sync.Mutex{}
	// picker records every allowed, named descriptor so they can be handed
	// to the saveLayers callback at the end
	picker := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if isAllowedMediaType(desc.MediaType, opts.allowedMediaTypes...) {
			if opts.filterName(desc) {
				lock.Lock()
				defer lock.Unlock()
				descriptors = append(descriptors, desc)
			}
			return nil, nil
		}
		return nil, nil
	})
	// we use a hybrid store - a cache wrapping the underlying pusher - for two reasons:
	// 1. so that we can cache the manifests as pushing them, then retrieve them later to push in reverse order after the blobs
	// 2. so that we can retrieve them to analyze and find children in the Dispatch routine
	store := opts.contentProvideIngesterPusherFetcher
	if store == nil {
		store = newHybridStoreFromPusher(pusher, opts.cachedMediaTypes, true)
	}
	// fetchHandler pushes to the *store*, which may or may not cache it
	baseFetchHandler := func(p remotes.Pusher, f remotes.Fetcher) images.HandlerFunc {
		return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			cw, err := p.Push(ctx, desc)
			if err != nil {
				if !errdefs.IsAlreadyExists(err) {
					return nil, err
				}
				// content already present at the destination; nothing to do
				return nil, nil
			}
			defer cw.Close()
			rc, err := f.Fetch(ctx, desc)
			if err != nil {
				return nil, err
			}
			defer rc.Close()
			return nil, content.Copy(ctx, cw, rc, desc.Size, desc.Digest)
		})
	}
	// track all of our manifests that will be cached
	fetchHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if isAllowedMediaType(desc.MediaType, opts.cachedMediaTypes...) {
			lock.Lock()
			defer lock.Unlock()
			manifests = append(manifests, desc)
		}
		return baseFetchHandler(store, fetcher)(ctx, desc)
	})
	handlers := []images.Handler{
		filterHandler(opts, opts.allowedMediaTypes...),
	}
	handlers = append(handlers, opts.baseHandlers...)
	handlers = append(handlers,
		fetchHandler,
		picker,
		images.ChildrenHandler(&ProviderWrapper{Fetcher: store}),
	)
	handlers = append(handlers, opts.callbackHandlers...)
	if err := opts.dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
		return err
	}
	// we cached all of the manifests, so push those out
	// Iterate in reverse order as seen, parent always uploaded after child
	for i := len(manifests) - 1; i >= 0; i-- {
		_, err := baseFetchHandler(pusher, store)(ctx, manifests[i])
		if err != nil {
			return err
		}
	}
	// if the option to request the root manifest was passed, accommodate it
	// (manifests[0] is the first manifest seen, i.e. the root)
	if opts.saveManifest != nil && len(manifests) > 0 {
		rc, err := store.Fetch(ctx, manifests[0])
		if err != nil {
			return fmt.Errorf("could not get root manifest to save based on CopyOpt: %v", err)
		}
		defer rc.Close()
		buf := new(bytes.Buffer)
		if _, err := buf.ReadFrom(rc); err != nil {
			return fmt.Errorf("unable to read data for root manifest to save based on CopyOpt: %v", err)
		}
		// get the root manifest from the store
		opts.saveManifest(buf.Bytes())
	}
	// if the option to request the layers was passed, accommodate it
	if opts.saveLayers != nil && len(descriptors) > 0 {
		opts.saveLayers(descriptors)
	}
	return nil
}
// filterHandler returns a handler that lets manifests/indexes and allowed,
// named blobs continue down the handler chain, and stops processing for
// everything else (logging a warning for nameless or unknown content).
func filterHandler(opts *copyOpts, allowedMediaTypes ...string) images.HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		// manifests and indexes always pass through
		if isAllowedMediaType(desc.MediaType, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex) {
			return nil, nil
		}
		if isAllowedMediaType(desc.MediaType, allowedMediaTypes...) {
			if opts.filterName(desc) {
				return nil, nil
			}
			log.G(ctx).Warnf("blob no name: %v", desc.Digest)
		} else {
			log.G(ctx).Warnf("unknown type: %v", desc.MediaType)
		}
		return nil, images.ErrStopHandler
	}
}
// isAllowedMediaType reports whether mediaType appears in allowedMediaTypes.
// An empty allow-list permits every media type.
func isAllowedMediaType(mediaType string, allowedMediaTypes ...string) bool {
	if len(allowedMediaTypes) == 0 {
		return true
	}
	for i := range allowedMediaTypes {
		if mediaType == allowedMediaTypes[i] {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,42 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oras
import (
"errors"
"fmt"
)
// Common errors
var (
	ErrResolverUndefined     = errors.New("resolver undefined")
	ErrFromResolverUndefined = errors.New("from target resolver undefined")
	ErrToResolverUndefined   = errors.New("to target resolver undefined")
	ErrFromTargetUndefined   = errors.New("from target undefined")
	// ErrToTargetUndefined previously duplicated ErrFromTargetUndefined's
	// message; callers should match by identity (errors.Is), not by text.
	ErrToTargetUndefined = errors.New("to target undefined")
)

// Path validation related errors
var (
	ErrDirtyPath               = errors.New("dirty path")
	ErrPathNotSlashSeparated   = errors.New("path not slash separated")
	ErrAbsolutePathDisallowed  = errors.New("absolute path disallowed")
	ErrPathTraversalDisallowed = errors.New("path traversal disallowed")
)

// ErrStopProcessing is used to stop processing an oras operation.
// This error only makes sense in sequential pulling operation.
var ErrStopProcessing = fmt.Errorf("stop processing")

View File

@@ -0,0 +1,254 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oras
import (
"context"
"fmt"
"io"
"path/filepath"
"strings"
"sync"
"github.com/containerd/containerd/images"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/semaphore"
orascontent "oras.land/oras-go/pkg/content"
)
// copyOptsDefaults returns the options used by Copy when no CopyOpt
// overrides them: concurrent dispatch via images.Dispatch, manifest/index
// caching, and path-style name validation.
func copyOptsDefaults() *copyOpts {
	return &copyOpts{
		dispatch:         images.Dispatch,
		filterName:       filterName,
		cachedMediaTypes: []string{ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex},
		validateName:     ValidateNameAsPath,
	}
}

// CopyOpt mutates the options for a Copy operation, returning an error if
// the option cannot be applied.
type CopyOpt func(o *copyOpts) error

// copyOpts collects the configuration assembled by CopyOpt functions.
type copyOpts struct {
	allowedMediaTypes                   []string // if non-empty, only these media types are transferred
	dispatch                            func(context.Context, images.Handler, *semaphore.Weighted, ...ocispec.Descriptor) error
	baseHandlers                        []images.Handler // run before the pull-specific handlers
	callbackHandlers                    []images.Handler // run after the pull-specific handlers
	contentProvideIngesterPusherFetcher orascontent.Store
	filterName                          func(ocispec.Descriptor) bool
	cachedMediaTypes                    []string // media types kept in the in-memory cache
	saveManifest                        func([]byte)
	saveLayers                          func([]ocispec.Descriptor)
	validateName                        func(desc ocispec.Descriptor) error
	userAgent                           string
}
// ValidateNameAsPath validates name in the descriptor as file path in order
// to generate good packages intended to be pulled using the FileStore or
// the oras cli.
// For cross-platform considerations, only unix paths are accepted.
// Checks run in order, so the first failing rule determines the error.
func ValidateNameAsPath(desc ocispec.Descriptor) error {
	// no empty name
	path, ok := orascontent.ResolveName(desc)
	if !ok || path == "" {
		return orascontent.ErrNoName
	}
	// path should be clean
	if target := filepath.ToSlash(filepath.Clean(path)); target != path {
		return errors.Wrap(ErrDirtyPath, path)
	}
	// path should be slash-separated
	if strings.Contains(path, "\\") {
		return errors.Wrap(ErrPathNotSlashSeparated, path)
	}
	// disallow absolute path: covers unix and windows format
	if strings.HasPrefix(path, "/") {
		return errors.Wrap(ErrAbsolutePathDisallowed, path)
	}
	if len(path) > 2 {
		// reject windows drive-letter absolute paths such as "c:/..."
		c := path[0]
		if path[1] == ':' && path[2] == '/' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
			return errors.Wrap(ErrAbsolutePathDisallowed, path)
		}
	}
	// disallow path traversal
	if strings.HasPrefix(path, "../") || path == ".." {
		return errors.Wrap(ErrPathTraversalDisallowed, path)
	}
	return nil
}
// dispatchBFS behaves the same as images.Dispatch() but in sequence with breadth-first search.
func dispatchBFS(ctx context.Context, handler images.Handler, weighted *semaphore.Weighted, descs ...ocispec.Descriptor) error {
	// descs doubles as the BFS queue: children discovered by the handler
	// are appended and visited in order.
	for i := 0; i < len(descs); i++ {
		desc := descs[i]
		children, err := handler.Handle(ctx, desc)
		if err != nil {
			switch err := errors.Cause(err); err {
			case images.ErrSkipDesc:
				continue // don't traverse the children.
			case ErrStopProcessing:
				return nil
			}
			return err
		}
		descs = append(descs, children...)
	}
	return nil
}

// filterName is the default name filter; it accepts every descriptor.
func filterName(desc ocispec.Descriptor) bool {
	// needs to be filled in
	return true
}
// WithAdditionalCachedMediaTypes adds media types normally cached in memory when pulling.
// This does not replace the default media types, but appends to them
func WithAdditionalCachedMediaTypes(cachedMediaTypes ...string) CopyOpt {
	return func(o *copyOpts) error {
		o.cachedMediaTypes = append(o.cachedMediaTypes, cachedMediaTypes...)
		return nil
	}
}

// WithAllowedMediaType sets the allowed media types
func WithAllowedMediaType(allowedMediaTypes ...string) CopyOpt {
	return func(o *copyOpts) error {
		o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...)
		return nil
	}
}

// WithAllowedMediaTypes sets the allowed media types
func WithAllowedMediaTypes(allowedMediaTypes []string) CopyOpt {
	return func(o *copyOpts) error {
		o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...)
		return nil
	}
}

// WithPullByBFS opt to pull in sequence with breadth-first search
func WithPullByBFS(o *copyOpts) error {
	o.dispatch = dispatchBFS
	return nil
}

// WithPullBaseHandler provides base handlers, which will be called before
// any pull specific handlers.
func WithPullBaseHandler(handlers ...images.Handler) CopyOpt {
	return func(o *copyOpts) error {
		o.baseHandlers = append(o.baseHandlers, handlers...)
		return nil
	}
}

// WithPullCallbackHandler provides callback handlers, which will be called after
// any pull specific handlers.
func WithPullCallbackHandler(handlers ...images.Handler) CopyOpt {
	return func(o *copyOpts) error {
		o.callbackHandlers = append(o.callbackHandlers, handlers...)
		return nil
	}
}

// WithContentStore opts to use the provided Store
// for file system I/O, including caches.
func WithContentStore(store orascontent.Store) CopyOpt {
	return func(o *copyOpts) error {
		o.contentProvideIngesterPusherFetcher = store
		return nil
	}
}
// WithPullEmptyNameAllowed allows pulling blobs with empty name.
func WithPullEmptyNameAllowed() CopyOpt {
	return func(o *copyOpts) error {
		o.filterName = func(ocispec.Descriptor) bool {
			return true
		}
		return nil
	}
}

// WithPullStatusTrack reports pull results to the given writer.
func WithPullStatusTrack(writer io.Writer) CopyOpt {
	return WithPullCallbackHandler(pullStatusTrack(writer))
}

// pullStatusTrack returns a handler that prints one "Downloaded" line per
// named descriptor as it is pulled.
func pullStatusTrack(writer io.Writer) images.Handler {
	var printLock sync.Mutex
	return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if name, ok := orascontent.ResolveName(desc); ok {
			digestString := desc.Digest.String()
			if err := desc.Digest.Validate(); err == nil {
				if algo := desc.Digest.Algorithm(); algo == digest.SHA256 {
					// abbreviate valid sha256 digests to the first 12 hex chars
					digestString = desc.Digest.Encoded()[:12]
				}
			}
			printLock.Lock()
			defer printLock.Unlock()
			fmt.Fprintln(writer, "Downloaded", digestString, name)
		}
		return nil, nil
	})
}

// WithNameValidation validates the image title in the descriptor.
// Pass nil to disable name validation.
func WithNameValidation(validate func(desc ocispec.Descriptor) error) CopyOpt {
	return func(o *copyOpts) error {
		o.validateName = validate
		return nil
	}
}

// WithUserAgent set the user agent string in http communications
func WithUserAgent(agent string) CopyOpt {
	return func(o *copyOpts) error {
		o.userAgent = agent
		return nil
	}
}

// WithLayerDescriptors passes the slice of Descriptors for layers to the
// provided func. If the passed parameter is nil, returns an error.
func WithLayerDescriptors(save func([]ocispec.Descriptor)) CopyOpt {
	return func(o *copyOpts) error {
		if save == nil {
			return errors.New("layers save func must be non-nil")
		}
		o.saveLayers = save
		return nil
	}
}

// WithRootManifest passes the root manifest for the artifacts to the provided
// func. If the passed parameter is nil, returns an error.
func WithRootManifest(save func(b []byte)) CopyOpt {
	return func(o *copyOpts) error {
		if save == nil {
			return errors.New("manifest save func must be non-nil")
		}
		o.saveManifest = save
		return nil
	}
}

View File

@@ -0,0 +1,79 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oras
import (
"context"
"errors"
"io"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// ProviderWrapper wraps a remotes.Fetcher to make a content.Provider, which
// is useful for consumers that need random access over fetched content, such
// as images.ChildrenHandler.
type ProviderWrapper struct {
	Fetcher remotes.Fetcher
}

// ReaderAt returns a content.ReaderAt for desc backed by the wrapped Fetcher.
func (p *ProviderWrapper) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	if p.Fetcher == nil {
		return nil, errors.New("no Fetcher provided")
	}
	return &fetcherReaderAt{
		ctx:     ctx,
		fetcher: p.Fetcher,
		desc:    desc,
		offset:  0,
	}, nil
}

// fetcherReaderAt adapts the sequential stream of a remotes.Fetcher to the
// content.ReaderAt interface by tracking the position of the open stream.
type fetcherReaderAt struct {
	ctx     context.Context // retained for the lazy Fetch in ReadAt
	fetcher remotes.Fetcher
	desc    ocispec.Descriptor
	rc      io.ReadCloser // currently open stream; nil until the first read
	offset  int64         // position of rc within the blob
}

// Close closes the underlying stream, if one is open.
func (f *fetcherReaderAt) Close() error {
	if f.rc == nil {
		return nil
	}
	return f.rc.Close()
}

// Size returns the blob size as recorded in the descriptor.
func (f *fetcherReaderAt) Size() int64 {
	return f.desc.Size
}
// ReadAt implements content.ReaderAt on top of a sequential fetch stream.
//
// Sequential reads reuse the open stream. A read at any other offset
// re-fetches the blob and fast-forwards to the requested offset. The
// previous implementation re-fetched on an offset mismatch but neither
// closed the stale stream, nor reset the tracked offset, nor skipped to
// off — so any non-continuing read returned bytes from the wrong position.
func (f *fetcherReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
	// if we do not have a stream positioned at off, open a fresh one
	if f.rc == nil || f.offset != off {
		if f.rc != nil {
			// best-effort close of the stale stream; it is superseded below
			f.rc.Close()
		}
		rc, err := f.fetcher.Fetch(f.ctx, f.desc)
		if err != nil {
			return 0, err
		}
		f.rc = rc
		f.offset = 0
		if off > 0 {
			// discard bytes up to the requested offset
			if _, err := io.CopyN(io.Discard, f.rc, off); err != nil {
				return 0, err
			}
			f.offset = off
		}
	}
	n, err = io.ReadFull(f.rc, p)
	// account for partial reads as well, so the next call can resume
	f.offset += int64(n)
	return n, err
}

View File

@@ -0,0 +1,213 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oras
import (
"context"
"io"
"io/ioutil"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/sync/errgroup"
orascontent "oras.land/oras-go/pkg/content"
)
// hybridStore writes cacheable media types to an in-memory cache and, unless
// cacheOnly is set, also passes them through to the wrapped ingester
// (typically a remote pusher). Non-cacheable content goes straight to the
// ingester.
type hybridStore struct {
	cache            *orascontent.Memory
	cachedMediaTypes []string // media types written to (and readable from) the cache
	cacheOnly        bool     // if true, cached content is not forwarded to the ingester
	provider         content.Provider
	ingester         content.Ingester
}

// newHybridStoreFromPusher builds a hybridStore whose ingester side is backed
// by the given pusher.
func newHybridStoreFromPusher(pusher remotes.Pusher, cachedMediaTypes []string, cacheOnly bool) *hybridStore {
	// construct an ingester from a pusher
	ingester := pusherIngester{
		pusher: pusher,
	}
	return &hybridStore{
		cache:            orascontent.NewMemory(),
		cachedMediaTypes: cachedMediaTypes,
		ingester:         ingester,
		cacheOnly:        cacheOnly,
	}
}

// Set stores the given bytes for desc directly in the in-memory cache.
func (s *hybridStore) Set(desc ocispec.Descriptor, content []byte) {
	s.cache.Set(desc, content)
}

// Fetch returns the content for desc from the cache, falling back to the
// provider (if any) on a cache miss.
func (s *hybridStore) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	reader, err := s.cache.Fetch(ctx, desc)
	if err == nil {
		return reader, err
	}
	if s.provider != nil {
		rat, err := s.provider.ReaderAt(ctx, desc)
		return ioutil.NopCloser(orascontent.NewReaderAtWrapper(rat)), err
	}
	return nil, err
}

// Push opens a writer for desc; see Writer for the caching semantics.
func (s *hybridStore) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	return s.Writer(ctx, content.WithDescriptor(desc))
}

// Writer begins or resumes the active writer identified by desc
func (s *hybridStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
	var wOpts content.WriterOpts
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil, err
		}
	}
	if isAllowedMediaType(wOpts.Desc.MediaType, s.cachedMediaTypes...) || s.ingester == nil {
		pusher, err := s.cache.Pusher(ctx, "")
		if err != nil {
			return nil, err
		}
		cacheWriter, err := pusher.Push(ctx, wOpts.Desc)
		if err != nil {
			return nil, err
		}
		// if we cache it only, do not pass it through
		if s.cacheOnly {
			return cacheWriter, nil
		}
		ingesterWriter, err := s.ingester.Writer(ctx, opts...)
		switch {
		case err == nil:
			// tee every write into both the cache and the ingester
			return newTeeWriter(wOpts.Desc, cacheWriter, ingesterWriter), nil
		case errdefs.IsAlreadyExists(err):
			// destination already has the content; caching alone suffices
			return cacheWriter, nil
		}
		return nil, err
	}
	return s.ingester.Writer(ctx, opts...)
}
// teeWriter tees the content to one or more content.Writer
type teeWriter struct {
	writers  []content.Writer
	digester digest.Digester
	status   content.Status // progress tracked locally; each writer keeps its own
}

// newTeeWriter constructs a teeWriter over the given writers with a fresh
// canonical digester and a zeroed status for desc.
func newTeeWriter(desc ocispec.Descriptor, writers ...content.Writer) *teeWriter {
	now := time.Now()
	return &teeWriter{
		writers:  writers,
		digester: digest.Canonical.Digester(),
		status: content.Status{
			Total:     desc.Size,
			StartedAt: now,
			UpdatedAt: now,
		},
	}
}

// Close closes every underlying writer concurrently, returning the first error.
func (t *teeWriter) Close() error {
	g := new(errgroup.Group)
	for _, w := range t.writers {
		w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines
		g.Go(func() error {
			return w.Close()
		})
	}
	return g.Wait()
}

// Write writes p to every underlying writer concurrently. A writer that
// accepts fewer than len(p) bytes yields io.ErrShortWrite.
func (t *teeWriter) Write(p []byte) (n int, err error) {
	g := new(errgroup.Group)
	for _, w := range t.writers {
		w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines
		g.Go(func() error {
			n, err := w.Write(p[:])
			if err != nil {
				return err
			}
			if n != len(p) {
				return io.ErrShortWrite
			}
			return nil
		})
	}
	err = g.Wait()
	n = len(p)
	if err != nil {
		return n, err
	}
	_, _ = t.digester.Hash().Write(p[:n])
	t.status.Offset += int64(len(p))
	t.status.UpdatedAt = time.Now()
	return n, nil
}

// Digest may return empty digest or panics until committed.
func (t *teeWriter) Digest() digest.Digest {
	return t.digester.Digest()
}

// Commit commits every underlying writer concurrently, returning the first error.
func (t *teeWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	g := new(errgroup.Group)
	for _, w := range t.writers {
		w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines
		g.Go(func() error {
			return w.Commit(ctx, size, expected, opts...)
		})
	}
	return g.Wait()
}

// Status returns the current state of write
func (t *teeWriter) Status() (content.Status, error) {
	return t.status, nil
}

// Truncate updates the size of the target blob
func (t *teeWriter) Truncate(size int64) error {
	g := new(errgroup.Group)
	for _, w := range t.writers {
		w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines
		g.Go(func() error {
			return w.Truncate(size)
		})
	}
	return g.Wait()
}
// pusherIngester adapts a remotes.Pusher to the content.Ingester interface.
type pusherIngester struct {
	pusher remotes.Pusher
}

// Writer applies the writer options and opens a push writer for the
// resulting descriptor.
func (p pusherIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
	var wOpts content.WriterOpts
	for _, apply := range opts {
		if err := apply(&wOpts); err != nil {
			return nil, err
		}
	}
	return p.pusher.Push(ctx, wOpts.Desc)
}

View File

@@ -0,0 +1,177 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
"net/url"
"regexp"
"strings"
"github.com/opencontainers/go-digest"
errdef "oras.land/oras-go/pkg/content"
)
// regular expressions for components.
var (
	// repositoryRegexp is adapted from the distribution implementation.
	// The repository name set under OCI distribution spec is a subset of the
	// docker spec. For maximum compatibility, the docker spec is verified at
	// the client side. Further check is left to the server side.
	// References:
	// - https://github.com/distribution/distribution/blob/v2.7.1/reference/regexp.go#L53
	// - https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests
	repositoryRegexp = regexp.MustCompile(`^[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*(?:/[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*)*$`)
	// tagRegexp checks the tag name.
	// The docker and OCI spec have the same regular expression.
	// Reference: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests
	tagRegexp = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)
)

// Reference references to a descriptor in the registry.
type Reference struct {
	// Registry is the name of the registry.
	// It is usually the domain name of the registry optionally with a port.
	Registry string
	// Repository is the name of the repository.
	Repository string
	// Reference is the reference of the object in the repository.
	// A reference can be a tag or a digest.
	Reference string
}
// ParseReference parses a string into an artifact reference.
// If the reference contains both the tag and the digest, the tag will be
// dropped.
// Digest is recognized only if the corresponding algorithm is available.
func ParseReference(raw string) (Reference, error) {
	parts := strings.SplitN(raw, "/", 2)
	if len(parts) < 2 {
		return Reference{}, fmt.Errorf("%w: missing repository", errdef.ErrInvalidReference)
	}
	res := Reference{Registry: parts[0]}
	path := parts[1]
	if at := strings.Index(path, "@"); at != -1 {
		// digest found; any tag on the repository part is dropped
		res.Repository = path[:at]
		res.Reference = path[at+1:]
		if colon := strings.Index(res.Repository, ":"); colon != -1 {
			res.Repository = res.Repository[:colon]
		}
	} else if colon := strings.Index(path, ":"); colon != -1 {
		// tag found
		res.Repository = path[:colon]
		res.Reference = path[colon+1:]
	} else {
		// neither tag nor digest present
		res.Repository = path
	}
	if err := res.Validate(); err != nil {
		return Reference{}, err
	}
	return res, nil
}
// Validate validates the entire reference.
func (r Reference) Validate() error {
	if err := r.ValidateRegistry(); err != nil {
		return err
	}
	if err := r.ValidateRepository(); err != nil {
		return err
	}
	return r.ValidateReference()
}

// ValidateRegistry validates the registry.
func (r Reference) ValidateRegistry() error {
	// Parsing behind a dummy scheme verifies the registry is a well-formed
	// host[:port] with nothing else attached.
	if uri, err := url.ParseRequestURI("dummy://" + r.Registry); err != nil || uri.Host != r.Registry {
		return fmt.Errorf("%w: invalid registry", errdef.ErrInvalidReference)
	}
	return nil
}

// ValidateRepository validates the repository.
func (r Reference) ValidateRepository() error {
	if repositoryRegexp.MatchString(r.Repository) {
		return nil
	}
	return fmt.Errorf("%w: invalid repository", errdef.ErrInvalidReference)
}

// ValidateReference validates the reference.
func (r Reference) ValidateReference() error {
	if r.Reference == "" {
		return nil
	}
	// A reference that parses as a digest is valid as-is.
	if _, err := r.Digest(); err == nil {
		return nil
	}
	if tagRegexp.MatchString(r.Reference) {
		return nil
	}
	return fmt.Errorf("%w: invalid tag", errdef.ErrInvalidReference)
}
// Host returns the host name of the registry.
func (r Reference) Host() string {
	if r.Registry != "docker.io" {
		return r.Registry
	}
	// "docker.io" is an alias for Docker Hub's actual endpoint.
	return "registry-1.docker.io"
}

// ReferenceOrDefault returns the reference or the default reference if empty.
func (r Reference) ReferenceOrDefault() string {
	if r.Reference != "" {
		return r.Reference
	}
	return "latest"
}

// Digest returns the reference as a digest.
func (r Reference) Digest() (digest.Digest, error) {
	return digest.Parse(r.Reference)
}

// String implements `fmt.Stringer` and returns the reference string.
// The resulted string is meaningful only if the reference is valid.
func (r Reference) String() string {
	if r.Repository == "" {
		return r.Registry
	}
	ref := r.Registry + "/" + r.Repository
	switch d, err := r.Digest(); {
	case r.Reference == "":
		return ref
	case err == nil:
		return ref + "@" + d.String()
	default:
		return ref + ":" + r.Reference
	}
}

View File

@@ -0,0 +1,158 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"strings"
"sync"
errdef "oras.land/oras-go/pkg/content"
"oras.land/oras-go/pkg/registry/remote/internal/syncutil"
)
// DefaultCache is the sharable cache used by DefaultClient.
var DefaultCache Cache = NewCache()
// Cache caches the auth-scheme and auth-token for the "Authorization" header in
// accessing the remote registry.
// Precisely, the header is `Authorization: auth-scheme auth-token`.
// The `auth-token` is a generic term as `token68` in RFC 7235 section 2.1.
type Cache interface {
	// GetScheme returns the auth-scheme part cached for the given registry.
	// A single registry is assumed to have a consistent scheme.
	// If a registry has different schemes per path, the auth client is still
	// workable. However, the cache may not be effective as the cache cannot
	// correctly guess the scheme.
	GetScheme(ctx context.Context, registry string) (Scheme, error)
	// GetToken returns the auth-token part cached for the given registry of a
	// given scheme.
	// The underlying implementation MAY cache the token for all schemes for the
	// given registry.
	GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error)
	// Set fetches the token using the given fetch function and caches the token
	// for the given scheme with the given key for the given registry.
	// The return values of the fetch function is returned by this function.
	// The underlying implementation MAY combine the fetch operation if the Set
	// function is invoked multiple times at the same time.
	Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error)
}
// cacheEntry is a cache entry for a single registry.
type cacheEntry struct {
	// scheme is the auth scheme negotiated with this registry.
	scheme Scheme
	// tokens maps a scope key to its cached auth-token.
	tokens sync.Map // map[string]string
}
// concurrentCache is a cache suitable for concurrent invocation.
type concurrentCache struct {
	// status tracks in-flight token fetches so that concurrent Set calls
	// for the same registry/scheme/key are merged into one fetch.
	status sync.Map // map[string]*syncutil.Once
	// cache holds the fetched tokens per registry.
	cache sync.Map // map[string]*cacheEntry
}
// NewCache creates a new goroutine-safe Cache backed by concurrentCache.
func NewCache() Cache {
	var cc concurrentCache
	return &cc
}
// GetScheme returns the auth-scheme part cached for the given registry.
// errdef.ErrNotFound is returned when no entry exists for the registry.
func (cc *concurrentCache) GetScheme(ctx context.Context, registry string) (Scheme, error) {
	entry, ok := cc.cache.Load(registry)
	if !ok {
		return SchemeUnknown, errdef.ErrNotFound
	}
	return entry.(*cacheEntry).scheme, nil
}
// GetToken returns the auth-token part cached for the given registry of a given
// scheme.
func (cc *concurrentCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) {
	value, found := cc.cache.Load(registry)
	if !found {
		return "", errdef.ErrNotFound
	}
	cached := value.(*cacheEntry)
	if cached.scheme != scheme {
		// a scheme mismatch means the cached token was obtained for a
		// different authentication method and must not be reused.
		return "", errdef.ErrNotFound
	}
	token, found := cached.tokens.Load(key)
	if !found {
		return "", errdef.ErrNotFound
	}
	return token.(string), nil
}
// Set fetches the token using the given fetch function and caches the token
// for the given scheme with the given key for the given registry.
// Set combines the fetch operation if the Set is invoked multiple times at the
// same time.
func (cc *concurrentCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) {
	// fetch token; single-flight the fetch per (registry, scheme, key)
	statusKey := strings.Join([]string{
		registry,
		scheme.String(),
		key,
	}, " ")
	statusValue, _ := cc.status.LoadOrStore(statusKey, syncutil.NewOnce())
	fetchOnce := statusValue.(*syncutil.Once)
	fetchedFirst, result, err := fetchOnce.Do(ctx, func() (interface{}, error) {
		return fetch(ctx)
	})
	if fetchedFirst {
		// this goroutine performed the fetch; drop the single-flight guard
		// so a later Set can fetch a fresh token.
		cc.status.Delete(statusKey)
	}
	if err != nil {
		return "", err
	}
	token := result.(string)
	if !fetchedFirst {
		// another goroutine fetched (and will cache) the token; reuse it.
		return token, nil
	}
	// cache token
	newEntry := &cacheEntry{
		scheme: scheme,
	}
	entryValue, exists := cc.cache.LoadOrStore(registry, newEntry)
	entry := entryValue.(*cacheEntry)
	if exists && entry.scheme != scheme {
		// there is a scheme change, which is not expected in most scenarios.
		// force invalidating all previous cache.
		entry = newEntry
		cc.cache.Store(registry, entry)
	}
	entry.tokens.Store(key, token)
	return token, nil
}
// noCache is a cache implementation that does not do cache at all.
// Using noCache forces the client to re-authenticate on every request.
type noCache struct{}
// GetScheme always returns not found error as it has no cache.
func (noCache) GetScheme(ctx context.Context, registry string) (Scheme, error) {
	return SchemeUnknown, errdef.ErrNotFound
}
// GetToken always returns not found error as it has no cache.
func (noCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) {
	return "", errdef.ErrNotFound
}
// Set calls fetch directly without caching.
func (noCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) {
	return fetch(ctx)
}

View File

@@ -0,0 +1,166 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"strconv"
"strings"
)
// Scheme define the authentication method.
type Scheme byte

const (
	// SchemeUnknown represents unknown or unsupported schemes
	SchemeUnknown Scheme = iota
	// SchemeBasic represents the "Basic" HTTP authentication scheme.
	// Reference: https://tools.ietf.org/html/rfc7617
	SchemeBasic
	// SchemeBearer represents the Bearer token in OAuth 2.0.
	// Reference: https://tools.ietf.org/html/rfc6750
	SchemeBearer
)

// parseScheme parse the authentication scheme from the given string
// case-insensitively.
func parseScheme(scheme string) Scheme {
	if strings.EqualFold(scheme, "basic") {
		return SchemeBasic
	}
	if strings.EqualFold(scheme, "bearer") {
		return SchemeBearer
	}
	return SchemeUnknown
}

// String return the string for the scheme.
func (s Scheme) String() string {
	switch s {
	case SchemeBasic:
		return "Basic"
	case SchemeBearer:
		return "Bearer"
	default:
		return "Unknown"
	}
}
// parseChallenge parses the "WWW-Authenticate" header returned by the remote
// registry, and extracts parameters if scheme is Bearer.
// On any malformed input, it returns whatever was parsed so far.
// References:
// - https://docs.docker.com/registry/spec/auth/token/#how-to-authenticate
// - https://tools.ietf.org/html/rfc7235#section-2.1
func parseChallenge(header string) (scheme Scheme, params map[string]string) {
	// as defined in RFC 7235 section 2.1, we have
	//   challenge   = auth-scheme [ 1*SP ( token68 / #auth-param ) ]
	//   auth-scheme = token
	//   auth-param  = token BWS "=" BWS ( token / quoted-string )
	//
	// since we focus parameters only on Bearer, we have
	//   challenge   = auth-scheme [ 1*SP #auth-param ]
	schemeString, rest := parseToken(header)
	scheme = parseScheme(schemeString)
	// fast path for non bearer challenge
	if scheme != SchemeBearer {
		return
	}
	// parse params for bearer auth.
	// combining RFC 7235 section 2.1 with RFC 7230 section 7, we have
	//   #auth-param => auth-param *( OWS "," OWS auth-param )
	var key, value string
	for {
		key, rest = parseToken(skipSpace(rest))
		if key == "" {
			return
		}
		rest = skipSpace(rest)
		if rest == "" || rest[0] != '=' {
			return
		}
		rest = skipSpace(rest[1:])
		if rest == "" {
			return
		}
		if rest[0] == '"' {
			// quoted-string value (RFC 7230 section 3.2.6)
			prefix, err := strconv.QuotedPrefix(rest)
			if err != nil {
				return
			}
			value, err = strconv.Unquote(prefix)
			if err != nil {
				return
			}
			rest = rest[len(prefix):]
		} else {
			// bare token value
			value, rest = parseToken(rest)
			if value == "" {
				return
			}
		}
		// params is allocated lazily so a challenge without parameters
		// yields a nil map.
		if params == nil {
			params = map[string]string{
				key: value,
			}
		} else {
			params[key] = value
		}
		rest = skipSpace(rest)
		if rest == "" || rest[0] != ',' {
			return
		}
		rest = rest[1:]
	}
}
// isNotTokenChar reports whether rune is not a `tchar` defined in RFC 7230
// section 3.2.6.
func isNotTokenChar(r rune) bool {
	// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
	//       / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
	//       / DIGIT / ALPHA
	//       ; any VCHAR, except delimiters
	switch {
	case r >= 'A' && r <= 'Z', r >= 'a' && r <= 'z', r >= '0' && r <= '9':
		return false
	case strings.ContainsRune("!#$%&'*+-.^_`|~", r):
		return false
	default:
		return true
	}
}

// parseToken finds the next token from the given string. If no token found,
// an empty token is returned and the whole of the input is returned in rest.
// Note: Since token = 1*tchar, empty string is not a valid token.
func parseToken(s string) (token, rest string) {
	for i, r := range s {
		if isNotTokenChar(r) {
			return s[:i], s[i:]
		}
	}
	return s, ""
}

// skipSpace skips "bad" whitespace (BWS) defined in RFC 7230 section 3.2.3,
// i.e. any leading run of SP / HTAB characters.
func skipSpace(s string) string {
	return strings.TrimLeft(s, " \t")
}

View File

@@ -0,0 +1,367 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"oras.land/oras-go/pkg/registry/remote/internal/errutil"
)
// DefaultClient is the default auth-decorated client. It shares DefaultCache
// so tokens are reused across repositories in the same process.
var DefaultClient = &Client{
	Header: http.Header{
		"User-Agent": {"oras-go"},
	},
	Cache: DefaultCache,
}
// maxResponseBytes specifies the default limit on how many response bytes are
// allowed in the server's response from authorization service servers.
// A typical response message from authorization service servers is around 1 to
// 4 KiB. Since the size of a token must be smaller than the HTTP header size
// limit, which is usually 16 KiB. As specified by the distribution, the
// response may contain 2 identical tokens, that is, 16 x 2 = 32 KiB.
// Hence, 128 KiB should be sufficient.
// References: https://docs.docker.com/registry/spec/auth/token/
var maxResponseBytes int64 = 128 * 1024 // 128 KiB
// defaultClientID specifies the default client ID used in OAuth2.
// See also ClientID.
var defaultClientID = "oras-go"
// Client is an auth-decorated HTTP client.
// Its zero value is a usable client that uses http.DefaultClient with no cache.
type Client struct {
	// Client is the underlying HTTP client used to access the remote
	// server.
	// If nil, http.DefaultClient is used.
	Client *http.Client
	// Header contains the custom headers to be added to each request.
	Header http.Header
	// Credential specifies the function for resolving the credential for the
	// given registry (i.e. host:port).
	// `EmptyCredential` is a valid return value and should not be considered as
	// an error.
	// If nil, the credential is always resolved to `EmptyCredential`.
	Credential func(context.Context, string) (Credential, error)
	// Cache caches credentials for direct accessing the remote registry.
	// If nil, no cache is used.
	Cache Cache
	// ClientID used in fetching OAuth2 token as a required field.
	// If empty, a default client ID is used.
	// Reference: https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token
	ClientID string
	// ForceAttemptOAuth2 controls whether to follow OAuth2 with password grant
	// instead the distribution spec when authenticating using username and
	// password.
	// References:
	// - https://docs.docker.com/registry/spec/auth/jwt/
	// - https://docs.docker.com/registry/spec/auth/oauth/
	ForceAttemptOAuth2 bool
}
// client returns the HTTP client backing this auth client, falling back to
// http.DefaultClient when none is configured.
func (c *Client) client() *http.Client {
	if c.Client != nil {
		return c.Client
	}
	return http.DefaultClient
}

// send attaches the configured custom headers to the request and dispatches
// it via the underlying HTTP client.
func (c *Client) send(req *http.Request) (*http.Response, error) {
	for name, vals := range c.Header {
		// direct map assignment preserves the caller's header key casing
		req.Header[name] = append(req.Header[name], vals...)
	}
	return c.client().Do(req)
}

// credential resolves the credential for the given registry, yielding
// EmptyCredential when no resolver is configured.
func (c *Client) credential(ctx context.Context, reg string) (Credential, error) {
	if resolve := c.Credential; resolve != nil {
		return resolve(ctx, reg)
	}
	return EmptyCredential, nil
}

// cache returns the configured token cache, or a pass-through noCache when
// none is configured.
func (c *Client) cache() Cache {
	if c.Cache != nil {
		return c.Cache
	}
	return noCache{}
}

// SetUserAgent sets the user agent for all out-going requests.
func (c *Client) SetUserAgent(userAgent string) {
	if c.Header == nil {
		c.Header = http.Header{}
	}
	c.Header.Set("User-Agent", userAgent)
}
// Do sends the request to the remote server with resolving authentication
// attempted.
// On authentication failure due to bad credential,
// - Do returns error if it fails to fetch token for bearer auth.
// - Do returns the registry response without error for basic auth.
func (c *Client) Do(originalReq *http.Request) (*http.Response, error) {
	ctx := originalReq.Context()
	req := originalReq.Clone(ctx)
	// attempt cached auth token
	var attemptedKey string
	cache := c.cache()
	registry := originalReq.Host
	scheme, err := cache.GetScheme(ctx, registry)
	if err == nil {
		switch scheme {
		case SchemeBasic:
			token, err := cache.GetToken(ctx, registry, SchemeBasic, "")
			if err == nil {
				req.Header.Set("Authorization", "Basic "+token)
			}
		case SchemeBearer:
			scopes := GetScopes(ctx)
			attemptedKey = strings.Join(scopes, " ")
			token, err := cache.GetToken(ctx, registry, SchemeBearer, attemptedKey)
			if err == nil {
				req.Header.Set("Authorization", "Bearer "+token)
			}
		}
	}
	resp, err := c.send(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusUnauthorized {
		// anything other than 401 is returned as-is (including other errors)
		return resp, nil
	}
	// attempt again with credentials for recognized schemes
	challenge := resp.Header.Get("Www-Authenticate")
	scheme, params := parseChallenge(challenge)
	switch scheme {
	case SchemeBasic:
		resp.Body.Close()
		token, err := cache.Set(ctx, registry, SchemeBasic, "", func(ctx context.Context) (string, error) {
			return c.fetchBasicAuth(ctx, registry)
		})
		if err != nil {
			return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err)
		}
		req = originalReq.Clone(ctx)
		req.Header.Set("Authorization", "Basic "+token)
	case SchemeBearer:
		resp.Body.Close()
		// merge hinted scopes with challenged scopes
		scopes := GetScopes(ctx)
		if scope := params["scope"]; scope != "" {
			scopes = append(scopes, strings.Split(scope, " ")...)
			scopes = CleanScopes(scopes)
		}
		key := strings.Join(scopes, " ")
		// attempt the cache again if there is a scope change
		if key != attemptedKey {
			if token, err := cache.GetToken(ctx, registry, SchemeBearer, key); err == nil {
				req = originalReq.Clone(ctx)
				req.Header.Set("Authorization", "Bearer "+token)
				resp, err := c.send(req)
				if err != nil {
					return nil, err
				}
				if resp.StatusCode != http.StatusUnauthorized {
					return resp, nil
				}
				// cached token was rejected; fall through to re-fetch
				resp.Body.Close()
			}
		}
		// attempt with credentials
		realm := params["realm"]
		service := params["service"]
		token, err := cache.Set(ctx, registry, SchemeBearer, key, func(ctx context.Context) (string, error) {
			return c.fetchBearerToken(ctx, registry, realm, service, scopes)
		})
		if err != nil {
			return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err)
		}
		req = originalReq.Clone(ctx)
		req.Header.Set("Authorization", "Bearer "+token)
	default:
		// unrecognized challenge; surface the 401 response to the caller
		return resp, nil
	}
	return c.send(req)
}
// fetchBasicAuth resolves the credential for the given registry and encodes
// it as the token part of a Basic Authorization header.
func (c *Client) fetchBasicAuth(ctx context.Context, registry string) (string, error) {
	cred, err := c.credential(ctx, registry)
	if err != nil {
		return "", fmt.Errorf("failed to resolve credential: %w", err)
	}
	if cred == EmptyCredential {
		return "", errors.New("credential required for basic auth")
	}
	if cred.Username == "" || cred.Password == "" {
		return "", errors.New("missing username or password for basic auth")
	}
	userPass := cred.Username + ":" + cred.Password
	return base64.StdEncoding.EncodeToString([]byte(userPass)), nil
}
// fetchBearerToken fetches an access token for the bearer challenge.
// It prefers a pre-provisioned access token, then OAuth2 (when a refresh
// token is present or ForceAttemptOAuth2 is set), and otherwise falls back
// to the distribution token endpoint (anonymously if no credential exists).
func (c *Client) fetchBearerToken(ctx context.Context, registry, realm, service string, scopes []string) (string, error) {
	cred, err := c.credential(ctx, registry)
	if err != nil {
		return "", err
	}
	if cred.AccessToken != "" {
		// the provided access token can be used directly; no exchange needed
		return cred.AccessToken, nil
	}
	if cred == EmptyCredential || (cred.RefreshToken == "" && !c.ForceAttemptOAuth2) {
		return c.fetchDistributionToken(ctx, realm, service, scopes, cred.Username, cred.Password)
	}
	return c.fetchOAuth2Token(ctx, realm, service, scopes, cred)
}
// fetchDistributionToken fetches an access token as defined by the distribution
// specification.
// It fetches anonymous tokens if no credential is provided.
// References:
// - https://docs.docker.com/registry/spec/auth/jwt/
// - https://docs.docker.com/registry/spec/auth/token/
func (c *Client) fetchDistributionToken(ctx context.Context, realm, service string, scopes []string, username, password string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
	if err != nil {
		return "", err
	}
	if username != "" || password != "" {
		req.SetBasicAuth(username, password)
	}
	q := req.URL.Query()
	if service != "" {
		q.Set("service", service)
	}
	for _, scope := range scopes {
		q.Add("scope", scope)
	}
	req.URL.RawQuery = q.Encode()
	resp, err := c.send(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", errutil.ParseErrorResponse(resp)
	}
	// As specified in https://docs.docker.com/registry/spec/auth/token/ section
	// "Token Response Fields", the token is either in `token` or
	// `access_token`. If both present, they are identical.
	var result struct {
		Token       string `json:"token"`
		AccessToken string `json:"access_token"`
	}
	// cap the read so a misbehaving server cannot exhaust memory
	lr := io.LimitReader(resp.Body, maxResponseBytes)
	if err := json.NewDecoder(lr).Decode(&result); err != nil {
		return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err)
	}
	if result.AccessToken != "" {
		return result.AccessToken, nil
	}
	if result.Token != "" {
		return result.Token, nil
	}
	return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL)
}
// fetchOAuth2Token fetches an OAuth2 access token.
// Reference: https://docs.docker.com/registry/spec/auth/oauth/
func (c *Client) fetchOAuth2Token(ctx context.Context, realm, service string, scopes []string, cred Credential) (string, error) {
	form := url.Values{}
	if cred.RefreshToken != "" {
		// the refresh_token grant is preferred when an identity token exists
		form.Set("grant_type", "refresh_token")
		form.Set("refresh_token", cred.RefreshToken)
	} else if cred.Username != "" && cred.Password != "" {
		form.Set("grant_type", "password")
		form.Set("username", cred.Username)
		form.Set("password", cred.Password)
	} else {
		return "", errors.New("missing username or password for bearer auth")
	}
	form.Set("service", service)
	clientID := c.ClientID
	if clientID == "" {
		clientID = defaultClientID
	}
	form.Set("client_id", clientID)
	if len(scopes) != 0 {
		form.Set("scope", strings.Join(scopes, " "))
	}
	body := strings.NewReader(form.Encode())
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, body)
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	resp, err := c.send(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", errutil.ParseErrorResponse(resp)
	}
	var result struct {
		AccessToken string `json:"access_token"`
	}
	// cap the read so a misbehaving server cannot exhaust memory
	lr := io.LimitReader(resp.Body, maxResponseBytes)
	if err := json.NewDecoder(lr).Decode(&result); err != nil {
		return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err)
	}
	if result.AccessToken != "" {
		return result.AccessToken, nil
	}
	return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL)
}

View File

@@ -0,0 +1,39 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
// EmptyCredential represents an empty credential.
// It is the zero value of Credential and is compared against directly.
var EmptyCredential Credential
// Credential contains authentication credentials used to access remote
// registries.
type Credential struct {
	// Username is the name of the user for the remote registry.
	Username string
	// Password is the secret associated with the username.
	Password string
	// RefreshToken is a bearer token to be sent to the authorization service
	// for fetching access tokens.
	// A refresh token is often referred as an identity token.
	// Reference: https://docs.docker.com/registry/spec/auth/oauth/
	RefreshToken string
	// AccessToken is a bearer token to be sent to the registry.
	// An access token is often referred as a registry token.
	// Reference: https://docs.docker.com/registry/spec/auth/token/
	AccessToken string
}

View File

@@ -0,0 +1,231 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"sort"
"strings"
)
// Actions used in scopes.
// Reference: https://docs.docker.com/registry/spec/auth/scope/
const (
	// ActionPull represents generic read access for resources of the repository
	// type.
	ActionPull = "pull"
	// ActionPush represents generic write access for resources of the
	// repository type.
	ActionPush = "push"
	// ActionDelete represents the delete permission for resources of the
	// repository type.
	ActionDelete = "delete"
)
// ScopeRegistryCatalog is the scope for registry catalog access.
// The wildcard `*` action covers all actions on the catalog resource.
const ScopeRegistryCatalog = "registry:catalog:*"
// ScopeRepository returns a repository scope with given actions.
// It returns "" when the repository name is empty or no valid action remains
// after cleaning.
// Reference: https://docs.docker.com/registry/spec/auth/scope/
func ScopeRepository(repository string, actions ...string) string {
	actions = cleanActions(actions)
	if repository == "" || len(actions) == 0 {
		return ""
	}
	return "repository" + ":" + repository + ":" + strings.Join(actions, ",")
}
// scopesContextKey is the context key for scopes.
type scopesContextKey struct{}
// WithScopes returns a context with scopes added. Scopes are de-duplicated.
// Scopes are used as hints for the auth client to fetch bearer tokens with
// larger scopes.
// For example, uploading blob to the repository "hello-world" does HEAD request
// first then POST and PUT. The HEAD request will return a challenge for scope
// `repository:hello-world:pull`, and the auth client will fetch a token for
// that challenge. Later, the POST request will return a challenge for scope
// `repository:hello-world:push`, and the auth client will fetch a token for
// that challenge again. By invoking `WithScopes()` with the scope
// `repository:hello-world:pull,push`, the auth client with cache is hinted to
// fetch a token via a single token fetch request for all the HEAD, POST, PUT
// requests.
// Passing an empty list of scopes will virtually remove the scope hints in the
// context.
// Reference: https://docs.docker.com/registry/spec/auth/scope/
func WithScopes(ctx context.Context, scopes ...string) context.Context {
	scopes = CleanScopes(scopes)
	return context.WithValue(ctx, scopesContextKey{}, scopes)
}
// AppendScopes appends additional scopes to the existing scopes in the context
// and returns a new context. The resulted scopes are de-duplicated.
// The append operation does not modify the scopes stored in the context passed
// in, since GetScopes returns a copy of the stored slice.
func AppendScopes(ctx context.Context, scopes ...string) context.Context {
	if len(scopes) == 0 {
		return ctx
	}
	return WithScopes(ctx, append(GetScopes(ctx), scopes...)...)
}
// GetScopes returns the scopes in the context.
// The returned slice is a copy, so callers cannot mutate the stored scopes.
func GetScopes(ctx context.Context) []string {
	if scopes, ok := ctx.Value(scopesContextKey{}).([]string); ok {
		return append([]string(nil), scopes...)
	}
	return nil
}
// CleanScopes merges and sort the actions in ascending order if the scopes have
// the same resource type and name. The final scopes are sorted in ascending
// order. In other words, the scopes passed in are de-duplicated and sorted.
// Therefore, the output of this function is deterministic.
// If there is a wildcard `*` in the action, other actions in the same resource
// type and name are ignored.
func CleanScopes(scopes []string) []string {
	// fast paths
	switch len(scopes) {
	case 0:
		return nil
	case 1:
		// single scope: only clean its action list
		scope := scopes[0]
		i := strings.LastIndex(scope, ":")
		if i == -1 {
			return []string{scope}
		}
		actionList := strings.Split(scope[i+1:], ",")
		actionList = cleanActions(actionList)
		if len(actionList) == 0 {
			return nil
		}
		actions := strings.Join(actionList, ",")
		scope = scope[:i+1] + actions
		return []string{scope}
	}
	// slow path
	var result []string
	// merge recognizable scopes; the nested maps are
	// resourceType -> resourceName -> action set
	resourceTypes := make(map[string]map[string]map[string]struct{})
	for _, scope := range scopes {
		// extract resource type
		i := strings.Index(scope, ":")
		if i == -1 {
			// unrecognized scope format is passed through untouched
			result = append(result, scope)
			continue
		}
		resourceType := scope[:i]
		// extract resource name and actions
		rest := scope[i+1:]
		i = strings.LastIndex(rest, ":")
		if i == -1 {
			result = append(result, scope)
			continue
		}
		resourceName := rest[:i]
		actions := rest[i+1:]
		if actions == "" {
			// drop scope since no action found
			continue
		}
		// add to the intermediate map for de-duplication
		namedActions := resourceTypes[resourceType]
		if namedActions == nil {
			namedActions = make(map[string]map[string]struct{})
			resourceTypes[resourceType] = namedActions
		}
		actionSet := namedActions[resourceName]
		if actionSet == nil {
			actionSet = make(map[string]struct{})
			namedActions[resourceName] = actionSet
		}
		for _, action := range strings.Split(actions, ",") {
			if action != "" {
				actionSet[action] = struct{}{}
			}
		}
	}
	// reconstruct scopes
	for resourceType, namedActions := range resourceTypes {
		for resourceName, actionSet := range namedActions {
			if len(actionSet) == 0 {
				continue
			}
			var actions []string
			for action := range actionSet {
				if action == "*" {
					// wildcard subsumes every other action
					actions = []string{"*"}
					break
				}
				actions = append(actions, action)
			}
			sort.Strings(actions)
			scope := resourceType + ":" + resourceName + ":" + strings.Join(actions, ",")
			result = append(result, scope)
		}
	}
	// sort and return
	sort.Strings(result)
	return result
}
// cleanActions removes the duplicated actions and sort in ascending order.
// If there is a wildcard `*` in the action, other actions are ignored.
// Note: the input slice is sorted and reused in place.
func cleanActions(actions []string) []string {
	// fast paths
	if len(actions) == 0 {
		return nil
	}
	if len(actions) == 1 {
		if actions[0] == "" {
			return nil
		}
		return actions
	}
	// slow path: sort, then dedupe in place
	sort.Strings(actions)
	unique := actions[:0]
	for _, action := range actions {
		if action == "*" {
			// wildcard subsumes everything else
			return []string{"*"}
		}
		if len(unique) == 0 || action != unique[len(unique)-1] {
			unique = append(unique, action)
		}
	}
	// an empty action sorts first; drop it
	if unique[0] == "" {
		if len(unique) == 1 {
			return nil
		}
		return unique[1:]
	}
	return unique
}

View File

@@ -0,0 +1,83 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errutil
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"unicode"
)
// maxErrorBytes caps how much of an error response body is read when
// decoding a registry error payload. A typical error message is around
// 200 bytes, so 8 KiB leaves ample headroom.
var maxErrorBytes int64 = 8 * 1024 // 8 KiB

// requestError describes a single error entry in a registry error payload.
type requestError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

// Error returns a error string describing the error: the code lowercased
// with underscores turned into spaces, optionally followed by the message.
func (e requestError) Error() string {
	var b strings.Builder
	for _, r := range e.Code {
		if r == '_' {
			b.WriteRune(' ')
		} else {
			b.WriteRune(unicode.ToLower(r))
		}
	}
	if e.Message == "" {
		return b.String()
	}
	return b.String() + ": " + e.Message
}

// requestErrors is a bundle of requestError.
type requestErrors []requestError

// Error returns a error string joining all contained errors with "; ".
func (errs requestErrors) Error() string {
	if len(errs) == 0 {
		return "<nil>"
	}
	if len(errs) == 1 {
		return errs[0].Error()
	}
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}
// ParseErrorResponse parses the error returned by the remote registry.
// The response body is read at most maxErrorBytes. When the body is not a
// recognizable error payload, the generic HTTP status text is used instead.
func ParseErrorResponse(resp *http.Response) error {
	var errmsg string
	var body struct {
		Errors requestErrors `json:"errors"`
	}
	lr := io.LimitReader(resp.Body, maxErrorBytes)
	if err := json.NewDecoder(lr).Decode(&body); err == nil && len(body.Errors) > 0 {
		errmsg = body.Errors.Error()
	} else {
		errmsg = http.StatusText(resp.StatusCode)
	}
	return fmt.Errorf("%s %q: unexpected status code %d: %s", resp.Request.Method, resp.Request.URL, resp.StatusCode, errmsg)
}

View File

@@ -0,0 +1,69 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package syncutil
import (
	"context"
	"errors"
)
// Once is an object that will perform exactly one action.
// Unlike sync.Once, this Once allows the action to have return values.
type Once struct {
	// result and err hold the stored return values of the first successful
	// call to Do.
	result interface{}
	err    error
	// status is a 1-buffered channel: a buffered `true` means ready to run,
	// a closed channel means the action completed.
	status chan bool
}
// NewOnce creates a new Once instance.
func NewOnce() *Once {
	status := make(chan bool, 1)
	status <- true
	return &Once{
		status: status,
	}
}
// Do calls the function f if and only if Do is being called first time or all
// previous function calls are cancelled, deadline exceeded, or panicking.
// When `once.Do(ctx, f)` is called multiple times, the return value of the
// first call of the function f is stored, and is directly returned for other
// calls.
// Besides the return value of the function f, including the error, Do returns
// true if the function f passed is called first and is not cancelled, deadline
// exceeded, or panicking. Otherwise, returns false.
func (o *Once) Do(ctx context.Context, f func() (interface{}, error)) (bool, interface{}, error) {
	// re-arm the Once if f panics so that a later caller can retry
	defer func() {
		if r := recover(); r != nil {
			o.status <- true
			panic(r)
		}
	}()
	for {
		select {
		case inProgress := <-o.status:
			if !inProgress {
				// status channel is closed: a previous call completed;
				// return the stored result.
				return false, o.result, o.err
			}
			result, err := f()
			// use errors.Is so that wrapped context errors (e.g. via
			// fmt.Errorf("...: %w", ctx.Err())) also re-arm the Once,
			// matching the documented retry behavior above.
			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
				o.status <- true
				return false, nil, err
			}
			o.result, o.err = result, err
			close(o.status)
			return true, result, err
		case <-ctx.Done():
			return false, nil, ctx.Err()
		}
	}
}

View File

@@ -0,0 +1,171 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remote
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
errdef "oras.land/oras-go/pkg/content"
"oras.land/oras-go/pkg/registry"
"oras.land/oras-go/pkg/registry/remote/auth"
"oras.land/oras-go/pkg/registry/remote/internal/errutil"
)
// Client is an interface for a HTTP client.
type Client interface {
	// Do sends an HTTP request and returns an HTTP response.
	//
	// Unlike http.RoundTripper, Client can attempt to interpret the response
	// and handle higher-level protocol details such as redirects and
	// authentication.
	//
	// Like http.RoundTripper, Client should not modify the request, and must
	// always close the request body.
	Do(*http.Request) (*http.Response, error)
}
// Repository is an HTTP client to a remote repository.
// Its zero value is not usable; construct it via NewRepository or set
// Reference explicitly.
type Repository struct {
	// Client is the underlying HTTP client used to access the remote registry.
	// If nil, auth.DefaultClient is used.
	Client Client
	// Reference references the remote repository.
	Reference registry.Reference
	// PlainHTTP signals the transport to access the remote repository via HTTP
	// instead of HTTPS.
	PlainHTTP bool
	// ManifestMediaTypes is used in `Accept` header for resolving manifests from
	// references. It is also used in identifying manifests and blobs from
	// descriptors.
	// If an empty list is present, default manifest media types are used.
	ManifestMediaTypes []string
	// TagListPageSize specifies the page size when invoking the tag list API.
	// If zero, the page size is determined by the remote registry.
	// Reference: https://docs.docker.com/registry/spec/api/#tags
	TagListPageSize int
	// ReferrerListPageSize specifies the page size when invoking the Referrers
	// API.
	// If zero, the page size is determined by the remote registry.
	// Reference: https://github.com/oras-project/artifacts-spec/blob/main/manifest-referrers-api.md
	ReferrerListPageSize int
	// MaxMetadataBytes specifies a limit on how many response bytes are allowed
	// in the server's response to the metadata APIs, such as catalog list, tag
	// list, and referrers list.
	// If zero, a default (currently 4MiB) is used.
	MaxMetadataBytes int64
}
// NewRepository creates a client to the remote repository identified by a
// reference string, e.g. "localhost:5000/hello-world".
// It returns an error when the reference cannot be parsed.
func NewRepository(reference string) (*Repository, error) {
	parsed, err := registry.ParseReference(reference)
	if err != nil {
		return nil, err
	}
	repo := &Repository{Reference: parsed}
	return repo, nil
}
// client returns the HTTP client used to access the remote repository,
// falling back to auth.DefaultClient when none has been configured.
func (r *Repository) client() Client {
	if c := r.Client; c != nil {
		return c
	}
	return auth.DefaultClient
}
// parseReference validates the given reference, which may be either a fully
// qualified reference or a bare tag/digest relative to this repository.
// On success a fully qualified reference is returned; a fully qualified input
// must point at this repository's registry and repository.
func (r *Repository) parseReference(reference string) (registry.Reference, error) {
	parsed, err := registry.ParseReference(reference)
	if err != nil {
		// Not fully qualified: treat the input as a tag/digest within this
		// repository and validate the resulting reference.
		fallback := registry.Reference{
			Registry:   r.Reference.Registry,
			Repository: r.Reference.Repository,
			Reference:  reference,
		}
		if err := fallback.ValidateReference(); err != nil {
			return registry.Reference{}, err
		}
		return fallback, nil
	}
	if parsed.Registry != r.Reference.Registry || parsed.Repository != r.Reference.Repository {
		return registry.Reference{}, fmt.Errorf("%w %q: expect %q", errdef.ErrInvalidReference, parsed, r.Reference)
	}
	return parsed, nil
}
// Tags lists the tags available in the repository, invoking fn once per page
// of results until the registry stops returning a next-page link.
func (r *Repository) Tags(ctx context.Context, fn func(tags []string) error) error {
	ctx = withScopeHint(ctx, r.Reference, auth.ActionPull)
	next := buildRepositoryTagListURL(r.PlainHTTP, r.Reference)
	for {
		page, err := r.tags(ctx, fn, next)
		if err == errNoLink {
			// No further pages: normal termination.
			return nil
		}
		if err != nil {
			return err
		}
		next = page
	}
}
// tags fetches a single page of the tag list from url, passes the page to fn,
// and returns the URL of the next page (or errNoLink when there is none).
func (r *Repository) tags(ctx context.Context, fn func(tags []string) error, url string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	// Ask the registry for a specific page size when one is configured.
	if n := r.TagListPageSize; n > 0 {
		query := req.URL.Query()
		query.Set("n", strconv.Itoa(n))
		req.URL.RawQuery = query.Encode()
	}
	resp, err := r.client().Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", errutil.ParseErrorResponse(resp)
	}
	var body struct {
		Tags []string `json:"tags"`
	}
	// Cap how much of the response we are willing to read and decode.
	if err := json.NewDecoder(limitReader(resp.Body, r.MaxMetadataBytes)).Decode(&body); err != nil {
		return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err)
	}
	if err := fn(body.Tags); err != nil {
		return "", err
	}
	return parseLink(resp)
}

View File

@@ -0,0 +1,42 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remote
import (
"fmt"
"oras.land/oras-go/pkg/registry"
)
// buildScheme returns the URL scheme ("http" or "https") used to access the
// remote registry, depending on whether plain HTTP was requested.
func buildScheme(plainHTTP bool) string {
	scheme := "https"
	if plainHTTP {
		scheme = "http"
	}
	return scheme
}
// buildRepositoryBaseURL builds the base endpoint of the remote repository.
// Format: <scheme>://<registry>/v2/<repository>
func buildRepositoryBaseURL(plainHTTP bool, ref registry.Reference) string {
	scheme := buildScheme(plainHTTP)
	return fmt.Sprintf("%s://%s/v2/%s", scheme, ref.Host(), ref.Repository)
}
// buildRepositoryTagListURL builds the URL for accessing the tag list API.
// Format: <scheme>://<registry>/v2/<repository>/tags/list
// The returned URL carries no query parameters; pagination parameters (such
// as "n") are appended by callers when needed.
// Reference: https://docs.docker.com/registry/spec/api/#tags
func buildRepositoryTagListURL(plainHTTP bool, ref registry.Reference) string {
	return buildRepositoryBaseURL(plainHTTP, ref) + "/tags/list"
}

View File

@@ -0,0 +1,72 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remote
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"oras.land/oras-go/pkg/registry"
"oras.land/oras-go/pkg/registry/remote/auth"
)
// defaultMaxMetadataBytes specifies the default limit on how many response
// bytes are allowed in the server's response to the metadata APIs.
// See also: Repository.MaxMetadataBytes
var defaultMaxMetadataBytes int64 = 4 * 1024 * 1024 // 4 MiB
// errNoLink is returned by parseLink() when no Link header is present.
var errNoLink = errors.New("no Link header in response")
// parseLink returns the URL of the response's "Link" header, if present.
func parseLink(resp *http.Response) (string, error) {
link := resp.Header.Get("Link")
if link == "" {
return "", errNoLink
}
if link[0] != '<' {
return "", fmt.Errorf("invalid next link %q: missing '<'", link)
}
if i := strings.IndexByte(link, '>'); i == -1 {
return "", fmt.Errorf("invalid next link %q: missing '>'", link)
} else {
link = link[1:i]
}
linkURL, err := resp.Request.URL.Parse(link)
if err != nil {
return "", err
}
return linkURL.String(), nil
}
// limitReader returns a Reader that reads from r but stops with EOF after at
// most n bytes. When n is zero, defaultMaxMetadataBytes is used instead.
func limitReader(r io.Reader, n int64) io.Reader {
	limit := n
	if limit == 0 {
		limit = defaultMaxMetadataBytes
	}
	return io.LimitReader(r, limit)
}
// withScopeHint returns ctx augmented with an auth scope hint covering the
// given actions on ref's repository.
func withScopeHint(ctx context.Context, ref registry.Reference, actions ...string) context.Context {
	return auth.AppendScopes(ctx, auth.ScopeRepository(ref.Repository, actions...))
}

View File

@@ -0,0 +1,57 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"context"
)
// Repository is an ORAS target and an union of the blob and the manifest CASs.
// As specified by https://docs.docker.com/registry/spec/api/, it is natural to
// assume that content.Resolver interface only works for manifests. Tagging a
// blob may be resulted in an `ErrUnsupported` error. However, this interface
// does not restrict tagging blobs.
// Since a repository is an union of the blob and the manifest CASs, all
// operations defined in the `BlobStore` are executed depending on the media
// type of the given descriptor accordingly.
// Furthurmore, this interface also provides the ability to enforce the
// separation of the blob and the manifests CASs.
type Repository interface {
// Tags lists the tags available in the repository.
// Since the returned tag list may be paginated by the underlying
// implementation, a function should be passed in to process the paginated
// tag list.
// Note: When implemented by a remote registry, the tags API is called.
// However, not all registries supports pagination or conforms the
// specification.
// References:
// - https://github.com/opencontainers/distribution-spec/blob/main/spec.md#content-discovery
// - https://docs.docker.com/registry/spec/api/#tags
// See also `Tags()` in this package.
Tags(ctx context.Context, fn func(tags []string) error) error
}
// Tags lists the tags available in the repository.
func Tags(ctx context.Context, repo Repository) ([]string, error) {
var res []string
if err := repo.Tags(ctx, func(tags []string) error {
res = append(res, tags...)
return nil
}); err != nil {
return nil, err
}
return res, nil
}

View File

@@ -0,0 +1,26 @@
/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package target
import (
"github.com/containerd/containerd/remotes"
)
// Target represents a place to which one can send/push or retrieve/pull artifacts.
// Anything that implements the Target interface can be used as a place to send or
// retrieve artifacts.
//
// At present a Target is simply a containerd remotes.Resolver; the embedding
// leaves room for additional methods in the future.
type Target interface {
	remotes.Resolver
}